From d25ab3a860188fd32710d75eb7173548f4d7292d Mon Sep 17 00:00:00 2001
From: Andreas Wellving
Date: Fri, 26 Oct 2018 13:59:52 +0200
Subject: mm: CVE-2018-17182

mm: get rid of vmacache_flush_all() entirely

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-4.9.y&id=84580567f1f856d2c7a610273315852e345bc3ac

Change-Id: Ic336b6562f3a04292ca75a114a6c651a98d69f58
Signed-off-by: Andreas Wellving
---
 patches/cve/4.9.x.scc                              |   3 +
 ...mm-get-rid-of-vmacache_flush_all-entirely.patch | 175 +++++++++++++++++++++
 2 files changed, 178 insertions(+)
 create mode 100644 patches/cve/CVE-2018-17182-mm-get-rid-of-vmacache_flush_all-entirely.patch

diff --git a/patches/cve/4.9.x.scc b/patches/cve/4.9.x.scc
index 7aec14a..c3eca4d 100644
--- a/patches/cve/4.9.x.scc
+++ b/patches/cve/4.9.x.scc
@@ -30,3 +30,6 @@ SRC_URI += "file://CVE-2018-5390-tcp-free-batches-of-packets-in-tcp_prune_ofo_qu
 
 #CVEs fixed in 4.9.121:
 SRC_URI += "file://CVE-2018-9363-Bluetooth-hidp-buffer-overflow-in-hidp_process_repor.patch"
+
+#CVEs fixed in 4.9.128:
+SRC_URI += "file://CVE-2018-17182-mm-get-rid-of-vmacache_flush_all-entirely.patch"
diff --git a/patches/cve/CVE-2018-17182-mm-get-rid-of-vmacache_flush_all-entirely.patch b/patches/cve/CVE-2018-17182-mm-get-rid-of-vmacache_flush_all-entirely.patch
new file mode 100644
index 0000000..6d6c2ca
--- /dev/null
+++ b/patches/cve/CVE-2018-17182-mm-get-rid-of-vmacache_flush_all-entirely.patch
@@ -0,0 +1,175 @@
+From 84580567f1f856d2c7a610273315852e345bc3ac Mon Sep 17 00:00:00 2001
+From: Linus Torvalds
+Date: Wed, 12 Sep 2018 23:57:48 -1000
+Subject: [PATCH] mm: get rid of vmacache_flush_all() entirely
+
+commit 7a9cdebdcc17e426fb5287e4a82db1dfe86339b2 upstream.
+
+Jann Horn points out that the vmacache_flush_all() function is not only
+potentially expensive, it's buggy too. It also happens to be entirely
+unnecessary, because the sequence number overflow case can be avoided by
+simply making the sequence number be 64-bit. That doesn't even grow the
+data structures in question, because the other adjacent fields are
+already 64-bit.
+
+So simplify the whole thing by just making the sequence number overflow
+case go away entirely, which gets rid of all the complications and makes
+the code faster too. Win-win.
+
+[ Oleg Nesterov points out that the VMACACHE_FULL_FLUSHES statistics
+  also just goes away entirely with this ]
+
+CVE: CVE-2018-17182
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-4.9.y&id=84580567f1f856d2c7a610273315852e345bc3ac]
+
+Reported-by: Jann Horn
+Suggested-by: Will Deacon
+Acked-by: Davidlohr Bueso
+Cc: Oleg Nesterov
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+Signed-off-by: Andreas Wellving
+---
+ include/linux/mm_types.h      |  2 +-
+ include/linux/sched.h         |  2 +-
+ include/linux/vm_event_item.h |  1 -
+ include/linux/vmacache.h      |  5 -----
+ mm/debug.c                    |  4 ++--
+ mm/vmacache.c                 | 38 -----------------------------------
+ 6 files changed, 4 insertions(+), 48 deletions(-)
+
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index e8471c2ca83a..8d6decd50220 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -396,7 +396,7 @@ struct kioctx_table;
+ struct mm_struct {
+ 	struct vm_area_struct *mmap;		/* list of VMAs */
+ 	struct rb_root mm_rb;
+-	u32 vmacache_seqnum;			/* per-thread vmacache */
++	u64 vmacache_seqnum;			/* per-thread vmacache */
+ #ifdef CONFIG_MMU
+ 	unsigned long (*get_unmapped_area) (struct file *filp,
+ 				unsigned long addr, unsigned long len,
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 1cc5723a7821..f4a551a5482c 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1559,7 +1559,7 @@ struct task_struct {
+ 
+ 	struct mm_struct *mm, *active_mm;
+ 	/* per-thread vma caching */
+-	u32 vmacache_seqnum;
++	u64 vmacache_seqnum;
+ 	struct vm_area_struct *vmacache[VMACACHE_SIZE];
+ #if defined(SPLIT_RSS_COUNTING)
+ 	struct task_rss_stat rss_stat;
+diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
+index 2edb150f1a4d..544cd50fbbd0 100644
+--- a/include/linux/vm_event_item.h
++++ b/include/linux/vm_event_item.h
+@@ -97,7 +97,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
+ #ifdef CONFIG_DEBUG_VM_VMACACHE
+ 		VMACACHE_FIND_CALLS,
+ 		VMACACHE_FIND_HITS,
+-		VMACACHE_FULL_FLUSHES,
+ #endif
+ 		NR_VM_EVENT_ITEMS
+ };
+diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
+index c3fa0fd43949..4f58ff2dacd6 100644
+--- a/include/linux/vmacache.h
++++ b/include/linux/vmacache.h
+@@ -15,7 +15,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
+ 	memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
+ }
+ 
+-extern void vmacache_flush_all(struct mm_struct *mm);
+ extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
+ extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
+ 						    unsigned long addr);
+@@ -29,10 +28,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
+ static inline void vmacache_invalidate(struct mm_struct *mm)
+ {
+ 	mm->vmacache_seqnum++;
+-
+-	/* deal with overflows */
+-	if (unlikely(mm->vmacache_seqnum == 0))
+-		vmacache_flush_all(mm);
+ }
+ 
+ #endif /* __LINUX_VMACACHE_H */
+diff --git a/mm/debug.c b/mm/debug.c
+index 9feb699c5d25..bebe48aece6d 100644
+--- a/mm/debug.c
++++ b/mm/debug.c
+@@ -95,7 +95,7 @@ EXPORT_SYMBOL(dump_vma);
+ 
+ void dump_mm(const struct mm_struct *mm)
+ {
+-	pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
++	pr_emerg("mm %p mmap %p seqnum %llu task_size %lu\n"
+ #ifdef CONFIG_MMU
+ 		"get_unmapped_area %p\n"
+ #endif
+@@ -125,7 +125,7 @@ void dump_mm(const struct mm_struct *mm)
+ #endif
+ 		"def_flags: %#lx(%pGv)\n",
+ 
+-		mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
++		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
+ #ifdef CONFIG_MMU
+ 		mm->get_unmapped_area,
+ #endif
+diff --git a/mm/vmacache.c b/mm/vmacache.c
+index 035fdeb35b43..c9ca3dd46b97 100644
+--- a/mm/vmacache.c
++++ b/mm/vmacache.c
+@@ -5,44 +5,6 @@
+ #include <linux/mm.h>
+ #include <linux/vmacache.h>
+ 
+-/*
+- * Flush vma caches for threads that share a given mm.
+- *
+- * The operation is safe because the caller holds the mmap_sem
+- * exclusively and other threads accessing the vma cache will
+- * have mmap_sem held at least for read, so no extra locking
+- * is required to maintain the vma cache.
+- */
+-void vmacache_flush_all(struct mm_struct *mm)
+-{
+-	struct task_struct *g, *p;
+-
+-	count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
+-
+-	/*
+-	 * Single threaded tasks need not iterate the entire
+-	 * list of process. We can avoid the flushing as well
+-	 * since the mm's seqnum was increased and don't have
+-	 * to worry about other threads' seqnum. Current's
+-	 * flush will occur upon the next lookup.
+-	 */
+-	if (atomic_read(&mm->mm_users) == 1)
+-		return;
+-
+-	rcu_read_lock();
+-	for_each_process_thread(g, p) {
+-		/*
+-		 * Only flush the vmacache pointers as the
+-		 * mm seqnum is already set and curr's will
+-		 * be set upon invalidation when the next
+-		 * lookup is done.
+-		 */
+-		if (mm == p->mm)
+-			vmacache_flush(p);
+-	}
+-	rcu_read_unlock();
+-}
+-
+ /*
+  * This task may be accessing a foreign mm via (for example)
+  * get_user_pages()->find_vma(). The vmacache is task-local and this
+
+
--
cgit v1.2.3-54-g00ecf
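
The core of the backport above is an arithmetic observation: a 32-bit sequence
number that is bumped on every vmacache invalidation can plausibly wrap, which
is why the old code needed the vmacache_flush_all() fallback, whereas a 64-bit
counter cannot wrap on any realistic timescale. The stand-alone C sketch below
works through that back-of-the-envelope calculation; it is ordinary user-space
code with an assumed, purely illustrative invalidation rate, not part of the
kernel patch itself.

/* overflow_sketch.c - numbers behind the u32 -> u64 seqnum change.
 * Hypothetical user-space illustration; the invalidation rate is an assumption. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assume a worst-case workload invalidating the VMA cache once per microsecond. */
	const double invalidations_per_second = 1e6;
	const double seconds_per_year = 3600.0 * 24.0 * 365.0;

	/* A u32 counter wraps after ~4.3 billion increments: about an hour at
	 * this rate, so the old code had to flush every thread's cache on wrap. */
	double u32_wrap_hours = (double)UINT32_MAX / invalidations_per_second / 3600.0;

	/* A u64 counter would take hundreds of thousands of years to wrap, so the
	 * overflow path (and vmacache_flush_all) can simply be removed. */
	double u64_wrap_years = (double)UINT64_MAX / invalidations_per_second / seconds_per_year;

	printf("u32 seqnum wraps after ~%.1f hours\n", u32_wrap_hours);
	printf("u64 seqnum wraps after ~%.0f years\n", u64_wrap_years);
	return 0;
}

Even at this exaggerated rate the 64-bit counter outlives the hardware by
several orders of magnitude, which is why the overflow branch in
vmacache_invalidate() could be deleted outright.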