author     Andreas Wellving <andreas.wellving@enea.com>    2019-07-10 12:26:20 +0200
committer  Adrian Stratulat <adrian.stratulat@enea.com>    2019-07-12 15:13:12 +0200
commit     993de5e62725cf99ded17687e68c127e2bdf557e (patch)
tree       b850117122f01de04f40b609e7761f7d7761690c
parent     7b166ca3cd7089aeb912b54d218697fb01591818 (diff)
download   enea-kernel-cache-intel-4.9.tar.gz

mremap: CVE-2018-18281 (intel-4.9)

mremap: properly flush TLB before releasing the page

References: https://nvd.nist.gov/vuln/detail/CVE-2018-18281
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-4.9.y&id=e34bd9a96704f7089ccad61b6e01ea985fa54dd6

Change-Id: Iae36afb200b136808d0e1a81fd1f1ded24fe9c71
Signed-off-by: Andreas Wellving <andreas.wellving@enea.com>
-rw-r--r--  patches/cve/4.9.x.scc                                                                  |   3
-rw-r--r--  patches/cve/CVE-2018-18281-mremap-properly-flush-TLB-before-releasing-the-page.patch   | 180
2 files changed, 183 insertions(+), 0 deletions(-)
diff --git a/patches/cve/4.9.x.scc b/patches/cve/4.9.x.scc
index b4740c7..cdc7341 100644
--- a/patches/cve/4.9.x.scc
+++ b/patches/cve/4.9.x.scc
@@ -23,6 +23,9 @@ patch CVE-2018-13099-f2fs-fix-to-do-sanity-check-with-reserved-blkaddr-of.patch
 #CVEs fixed in 4.9.131:
 patch CVE-2018-10880-ext4-never-move-the-system.data-xattr-out-of-the-ino.patch
 
+#CVEs fixed in 4.9.135:
+patch CVE-2018-18281-mremap-properly-flush-TLB-before-releasing-the-page.patch
+
 #CVEs fixed in 4.9.138:
 patch CVE-2018-16871-nfsd-COPY-and-CLONE-operations-require-the-saved-fil.patch
 
diff --git a/patches/cve/CVE-2018-18281-mremap-properly-flush-TLB-before-releasing-the-page.patch b/patches/cve/CVE-2018-18281-mremap-properly-flush-TLB-before-releasing-the-page.patch
new file mode 100644
index 0000000..e1424d0
--- /dev/null
+++ b/patches/cve/CVE-2018-18281-mremap-properly-flush-TLB-before-releasing-the-page.patch
@@ -0,0 +1,180 @@
From e34bd9a96704f7089ccad61b6e01ea985fa54dd6 Mon Sep 17 00:00:00 2001
From: Linus Torvalds <torvalds@linux-foundation.org>
Date: Fri, 12 Oct 2018 15:22:59 -0700
Subject: [PATCH] mremap: properly flush TLB before releasing the page

commit eb66ae030829605d61fbef1909ce310e29f78821 upstream.

Jann Horn points out that our TLB flushing was subtly wrong for the
mremap() case. What makes mremap() special is that we don't follow the
usual "add page to list of pages to be freed, then flush tlb, and then
free pages". No, mremap() obviously just _moves_ the page from one page
table location to another.

That matters, because mremap() thus doesn't directly control the
lifetime of the moved page with a freelist: instead, the lifetime of the
page is controlled by the page table locking, that serializes access to
the entry.

As a result, we need to flush the TLB not just before releasing the lock
for the source location (to avoid any concurrent accesses to the entry),
but also before we release the destination page table lock (to avoid the
TLB being flushed after somebody else has already done something to that
page).

This also makes the whole "need_flush" logic unnecessary, since we now
always end up flushing the TLB for every valid entry.

CVE: CVE-2018-18281
Upstream-Status: Backport [https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-4.9.y&id=e34bd9a96704f7089ccad61b6e01ea985fa54dd6]

Reported-and-tested-by: Jann Horn <jannh@google.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Tested-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Andreas Wellving <andreas.wellving@enea.com>
---
 include/linux/huge_mm.h |  2 +-
 mm/huge_memory.c        | 10 ++++------
 mm/mremap.c             | 30 +++++++++++++-----------------
 3 files changed, 18 insertions(+), 24 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index e35e6de633b9..9b9f65d99873 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -22,7 +22,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                         unsigned char *vec);
 extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                          unsigned long new_addr, unsigned long old_end,
-                         pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
+                         pmd_t *old_pmd, pmd_t *new_pmd);
 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                         unsigned long addr, pgprot_t newprot,
                         int prot_numa);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e4c6c3edaf6a..9f7bba700e4e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1445,7 +1445,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 
 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                   unsigned long new_addr, unsigned long old_end,
-                  pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
+                  pmd_t *old_pmd, pmd_t *new_pmd)
 {
         spinlock_t *old_ptl, *new_ptl;
         pmd_t pmd;
@@ -1476,7 +1476,7 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                 if (new_ptl != old_ptl)
                         spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
                 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
-                if (pmd_present(pmd) && pmd_dirty(pmd))
+                if (pmd_present(pmd))
                         force_flush = true;
                 VM_BUG_ON(!pmd_none(*new_pmd));
 
@@ -1487,12 +1487,10 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                         pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
                 }
                 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
-                if (new_ptl != old_ptl)
-                        spin_unlock(new_ptl);
                 if (force_flush)
                         flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
-                else
-                        *need_flush = true;
+                if (new_ptl != old_ptl)
+                        spin_unlock(new_ptl);
                 spin_unlock(old_ptl);
                 return true;
         }
diff --git a/mm/mremap.c b/mm/mremap.c
index 15976716dd40..9e6035969d7b 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -104,7 +104,7 @@ static pte_t move_soft_dirty_pte(pte_t pte)
 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                 unsigned long old_addr, unsigned long old_end,
                 struct vm_area_struct *new_vma, pmd_t *new_pmd,
-                unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
+                unsigned long new_addr, bool need_rmap_locks)
 {
         struct mm_struct *mm = vma->vm_mm;
         pte_t *old_pte, *new_pte, pte;
@@ -152,15 +152,17 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 
                 pte = ptep_get_and_clear(mm, old_addr, old_pte);
                 /*
-                 * If we are remapping a dirty PTE, make sure
+                 * If we are remapping a valid PTE, make sure
                  * to flush TLB before we drop the PTL for the
-                 * old PTE or we may race with page_mkclean().
+                 * PTE.
                  *
-                 * This check has to be done after we removed the
-                 * old PTE from page tables or another thread may
-                 * dirty it after the check and before the removal.
+                 * NOTE! Both old and new PTL matter: the old one
+                 * for racing with page_mkclean(), the new one to
+                 * make sure the physical page stays valid until
+                 * the TLB entry for the old mapping has been
+                 * flushed.
                  */
-                if (pte_present(pte) && pte_dirty(pte))
+                if (pte_present(pte))
                         force_flush = true;
                 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
                 pte = move_soft_dirty_pte(pte);
@@ -168,13 +170,11 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
         }
 
         arch_leave_lazy_mmu_mode();
+        if (force_flush)
+                flush_tlb_range(vma, old_end - len, old_end);
         if (new_ptl != old_ptl)
                 spin_unlock(new_ptl);
         pte_unmap(new_pte - 1);
-        if (force_flush)
-                flush_tlb_range(vma, old_end - len, old_end);
-        else
-                *need_flush = true;
         pte_unmap_unlock(old_pte - 1, old_ptl);
         if (need_rmap_locks)
                 drop_rmap_locks(vma);
@@ -189,7 +189,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 {
         unsigned long extent, next, old_end;
         pmd_t *old_pmd, *new_pmd;
-        bool need_flush = false;
         unsigned long mmun_start;       /* For mmu_notifiers */
         unsigned long mmun_end;         /* For mmu_notifiers */
 
@@ -220,8 +219,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                         if (need_rmap_locks)
                                 take_rmap_locks(vma);
                         moved = move_huge_pmd(vma, old_addr, new_addr,
-                                            old_end, old_pmd, new_pmd,
-                                            &need_flush);
+                                            old_end, old_pmd, new_pmd);
                         if (need_rmap_locks)
                                 drop_rmap_locks(vma);
                         if (moved)
@@ -239,10 +237,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                 if (extent > LATENCY_LIMIT)
                         extent = LATENCY_LIMIT;
                 move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
-                          new_pmd, new_addr, need_rmap_locks, &need_flush);
+                          new_pmd, new_addr, need_rmap_locks);
         }
-        if (need_flush)
-                flush_tlb_range(vma, old_end-len, old_addr);
 
         mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
 
--
2.20.1

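Editor's note: the ordering change in the patch above boils down to one rule: the TLB flush for the old mapping must happen while both page-table locks are still held, before the new PTL is dropped. The sketch below is a minimal, standalone illustration of that ordering only; it is not kernel code, and every name in it (pte_present_stub, flush_tlb_range_stub, unlock_stub, move_ptes_sketch) is a hypothetical stand-in invented for this example.

/* Standalone C sketch of the corrected flush-before-unlock ordering. */
#include <stdbool.h>
#include <stdio.h>

static bool pte_present_stub(int pte)      { return pte != 0; }
static void flush_tlb_range_stub(void)     { puts("flush TLB for old range"); }
static void unlock_stub(const char *which) { printf("unlock %s PTL\n", which); }

static void move_ptes_sketch(int pte)
{
        bool force_flush = false;

        /* After eb66ae030829: any present PTE forces a flush, not just dirty ones. */
        if (pte_present_stub(pte))
                force_flush = true;

        /*
         * Fixed ordering: flush while both page-table locks are still held,
         * so no other thread can touch the moved page before the stale TLB
         * entries for the old address are gone.
         */
        if (force_flush)
                flush_tlb_range_stub();
        unlock_stub("new");
        unlock_stub("old");
}

int main(void)
{
        move_ptes_sketch(1);
        return 0;
}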