author     Andreas Wellving <andreas.wellving@enea.com>  2019-02-04 14:41:23 +0100
committer  Andreas Wellving <andreas.wellving@enea.com>  2019-02-04 14:41:23 +0100
commit     bc705e9e02d6b81dbad043a2b6f9d131404d7dec (patch)
tree       4d5b6102ec50a7fff1e84a40792dd422db3d38de
parent     0cfa86bc13729a11d1ab643b8b35f93299b19537 (diff)
download   enea-kernel-cache-bc705e9e02d6b81dbad043a2b6f9d131404d7dec.tar.gz
mremap: CVE-2018-18281
mremap: properly flush TLB before releasing the page

References:
https://nvd.nist.gov/vuln/detail/CVE-2018-18281
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-4.14.y&id=541500abfe9eb30a89ff0a6eb42a21521996d68d

Change-Id: I084b67b7f5a6e099ddf8de78f377e343606b92a2
Signed-off-by: Andreas Wellving <andreas.wellving@enea.com>
-rw-r--r--  patches/cve/4.14.x.scc                                                                |   2
-rw-r--r--  patches/cve/CVE-2018-18281-mremap-properly-flush-TLB-before-releasing-the-page.patch  | 179
2 files changed, 181 insertions, 0 deletions
diff --git a/patches/cve/4.14.x.scc b/patches/cve/4.14.x.scc
index 41bfe7a..4915ffe 100644
--- a/patches/cve/4.14.x.scc
+++ b/patches/cve/4.14.x.scc
@@ -4,6 +4,8 @@ patch CVE-2018-13099-f2fs-fix-to-do-sanity-check-with-reserved-blkaddr-of.patch
 patch CVE-2018-14633-scsi-target-iscsi-Use-hex2bin-instead-of-a-re-implem.patch
 #CVEs fixed in 4.14.75:
 patch CVE-2018-17972-proc-restrict-kernel-stack-dumps-to-root.patch
+#CVEs fixed in 4.14.78:
+patch CVE-2018-18281-mremap-properly-flush-TLB-before-releasing-the-page.patch
 #CVEs fixed in 4.14.86:
 patch CVE-2018-13097-f2fs-fix-to-do-sanity-check-with-user_block_count.patch
 patch CVE-2018-14610-btrfs-Check-that-each-block-group-has-corresponding-.patch
diff --git a/patches/cve/CVE-2018-18281-mremap-properly-flush-TLB-before-releasing-the-page.patch b/patches/cve/CVE-2018-18281-mremap-properly-flush-TLB-before-releasing-the-page.patch
new file mode 100644
index 0000000..c768a9b
--- /dev/null
+++ b/patches/cve/CVE-2018-18281-mremap-properly-flush-TLB-before-releasing-the-page.patch
@@ -0,0 +1,179 @@
From 541500abfe9eb30a89ff0a6eb42a21521996d68d Mon Sep 17 00:00:00 2001
From: Linus Torvalds <torvalds@linux-foundation.org>
Date: Fri, 12 Oct 2018 15:22:59 -0700
Subject: [PATCH] mremap: properly flush TLB before releasing the page

commit eb66ae030829605d61fbef1909ce310e29f78821 upstream.

Jann Horn points out that our TLB flushing was subtly wrong for the
mremap() case. What makes mremap() special is that we don't follow the
usual "add page to list of pages to be freed, then flush tlb, and then
free pages". No, mremap() obviously just _moves_ the page from one page
table location to another.

That matters, because mremap() thus doesn't directly control the
lifetime of the moved page with a freelist: instead, the lifetime of the
page is controlled by the page table locking, that serializes access to
the entry.

As a result, we need to flush the TLB not just before releasing the lock
for the source location (to avoid any concurrent accesses to the entry),
but also before we release the destination page table lock (to avoid the
TLB being flushed after somebody else has already done something to that
page).

This also makes the whole "need_flush" logic unnecessary, since we now
always end up flushing the TLB for every valid entry.

CVE: CVE-2018-18281
Upstream-Status: Backport

Reported-and-tested-by: Jann Horn <jannh@google.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Tested-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Andreas Wellving <andreas.wellving@enea.com>
---
 include/linux/huge_mm.h |  2 +-
 mm/huge_memory.c        | 10 ++++------
 mm/mremap.c             | 30 +++++++++++++-----------------
 3 files changed, 18 insertions(+), 24 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 87067d23a48b..bfa38da4c261 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -42,7 +42,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			unsigned char *vec);
 extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 			 unsigned long new_addr, unsigned long old_end,
-			 pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
+			 pmd_t *old_pmd, pmd_t *new_pmd);
 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			unsigned long addr, pgprot_t newprot,
 			int prot_numa);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 39c1fedcfdb4..adacfe66cf3d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1765,7 +1765,7 @@ static pmd_t move_soft_dirty_pmd(pmd_t pmd)
 
 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 		  unsigned long new_addr, unsigned long old_end,
-		  pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
+		  pmd_t *old_pmd, pmd_t *new_pmd)
 {
 	spinlock_t *old_ptl, *new_ptl;
 	pmd_t pmd;
@@ -1796,7 +1796,7 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 	if (new_ptl != old_ptl)
 		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
 	pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
-	if (pmd_present(pmd) && pmd_dirty(pmd))
+	if (pmd_present(pmd))
 		force_flush = true;
 	VM_BUG_ON(!pmd_none(*new_pmd));
 
@@ -1807,12 +1807,10 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 	}
 	pmd = move_soft_dirty_pmd(pmd);
 	set_pmd_at(mm, new_addr, new_pmd, pmd);
-	if (new_ptl != old_ptl)
-		spin_unlock(new_ptl);
 	if (force_flush)
 		flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
-	else
-		*need_flush = true;
+	if (new_ptl != old_ptl)
+		spin_unlock(new_ptl);
 	spin_unlock(old_ptl);
 	return true;
 }
diff --git a/mm/mremap.c b/mm/mremap.c
index 049470aa1e3e..88ceeb4ef817 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -115,7 +115,7 @@ static pte_t move_soft_dirty_pte(pte_t pte)
 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		unsigned long old_addr, unsigned long old_end,
 		struct vm_area_struct *new_vma, pmd_t *new_pmd,
-		unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
+		unsigned long new_addr, bool need_rmap_locks)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *old_pte, *new_pte, pte;
@@ -163,15 +163,17 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 
 		pte = ptep_get_and_clear(mm, old_addr, old_pte);
 		/*
-		 * If we are remapping a dirty PTE, make sure
+		 * If we are remapping a valid PTE, make sure
 		 * to flush TLB before we drop the PTL for the
-		 * old PTE or we may race with page_mkclean().
+		 * PTE.
 		 *
-		 * This check has to be done after we removed the
-		 * old PTE from page tables or another thread may
-		 * dirty it after the check and before the removal.
+		 * NOTE! Both old and new PTL matter: the old one
+		 * for racing with page_mkclean(), the new one to
+		 * make sure the physical page stays valid until
+		 * the TLB entry for the old mapping has been
+		 * flushed.
 		 */
-		if (pte_present(pte) && pte_dirty(pte))
+		if (pte_present(pte))
 			force_flush = true;
 		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
 		pte = move_soft_dirty_pte(pte);
@@ -179,13 +181,11 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	}
 
 	arch_leave_lazy_mmu_mode();
+	if (force_flush)
+		flush_tlb_range(vma, old_end - len, old_end);
 	if (new_ptl != old_ptl)
 		spin_unlock(new_ptl);
 	pte_unmap(new_pte - 1);
-	if (force_flush)
-		flush_tlb_range(vma, old_end - len, old_end);
-	else
-		*need_flush = true;
 	pte_unmap_unlock(old_pte - 1, old_ptl);
 	if (need_rmap_locks)
 		drop_rmap_locks(vma);
@@ -200,7 +200,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 {
 	unsigned long extent, next, old_end;
 	pmd_t *old_pmd, *new_pmd;
-	bool need_flush = false;
 	unsigned long mmun_start;	/* For mmu_notifiers */
 	unsigned long mmun_end;		/* For mmu_notifiers */
 
@@ -231,8 +230,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 				if (need_rmap_locks)
 					take_rmap_locks(vma);
 				moved = move_huge_pmd(vma, old_addr, new_addr,
-						      old_end, old_pmd, new_pmd,
-						      &need_flush);
+						      old_end, old_pmd, new_pmd);
 				if (need_rmap_locks)
 					drop_rmap_locks(vma);
 				if (moved)
@@ -250,10 +248,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 		if (extent > LATENCY_LIMIT)
 			extent = LATENCY_LIMIT;
 		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
-			  new_pmd, new_addr, need_rmap_locks, &need_flush);
+			  new_pmd, new_addr, need_rmap_locks);
 	}
-	if (need_flush)
-		flush_tlb_range(vma, old_end-len, old_addr);
 
 	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
 
-- 
2.19.2

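
For readers who want the locking/flush ordering in isolation: the sketch below is a minimal, standalone userspace model of the move_ptes() flow after this patch, not kernel code. The pthread mutexes stand in for the source and destination page-table locks, flush_tlb_range_model() is a printf stub, and the names move_pte_model() and flush_tlb_range_model() are invented for this illustration. The only point it demonstrates is the one the commit message makes: for any present entry, the flush now happens while both locks are still held, so the page cannot be freed or reused while a stale translation for the old address is still live.

/* Illustrative model only; build with: cc -pthread model.c -o model */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the source and destination page-table locks. */
static pthread_mutex_t old_ptl = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t new_ptl = PTHREAD_MUTEX_INITIALIZER;

/* Stub standing in for flush_tlb_range(): just reports the range. */
static void flush_tlb_range_model(unsigned long start, unsigned long end)
{
	printf("flush TLB for [%#lx, %#lx)\n", start, end);
}

/* Simplified model of the patched move_ptes() ordering. */
static void move_pte_model(unsigned long old_addr, unsigned long new_addr,
			   bool pte_present)
{
	bool force_flush = false;

	pthread_mutex_lock(&old_ptl);
	pthread_mutex_lock(&new_ptl);

	/* The entry would be cleared at old_addr and installed at new_addr here. */
	printf("moving PTE from %#lx to %#lx\n", old_addr, new_addr);
	if (pte_present)
		force_flush = true;	/* post-patch: every present entry forces a flush */

	/*
	 * The point of the fix: the flush happens while BOTH locks are
	 * still held, so no other thread can act on the page before the
	 * stale translation for old_addr is gone.
	 */
	if (force_flush)
		flush_tlb_range_model(old_addr, old_addr + 4096);

	pthread_mutex_unlock(&new_ptl);
	pthread_mutex_unlock(&old_ptl);
}

int main(void)
{
	move_pte_model(0x1000, 0x2000, true);
	return 0;
}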