author    Andreas Wellving <andreas.wellving@enea.com>    2019-07-10 15:21:34 +0200
committer Adrian Stratulat <adrian.stratulat@enea.com>    2019-07-12 12:11:33 +0200
commit    c6047e4f17101452e1b14f26b809f835247e80d9 (patch)
tree      e6e56d168df04f2ebcfbd587962a960ef7514247
parent    6481019cced41109508ec7356686657233a12a7a (diff)
download  enea-kernel-cache-altera-4.9.tar.gz
mremap: CVE-2018-18281 (altera-4.9)
mremap: properly flush TLB before releasing the page
References:
https://nvd.nist.gov/vuln/detail/CVE-2018-18281
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-4.9.y&id=e34bd9a96704f7089ccad61b6e01ea985fa54dd6
Change-Id: I16200712d4c85eceef94c4711ffef7a67349691a
Signed-off-by: Andreas Wellving <andreas.wellving@enea.com>
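
The race fixed here is easiest to see in code. Below is a minimal, standalone sketch of the ordering the backported patch enforces when move_ptes() migrates one PTE: any present entry forces a TLB flush, and the flush happens while both page-table locks are still held. This is an editor's illustration, not the kernel source; every type and helper in it is a simplified stand-in for the mm/mremap.c machinery.

```c
/* tlb_flush_ordering.c - a minimal, standalone sketch of the ordering
 * this patch enforces in mm/mremap.c:move_ptes().  All types and
 * helpers here are simplified stand-ins, not the real kernel API. */
#include <stdio.h>

typedef unsigned long pte_t;
typedef struct { int held; } spinlock_t;

static void spin_lock(spinlock_t *l)   { l->held = 1; }
static void spin_unlock(spinlock_t *l) { l->held = 0; }
static int  pte_present(pte_t pte)     { return pte != 0; }

static void flush_tlb_range(unsigned long start, unsigned long end)
{
    printf("flush TLB for [%#lx, %#lx)\n", start, end);
}

/* Move one PTE between page-table slots in the fixed order: clear the
 * old entry, decide whether to flush, install the new entry, flush,
 * and only then drop either lock. */
static void move_pte_sketch(spinlock_t *old_ptl, spinlock_t *new_ptl,
                            pte_t *old_pte, pte_t *new_pte,
                            unsigned long old_addr)
{
    int force_flush = 0;
    pte_t pte;

    spin_lock(old_ptl);
    if (new_ptl != old_ptl)
        spin_lock(new_ptl);

    pte = *old_pte;             /* ptep_get_and_clear() upstream */
    *old_pte = 0;

    /* The bug: only dirty PTEs used to force a flush here.  Any
     * present PTE may still be cached in a TLB, so all of them must. */
    if (pte_present(pte))
        force_flush = 1;

    *new_pte = pte;             /* set_pte_at() upstream */

    /* The fix: flush while BOTH locks are held.  The old PTL
     * serializes against page_mkclean(); the new PTL keeps the
     * physical page from being freed or reused while a stale TLB
     * entry for the old address could still reach it. */
    if (force_flush)
        flush_tlb_range(old_addr, old_addr + 4096);

    if (new_ptl != old_ptl)
        spin_unlock(new_ptl);
    spin_unlock(old_ptl);
}

int main(void)
{
    spinlock_t old_ptl = {0}, new_ptl = {0};
    pte_t old_slot = 0x1234, new_slot = 0;

    move_pte_sketch(&old_ptl, &new_ptl, &old_slot, &new_slot,
                    0x7f0000000000UL);
    return 0;
}
```

Built with a plain `gcc tlb_flush_ordering.c`, it only prints the flushed range; the point is the lock/flush ordering, which mirrors the hunks in the patch below.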
-rw-r--r--  patches/cve/4.9.x.scc                                                                |   3
-rw-r--r--  patches/cve/CVE-2018-18281-mremap-properly-flush-TLB-before-releasing-the-page.patch | 180

2 files changed, 183 insertions(+), 0 deletions(-)
diff --git a/patches/cve/4.9.x.scc b/patches/cve/4.9.x.scc
index 935280b..39c7de0 100644
--- a/patches/cve/4.9.x.scc
+++ b/patches/cve/4.9.x.scc
@@ -58,6 +58,9 @@ patch CVE-2018-13099-f2fs-fix-to-do-sanity-check-with-reserved-blkaddr-of.patch
 #CVEs fixed in 4.9.131:
 patch CVE-2018-10880-ext4-never-move-the-system.data-xattr-out-of-the-ino.patch
 
+#CVEs fixed in 4.9.135:
+patch CVE-2018-18281-mremap-properly-flush-TLB-before-releasing-the-page.patch
+
 #CVEs fixed in 4.9.138:
 patch CVE-2018-16871-nfsd-COPY-and-CLONE-operations-require-the-saved-fil.patch
 
diff --git a/patches/cve/CVE-2018-18281-mremap-properly-flush-TLB-before-releasing-the-page.patch b/patches/cve/CVE-2018-18281-mremap-properly-flush-TLB-before-releasing-the-page.patch
new file mode 100644
index 0000000..e1424d0
--- /dev/null
+++ b/patches/cve/CVE-2018-18281-mremap-properly-flush-TLB-before-releasing-the-page.patch
@@ -0,0 +1,180 @@
From e34bd9a96704f7089ccad61b6e01ea985fa54dd6 Mon Sep 17 00:00:00 2001
From: Linus Torvalds <torvalds@linux-foundation.org>
Date: Fri, 12 Oct 2018 15:22:59 -0700
Subject: [PATCH] mremap: properly flush TLB before releasing the page

commit eb66ae030829605d61fbef1909ce310e29f78821 upstream.

Jann Horn points out that our TLB flushing was subtly wrong for the
mremap() case. What makes mremap() special is that we don't follow the
usual "add page to list of pages to be freed, then flush tlb, and then
free pages". No, mremap() obviously just _moves_ the page from one page
table location to another.

That matters, because mremap() thus doesn't directly control the
lifetime of the moved page with a freelist: instead, the lifetime of the
page is controlled by the page table locking, that serializes access to
the entry.

As a result, we need to flush the TLB not just before releasing the lock
for the source location (to avoid any concurrent accesses to the entry),
but also before we release the destination page table lock (to avoid the
TLB being flushed after somebody else has already done something to that
page).

This also makes the whole "need_flush" logic unnecessary, since we now
always end up flushing the TLB for every valid entry.

CVE: CVE-2018-18281
Upstream-Status: Backport [https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-4.9.y&id=e34bd9a96704f7089ccad61b6e01ea985fa54dd6]

Reported-and-tested-by: Jann Horn <jannh@google.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Tested-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Andreas Wellving <andreas.wellving@enea.com>
---
 include/linux/huge_mm.h |  2 +-
 mm/huge_memory.c        | 10 ++++------
 mm/mremap.c             | 30 +++++++++++++-----------------
 3 files changed, 18 insertions(+), 24 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index e35e6de633b9..9b9f65d99873 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -22,7 +22,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
         unsigned char *vec);
 extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
         unsigned long new_addr, unsigned long old_end,
-        pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
+        pmd_t *old_pmd, pmd_t *new_pmd);
 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
         unsigned long addr, pgprot_t newprot,
         int prot_numa);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e4c6c3edaf6a..9f7bba700e4e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1445,7 +1445,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 
 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
         unsigned long new_addr, unsigned long old_end,
-        pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
+        pmd_t *old_pmd, pmd_t *new_pmd)
 {
     spinlock_t *old_ptl, *new_ptl;
     pmd_t pmd;
@@ -1476,7 +1476,7 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
     if (new_ptl != old_ptl)
         spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
     pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
-    if (pmd_present(pmd) && pmd_dirty(pmd))
+    if (pmd_present(pmd))
         force_flush = true;
     VM_BUG_ON(!pmd_none(*new_pmd));
 
@@ -1487,12 +1487,10 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
         pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
     }
     set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
-    if (new_ptl != old_ptl)
-        spin_unlock(new_ptl);
     if (force_flush)
         flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
-    else
-        *need_flush = true;
+    if (new_ptl != old_ptl)
+        spin_unlock(new_ptl);
     spin_unlock(old_ptl);
     return true;
 }
diff --git a/mm/mremap.c b/mm/mremap.c
index 15976716dd40..9e6035969d7b 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -104,7 +104,7 @@ static pte_t move_soft_dirty_pte(pte_t pte)
 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
         unsigned long old_addr, unsigned long old_end,
         struct vm_area_struct *new_vma, pmd_t *new_pmd,
-        unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
+        unsigned long new_addr, bool need_rmap_locks)
 {
     struct mm_struct *mm = vma->vm_mm;
     pte_t *old_pte, *new_pte, pte;
@@ -152,15 +152,17 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 
         pte = ptep_get_and_clear(mm, old_addr, old_pte);
         /*
-         * If we are remapping a dirty PTE, make sure
+         * If we are remapping a valid PTE, make sure
          * to flush TLB before we drop the PTL for the
-         * old PTE or we may race with page_mkclean().
+         * PTE.
          *
-         * This check has to be done after we removed the
-         * old PTE from page tables or another thread may
-         * dirty it after the check and before the removal.
+         * NOTE! Both old and new PTL matter: the old one
+         * for racing with page_mkclean(), the new one to
+         * make sure the physical page stays valid until
+         * the TLB entry for the old mapping has been
+         * flushed.
          */
-        if (pte_present(pte) && pte_dirty(pte))
+        if (pte_present(pte))
             force_flush = true;
         pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
         pte = move_soft_dirty_pte(pte);
@@ -168,13 +170,11 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
     }
 
     arch_leave_lazy_mmu_mode();
+    if (force_flush)
+        flush_tlb_range(vma, old_end - len, old_end);
     if (new_ptl != old_ptl)
         spin_unlock(new_ptl);
     pte_unmap(new_pte - 1);
-    if (force_flush)
-        flush_tlb_range(vma, old_end - len, old_end);
-    else
-        *need_flush = true;
     pte_unmap_unlock(old_pte - 1, old_ptl);
     if (need_rmap_locks)
         drop_rmap_locks(vma);
@@ -189,7 +189,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 {
     unsigned long extent, next, old_end;
     pmd_t *old_pmd, *new_pmd;
-    bool need_flush = false;
     unsigned long mmun_start;    /* For mmu_notifiers */
     unsigned long mmun_end;      /* For mmu_notifiers */
 
@@ -220,8 +219,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
             if (need_rmap_locks)
                 take_rmap_locks(vma);
             moved = move_huge_pmd(vma, old_addr, new_addr,
-                        old_end, old_pmd, new_pmd,
-                        &need_flush);
+                        old_end, old_pmd, new_pmd);
             if (need_rmap_locks)
                 drop_rmap_locks(vma);
             if (moved)
@@ -239,10 +237,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
         if (extent > LATENCY_LIMIT)
             extent = LATENCY_LIMIT;
         move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
-              new_pmd, new_addr, need_rmap_locks, &need_flush);
+              new_pmd, new_addr, need_rmap_locks);
     }
-    if (need_flush)
-        flush_tlb_range(vma, old_end-len, old_addr);
 
     mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
 
-- 
2.20.1
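
Linus's note that "both old and new PTL matter" is the heart of the fix. As a further hedged illustration (stand-in types again, not the kernel API), the comparison below contrasts the pre-patch and post-patch unlock ordering in the huge-PMD path: dropping the destination lock before the flush leaves a window in which another thread can reach the page through the new mapping while a stale TLB entry for the old address still points at it.

```c
/* unlock_ordering_compare.c - before/after comparison of the unlock
 * ordering in move_huge_pmd(); a standalone sketch with stand-in
 * types, not the kernel source. */
#include <stdio.h>

typedef struct { int held; } spinlock_t;

#define PMD_SIZE (2UL << 20)    /* one 2 MiB huge page, x86-64 value */

static void spin_unlock(spinlock_t *l) { l->held = 0; }

static void flush_tlb_range(unsigned long start, unsigned long end)
{
    printf("flush TLB for [%#lx, %#lx)\n", start, end);
}

/* BEFORE (racy): the destination lock is dropped before the flush.
 * Between the unlock and the flush, another thread can act on the
 * page through the new mapping (e.g. zap it and free the page) while
 * a stale TLB entry for old_addr still points at it. */
static void unlock_then_flush(spinlock_t *old_ptl, spinlock_t *new_ptl,
                              unsigned long old_addr, int force_flush)
{
    if (new_ptl != old_ptl)
        spin_unlock(new_ptl);   /* <-- the window opens here */
    if (force_flush)
        flush_tlb_range(old_addr, old_addr + PMD_SIZE);
    spin_unlock(old_ptl);
}

/* AFTER (fixed): flush first, while both locks are still held. */
static void flush_then_unlock(spinlock_t *old_ptl, spinlock_t *new_ptl,
                              unsigned long old_addr, int force_flush)
{
    if (force_flush)
        flush_tlb_range(old_addr, old_addr + PMD_SIZE);
    if (new_ptl != old_ptl)
        spin_unlock(new_ptl);
    spin_unlock(old_ptl);
}

int main(void)
{
    spinlock_t old_ptl = {1}, new_ptl = {1};

    unlock_then_flush(&old_ptl, &new_ptl, 0x7f0000000000UL, 1);
    old_ptl.held = new_ptl.held = 1;
    flush_then_unlock(&old_ptl, &new_ptl, 0x7f0000000000UL, 1);
    return 0;
}
```

The same reordering is applied at the PTE level in move_ptes(), which is why the deferred need_flush bookkeeping in move_page_tables() could be deleted outright: every present entry now forces its flush before either lock is released.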
