author     Andreas Wellving <andreas.wellving@enea.com>   2019-02-04 13:40:19 +0100
committer  Andreas Wellving <andreas.wellving@enea.com>   2019-02-04 13:40:19 +0100
commit     b6643e162871f2c81cc342f510ac9bd35e7744bd (patch)
tree       92b443e03b847a708ea46c0d2db7aacfb71e2025
parent     838e3893300a078ef12aa1d8d8c2336df259d2e0 (diff)
download   enea-kernel-cache-b6643e162871f2c81cc342f510ac9bd35e7744bd.tar.gz
userfaultfd: CVE-2018-18397
userfaultfd: use ENOENT instead of EFAULT if the atomic copy user fails

References:
https://nvd.nist.gov/vuln/detail/CVE-2018-18397
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-4.14.y&id=82c5a8c0debac552750a00b4fc7551c89c7b34b8

Change-Id: I8b35a87096278dee376107808022c95c2350c80e
Signed-off-by: Andreas Wellving <andreas.wellving@enea.com>
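The backported change is a pure return-value convention swap in the atomic copy helpers: -ENOENT becomes the internal "retry the copy outside mmap_sem" signal, which frees -EFAULT to mean a genuine bad address. A minimal, self-contained user-space model of that convention (illustrative names only, not the kernel code):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Toy model of the convention this patch introduces: -EFAULT is a real
 * fault reported upward, -ENOENT asks the caller to retry the copy
 * outside the lock. Function and parameter names are made up. */
static int mfill_atomic_pte_model(const void *src, void *dst, size_t len)
{
	if (dst == NULL)
		return -EFAULT;  /* genuine bad address, propagated as-is */
	if (src == NULL)
		return -ENOENT;  /* source not ready: fall back outside the lock */
	memcpy(dst, src, len);
	return 0;
}

int main(void)
{
	char page[8];

	printf("%d %d %d\n",
	       mfill_atomic_pte_model(NULL, page, sizeof(page)), /* -ENOENT */
	       mfill_atomic_pte_model("data", NULL, 5),          /* -EFAULT */
	       mfill_atomic_pte_model("data", page, 5));         /* 0 */
	return 0;
}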
-rw-r--r--   patches/cve/4.14.x.scc                                                                 |   2
-rw-r--r--   patches/cve/CVE-2018-18397-userfaultfd-use-ENOENT-instead-of-EFAULT-if-the-atom.patch  | 118
2 files changed, 120 insertions, 0 deletions
diff --git a/patches/cve/4.14.x.scc b/patches/cve/4.14.x.scc
index 9a53416..59a57b6 100644
--- a/patches/cve/4.14.x.scc
+++ b/patches/cve/4.14.x.scc
@@ -11,3 +11,5 @@ patch CVE-2018-14611-btrfs-validate-type-when-reading-a-chunk.patch
 patch CVE-2018-14614-f2fs-fix-to-do-sanity-check-with-cp_pack_start_sum.patch
 patch CVE-2018-18690-xfs-don-t-fail-when-converting-shortform-attr-to-lon.patch
 patch CVE-2018-19407-KVM-X86-Fix-scan-ioapic-use-before-initialization.patch
+#CVEs fixed in 4.14.87:
+patch CVE-2018-18397-userfaultfd-use-ENOENT-instead-of-EFAULT-if-the-atom.patch
diff --git a/patches/cve/CVE-2018-18397-userfaultfd-use-ENOENT-instead-of-EFAULT-if-the-atom.patch b/patches/cve/CVE-2018-18397-userfaultfd-use-ENOENT-instead-of-EFAULT-if-the-atom.patch
new file mode 100644
index 0000000..0d02d22
--- /dev/null
+++ b/patches/cve/CVE-2018-18397-userfaultfd-use-ENOENT-instead-of-EFAULT-if-the-atom.patch
@@ -0,0 +1,118 @@
+From 82c5a8c0debac552750a00b4fc7551c89c7b34b8 Mon Sep 17 00:00:00 2001
+From: Andrea Arcangeli <aarcange@redhat.com>
+Date: Fri, 30 Nov 2018 14:09:25 -0800
+Subject: [PATCH] userfaultfd: use ENOENT instead of EFAULT if the atomic copy
+ user fails
+
+commit 9e368259ad988356c4c95150fafd1a06af095d98 upstream.
+
+Patch series "userfaultfd shmem updates".
+
+Jann found two bugs in the userfaultfd shmem MAP_SHARED backend: the
+lack of the VM_MAYWRITE check and the lack of i_size checks.
+
+Then looking into the above we also fixed the MAP_PRIVATE case.
+
+Hugh by source review also found a data loss source if UFFDIO_COPY is
+used on shmem MAP_SHARED PROT_READ mappings (the production usages
+incidentally run with PROT_READ|PROT_WRITE, so the data loss couldn't
+happen in those production usages like with QEMU).
+
+The whole patchset is marked for stable.
+
+We verified QEMU postcopy live migration with guest running on shmem
+MAP_PRIVATE run as well as before after the fix of shmem MAP_PRIVATE.
+Regardless if it's shmem or hugetlbfs or MAP_PRIVATE or MAP_SHARED, QEMU
+unconditionally invokes a punch hole if the guest mapping is filebacked
+and a MADV_DONTNEED too (needed to get rid of the MAP_PRIVATE COWs and
+for the anon backend).
+
+This patch (of 5):
+
+We internally used EFAULT to communicate with the caller, switch to
+ENOENT, so EFAULT can be used as a non internal retval.
+
+CVE: CVE-2018-18397
+Upstream-Status: Backport
+
+Link: http://lkml.kernel.org/r/20181126173452.26955-2-aarcange@redhat.com
+Fixes: 4c27fe4c4c84 ("userfaultfd: shmem: add shmem_mcopy_atomic_pte for userfaultfd support")
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>
+Reviewed-by: Hugh Dickins <hughd@google.com>
+Cc: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Jann Horn <jannh@google.com>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
+Cc: <stable@vger.kernel.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Andreas Wellving <andreas.wellving@enea.com>
+---
+ mm/hugetlb.c     | 2 +-
+ mm/shmem.c       | 2 +-
+ mm/userfaultfd.c | 6 +++---
+ 3 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index f46040aed2da..224cdd953a79 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4037,7 +4037,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
+
+ /* fallback to copy_from_user outside mmap_sem */
+ if (unlikely(ret)) {
+- ret = -EFAULT;
++ ret = -ENOENT;
+ *pagep = page;
+ /* don't free the page */
+ goto out;
+diff --git a/mm/shmem.c b/mm/shmem.c
+index ab7ff0aeae2d..9f856ecda73b 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2266,7 +2266,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
+ *pagep = page;
+ shmem_inode_unacct_blocks(inode, 1);
+ /* don't free the page */
+- return -EFAULT;
++ return -ENOENT;
+ }
+ } else { /* mfill_zeropage_atomic */
+ clear_highpage(page);
+diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
+index 81192701964d..c63c0fc5ecfa 100644
+--- a/mm/userfaultfd.c
++++ b/mm/userfaultfd.c
+@@ -49,7 +49,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
+
+ /* fallback to copy_from_user outside mmap_sem */
+ if (unlikely(ret)) {
+- ret = -EFAULT;
++ ret = -ENOENT;
+ *pagep = page;
+ /* don't free the page */
+ goto out;
+@@ -275,7 +275,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
+
+ cond_resched();
+
+- if (unlikely(err == -EFAULT)) {
++ if (unlikely(err == -ENOENT)) {
+ up_read(&dst_mm->mmap_sem);
+ BUG_ON(!page);
+
+@@ -521,7 +521,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
+ src_addr, &page, zeropage);
+ cond_resched();
+
+- if (unlikely(err == -EFAULT)) {
++ if (unlikely(err == -ENOENT)) {
+ void *page_kaddr;
+
+ up_read(&dst_mm->mmap_sem);
+--
+2.19.2
+
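For context, the __mcopy_atomic() and __mcopy_atomic_hugetlb() hunks above implement the caller side of that signal: when the helper returns -ENOENT, the caller drops mmap_sem, copies the source page from user space into a staging buffer, and retries. A rough user-space sketch of that retry dance, with a pthread mutex standing in for mmap_sem (all names illustrative, not kernel code):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t fake_mmap_sem = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for mcopy_atomic_pte(): fails with -ENOENT until the caller
 * has staged the source data outside the lock. */
static int fake_mcopy_atomic_pte(const char *staged, char *dst, size_t len)
{
	if (staged == NULL)
		return -ENOENT;  /* needs the slow path outside the lock */
	memcpy(dst, staged, len);
	return 0;
}

int main(void)
{
	const char user_src[] = "guest page";
	char page[sizeof(user_src)] = {0};
	char staging[sizeof(user_src)];
	const char *staged = NULL;
	int err;

	for (;;) {
		pthread_mutex_lock(&fake_mmap_sem);
		err = fake_mcopy_atomic_pte(staged, page, sizeof(page));
		pthread_mutex_unlock(&fake_mmap_sem);

		if (err != -ENOENT)
			break;
		/* Slow path, analogous to copy_from_user outside mmap_sem:
		 * stage the data without holding the lock, then retry. */
		memcpy(staging, user_src, sizeof(staging));
		staged = staging;
	}
	printf("copy status %d, page contains \"%s\"\n", err, page);
	return err ? 1 : 0;
}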