From b6643e162871f2c81cc342f510ac9bd35e7744bd Mon Sep 17 00:00:00 2001
From: Andreas Wellving
Date: Mon, 4 Feb 2019 13:40:19 +0100
Subject: userfaultfd: CVE-2018-18397 userfaultfd: use ENOENT instead of EFAULT if the atomic copy user fails

References: https://nvd.nist.gov/vuln/detail/CVE-2018-18397
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-4.14.y&id=82c5a8c0debac552750a00b4fc7551c89c7b34b8

Change-Id: I8b35a87096278dee376107808022c95c2350c80e
Signed-off-by: Andreas Wellving
---
 patches/cve/4.14.x.scc                             |   2 +
 ...-use-ENOENT-instead-of-EFAULT-if-the-atom.patch | 118 +++++++++++++++++++++
 2 files changed, 120 insertions(+)
 create mode 100644 patches/cve/CVE-2018-18397-userfaultfd-use-ENOENT-instead-of-EFAULT-if-the-atom.patch

diff --git a/patches/cve/4.14.x.scc b/patches/cve/4.14.x.scc
index 9a53416..59a57b6 100644
--- a/patches/cve/4.14.x.scc
+++ b/patches/cve/4.14.x.scc
@@ -11,3 +11,5 @@ patch CVE-2018-14611-btrfs-validate-type-when-reading-a-chunk.patch
 patch CVE-2018-14614-f2fs-fix-to-do-sanity-check-with-cp_pack_start_sum.patch
 patch CVE-2018-18690-xfs-don-t-fail-when-converting-shortform-attr-to-lon.patch
 patch CVE-2018-19407-KVM-X86-Fix-scan-ioapic-use-before-initialization.patch
+#CVEs fixed in 4.14.87:
+patch CVE-2018-18397-userfaultfd-use-ENOENT-instead-of-EFAULT-if-the-atom.patch
diff --git a/patches/cve/CVE-2018-18397-userfaultfd-use-ENOENT-instead-of-EFAULT-if-the-atom.patch b/patches/cve/CVE-2018-18397-userfaultfd-use-ENOENT-instead-of-EFAULT-if-the-atom.patch
new file mode 100644
index 0000000..0d02d22
--- /dev/null
+++ b/patches/cve/CVE-2018-18397-userfaultfd-use-ENOENT-instead-of-EFAULT-if-the-atom.patch
@@ -0,0 +1,118 @@
+From 82c5a8c0debac552750a00b4fc7551c89c7b34b8 Mon Sep 17 00:00:00 2001
+From: Andrea Arcangeli
+Date: Fri, 30 Nov 2018 14:09:25 -0800
+Subject: [PATCH] userfaultfd: use ENOENT instead of EFAULT if the atomic copy
+ user fails
+
+commit 9e368259ad988356c4c95150fafd1a06af095d98 upstream.
+
+Patch series "userfaultfd shmem updates".
+
+Jann found two bugs in the userfaultfd shmem MAP_SHARED backend: the
+lack of the VM_MAYWRITE check and the lack of i_size checks.
+
+Then looking into the above we also fixed the MAP_PRIVATE case.
+
+Hugh by source review also found a data loss source if UFFDIO_COPY is
+used on shmem MAP_SHARED PROT_READ mappings (the production usages
+incidentally run with PROT_READ|PROT_WRITE, so the data loss couldn't
+happen in those production usages like with QEMU).
+
+The whole patchset is marked for stable.
+
+We verified QEMU postcopy live migration with guest running on shmem
+MAP_PRIVATE run as well as before after the fix of shmem MAP_PRIVATE.
+Regardless if it's shmem or hugetlbfs or MAP_PRIVATE or MAP_SHARED, QEMU
+unconditionally invokes a punch hole if the guest mapping is filebacked
+and a MADV_DONTNEED too (needed to get rid of the MAP_PRIVATE COWs and
+for the anon backend).
+
+This patch (of 5):
+
+We internally used EFAULT to communicate with the caller, switch to
+ENOENT, so EFAULT can be used as a non internal retval.
+
+CVE: CVE-2018-18397
+Upstream-Status: Backport
+
+Link: http://lkml.kernel.org/r/20181126173452.26955-2-aarcange@redhat.com
+Fixes: 4c27fe4c4c84 ("userfaultfd: shmem: add shmem_mcopy_atomic_pte for userfaultfd support")
+Signed-off-by: Andrea Arcangeli
+Reviewed-by: Mike Rapoport
+Reviewed-by: Hugh Dickins
+Cc: Mike Kravetz
+Cc: Jann Horn
+Cc: Peter Xu
+Cc: "Dr. David Alan Gilbert"
+Cc:
+Cc: stable@vger.kernel.org
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+Signed-off-by: Andreas Wellving
+---
+ mm/hugetlb.c     | 2 +-
+ mm/shmem.c       | 2 +-
+ mm/userfaultfd.c | 6 +++---
+ 3 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index f46040aed2da..224cdd953a79 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4037,7 +4037,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
+ 
+ 	/* fallback to copy_from_user outside mmap_sem */
+ 	if (unlikely(ret)) {
+-		ret = -EFAULT;
++		ret = -ENOENT;
+ 		*pagep = page;
+ 		/* don't free the page */
+ 		goto out;
+diff --git a/mm/shmem.c b/mm/shmem.c
+index ab7ff0aeae2d..9f856ecda73b 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2266,7 +2266,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
+ 			*pagep = page;
+ 			shmem_inode_unacct_blocks(inode, 1);
+ 			/* don't free the page */
+-			return -EFAULT;
++			return -ENOENT;
+ 		}
+ 	} else {		/* mfill_zeropage_atomic */
+ 		clear_highpage(page);
+diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
+index 81192701964d..c63c0fc5ecfa 100644
+--- a/mm/userfaultfd.c
++++ b/mm/userfaultfd.c
+@@ -49,7 +49,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
+ 
+ 	/* fallback to copy_from_user outside mmap_sem */
+ 	if (unlikely(ret)) {
+-		ret = -EFAULT;
++		ret = -ENOENT;
+ 		*pagep = page;
+ 		/* don't free the page */
+ 		goto out;
+@@ -275,7 +275,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
+ 
+ 		cond_resched();
+ 
+-		if (unlikely(err == -EFAULT)) {
++		if (unlikely(err == -ENOENT)) {
+ 			up_read(&dst_mm->mmap_sem);
+ 			BUG_ON(!page);
+ 
+@@ -521,7 +521,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
+ 				src_addr, &page, zeropage);
+ 		cond_resched();
+ 
+-		if (unlikely(err == -EFAULT)) {
++		if (unlikely(err == -ENOENT)) {
+ 			void *page_kaddr;
+ 
+ 			up_read(&dst_mm->mmap_sem);
+-- 
+2.19.2
+
-- 
cgit v1.2.3-54-g00ecf
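
For reference, the sketch below mirrors the retry protocol this patch adjusts, written as plain user-space C rather than kernel code. In mm/userfaultfd.c the helper that runs under mmap_sem can fail its atomic copy of user memory; it hands the allocated page back to the caller and asks for the copy to be redone outside the lock, and after this patch that request is signalled with -ENOENT so that -EFAULT stays free to reach the caller as a genuine fault. Everything here (copy_pte_atomic, mcopy, fake_mmap_sem, the pthread rwlock standing in for mmap_sem) is invented for illustration and is not the kernel implementation.

/*
 * Illustrative user-space sketch only -- not the kernel code.  It mirrors
 * the pattern touched by this patch: the helper running under the
 * (simulated) mmap_sem may fail its atomic copy and asks the caller to
 * redo the copy outside the lock.  After the patch that request is
 * signalled with -ENOENT, leaving -EFAULT free for real errors.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_rwlock_t fake_mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for mcopy_atomic_pte(): may demand a retry outside the lock. */
static int copy_pte_atomic(char *dst, size_t len, char **pagep)
{
	static char page[4096];

	if (*pagep == NULL) {
		/* Atomic copy "failed": hand back a page and request a
		 * retry with -ENOENT (this used to be -EFAULT). */
		*pagep = page;
		return -ENOENT;
	}
	/* Second pass: the caller already filled the page for us. */
	memcpy(dst, *pagep, len);
	return 0;
}

/* Stand-in for __mcopy_atomic(): drops the lock, copies, and retries. */
static int mcopy(char *dst, const char *src, size_t len)
{
	char *page = NULL;
	int err;

retry:
	pthread_rwlock_rdlock(&fake_mmap_sem);
	err = copy_pte_atomic(dst, len, &page);
	pthread_rwlock_unlock(&fake_mmap_sem);

	if (err == -ENOENT) {
		/* Copy from "user" memory without holding the lock. */
		memcpy(page, src, len);
		goto retry;
	}
	/* Any -EFAULT now propagates to the caller unchanged. */
	return err;
}

int main(void)
{
	char src[] = "userfaultfd";
	char dst[sizeof(src)] = "";

	printf("mcopy() = %d, dst = \"%s\"\n", mcopy(dst, src, sizeof(src)), dst);
	return 0;
}

Before the change, the internal retry request and a real fault were both reported as -EFAULT at this layer; giving the retry its own sentinel is what allows -EFAULT to be returned to user space only for actual failures.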