author     Sona Sarmadi <sona.sarmadi@enea.com>      2015-02-17 12:38:44 +0100
committer  Zhenhua Luo <zhenhua.luo@freescale.com>   2015-03-06 16:28:23 +0800
commit     960cf177f385512036e14d21e845900949e39e8b (patch)
tree       13d92a54faaa7556a9fa45c8d4b317d2f359843f /recipes-kernel
parent     252c07db20a389027565ae7558b4ecdc4f9d74e9 (diff)
mm/shmem: CVE-2014-4171
Fixes a denial of service flaw in the Linux kernel built with shared memory support.

Reference:
http://www.openwall.com/lists/oss-security/2014/06/18/11
http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2014-4171

Signed-off-by: Sona Sarmadi <sona.sarmadi@enea.com>
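For orientation before the diff: the upstream patch descriptions below characterise the bug as one task repeatedly faulting pages back into a tmpfs hole while another task is punching that hole with fallocate(FALLOC_FL_PUNCH_HOLE). A rough userspace sketch of that access pattern is shown here; it is not part of this commit and not a verified reproducer, and the file path and sizes are made up:

/* Hypothetical illustration of the CVE-2014-4171 access pattern:
 * one thread punches a hole in a tmpfs-backed file while another
 * keeps faulting pages back into the same range through mmap.
 * Build with: cc -pthread demo.c
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <pthread.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

#define SIZE (16 << 20)   /* 16 MiB, arbitrary */

static char *map;
static int fd;

static void *fault_loop(void *arg)
{
	(void)arg;
	for (;;) {
		/* Touch every page so it is faulted back into the hole. */
		for (size_t off = 0; off < SIZE; off += 4096)
			map[off] = 1;
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	/* Any tmpfs-backed file works; /dev/shm is tmpfs on most systems. */
	fd = open("/dev/shm/cve-2014-4171-demo", O_RDWR | O_CREAT, 0600);
	if (fd < 0 || ftruncate(fd, SIZE) < 0)
		return 1;
	map = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		return 1;

	pthread_create(&t, NULL, fault_loop, NULL);

	/* Before these fixes, the puncher could be starved by the faulting
	 * thread while holding i_mutex, locking out other writers. */
	for (;;)
		fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, SIZE);
}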
Diffstat (limited to 'recipes-kernel')
-rw-r--r--  recipes-kernel/linux/files/0001-shmem-CVE-2014-4171.patch  141
-rw-r--r--  recipes-kernel/linux/files/0002-shmem-CVE-2014-4171.patch  200
-rw-r--r--  recipes-kernel/linux/files/0003-shmem-CVE-2014-4171.patch  134
-rw-r--r--  recipes-kernel/linux/linux-qoriq_3.12.bb                     3
4 files changed, 478 insertions, 0 deletions
diff --git a/recipes-kernel/linux/files/0001-shmem-CVE-2014-4171.patch b/recipes-kernel/linux/files/0001-shmem-CVE-2014-4171.patch
new file mode 100644
index 0000000..00ead60
--- /dev/null
+++ b/recipes-kernel/linux/files/0001-shmem-CVE-2014-4171.patch
@@ -0,0 +1,141 @@
From 8685789bd8ec12a02b07ea76df4527b055efbf20 Mon Sep 17 00:00:00 2001
From: Hugh Dickins <hughd@google.com>
Date: Mon, 23 Jun 2014 13:22:06 -0700
Subject: [PATCH 1/3] shmem: fix faulting into a hole while it's punched

commit f00cdc6df7d7cfcabb5b740911e6788cb0802bdb upstream.

Trinity finds that mmap access to a hole while it's punched from shmem
can prevent the madvise(MADV_REMOVE) or fallocate(FALLOC_FL_PUNCH_HOLE)
from completing, until the reader chooses to stop; with the puncher's
hold on i_mutex locking out all other writers until it can complete.

It appears that the tmpfs fault path is too light in comparison with its
hole-punching path, lacking an i_data_sem to obstruct it; but we don't
want to slow down the common case.

Extend shmem_fallocate()'s existing range notification mechanism, so
shmem_fault() can refrain from faulting pages into the hole while it's
punched, waiting instead on i_mutex (when safe to sleep; or repeatedly
faulting when not).

Upstream-Status: Backport

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Hugh Dickins <hughd@google.com>
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Dave Jones <davej@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Sona Sarmadi <sona.sarmadi@enea.com>
---
 mm/shmem.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 52 insertions(+), 4 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 8297623..00d412f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -80,11 +80,12 @@ static struct vfsmount *shm_mnt;
 #define SHORT_SYMLINK_LEN 128

 /*
- * shmem_fallocate and shmem_writepage communicate via inode->i_private
- * (with i_mutex making sure that it has only one user at a time):
- * we would prefer not to enlarge the shmem inode just for that.
+ * shmem_fallocate communicates with shmem_fault or shmem_writepage via
+ * inode->i_private (with i_mutex making sure that it has only one user at
+ * a time): we would prefer not to enlarge the shmem inode just for that.
 */
 struct shmem_falloc {
+ int mode; /* FALLOC_FL mode currently operating */
 pgoff_t start; /* start of range currently being fallocated */
 pgoff_t next; /* the next page offset to be fallocated */
 pgoff_t nr_falloced; /* how many new pages have been fallocated */
@@ -826,6 +827,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 spin_lock(&inode->i_lock);
 shmem_falloc = inode->i_private;
 if (shmem_falloc &&
+ !shmem_falloc->mode &&
 index >= shmem_falloc->start &&
 index < shmem_falloc->next)
 shmem_falloc->nr_unswapped++;
@@ -1300,6 +1302,44 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 int error;
 int ret = VM_FAULT_LOCKED;

+ /*
+ * Trinity finds that probing a hole which tmpfs is punching can
+ * prevent the hole-punch from ever completing: which in turn
+ * locks writers out with its hold on i_mutex. So refrain from
+ * faulting pages into the hole while it's being punched, and
+ * wait on i_mutex to be released if vmf->flags permits.
+ */
+ if (unlikely(inode->i_private)) {
+ struct shmem_falloc *shmem_falloc;
+
+ spin_lock(&inode->i_lock);
+ shmem_falloc = inode->i_private;
+ if (!shmem_falloc ||
+ shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
+ vmf->pgoff < shmem_falloc->start ||
+ vmf->pgoff >= shmem_falloc->next)
+ shmem_falloc = NULL;
+ spin_unlock(&inode->i_lock);
+ /*
+ * i_lock has protected us from taking shmem_falloc seriously
+ * once return from shmem_fallocate() went back up that stack.
+ * i_lock does not serialize with i_mutex at all, but it does
+ * not matter if sometimes we wait unnecessarily, or sometimes
+ * miss out on waiting: we just need to make those cases rare.
+ */
+ if (shmem_falloc) {
+ if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
+ !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ up_read(&vma->vm_mm->mmap_sem);
+ mutex_lock(&inode->i_mutex);
+ mutex_unlock(&inode->i_mutex);
+ return VM_FAULT_RETRY;
+ }
+ /* cond_resched? Leave that to GUP or return to user */
+ return VM_FAULT_NOPAGE;
+ }
+ }
+
 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
 if (error)
 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
@@ -1815,18 +1855,26 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,

 mutex_lock(&inode->i_mutex);

+ shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
+
 if (mode & FALLOC_FL_PUNCH_HOLE) {
 struct address_space *mapping = file->f_mapping;
 loff_t unmap_start = round_up(offset, PAGE_SIZE);
 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;

+ shmem_falloc.start = unmap_start >> PAGE_SHIFT;
+ shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
+ spin_lock(&inode->i_lock);
+ inode->i_private = &shmem_falloc;
+ spin_unlock(&inode->i_lock);
+
 if ((u64)unmap_end > (u64)unmap_start)
 unmap_mapping_range(mapping, unmap_start,
 1 + unmap_end - unmap_start, 0);
 shmem_truncate_range(inode, offset, offset + len - 1);
 /* No need to unmap again: hole-punching leaves COWed pages */
 error = 0;
- goto out;
+ goto undone;
 }

 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
--
1.9.1
diff --git a/recipes-kernel/linux/files/0002-shmem-CVE-2014-4171.patch b/recipes-kernel/linux/files/0002-shmem-CVE-2014-4171.patch
new file mode 100644
index 0000000..a43b895
--- /dev/null
+++ b/recipes-kernel/linux/files/0002-shmem-CVE-2014-4171.patch
@@ -0,0 +1,200 @@
From 38d05809df1ea5272a658e7f4d5f2a3027ad2fd2 Mon Sep 17 00:00:00 2001
From: Hugh Dickins <hughd@google.com>
Date: Wed, 23 Jul 2014 14:00:10 -0700
Subject: [PATCH 2/3] shmem: fix faulting into a hole, not taking i_mutex

commit 8e205f779d1443a94b5ae81aa359cb535dd3021e upstream.

Commit f00cdc6df7d7 ("shmem: fix faulting into a hole while it's
punched") was buggy: Sasha sent a lockdep report to remind us that
grabbing i_mutex in the fault path is a no-no (write syscall may already
hold i_mutex while faulting user buffer).

We tried a completely different approach (see following patch) but that
proved inadequate: good enough for a rational workload, but not good
enough against trinity - which forks off so many mappings of the object
that contention on i_mmap_mutex while hole-puncher holds i_mutex builds
into serious starvation when concurrent faults force the puncher to fall
back to single-page unmap_mapping_range() searches of the i_mmap tree.

So return to the original umbrella approach, but keep away from i_mutex
this time. We really don't want to bloat every shmem inode with a new
mutex or completion, just to protect this unlikely case from trinity.
So extend the original with wait_queue_head on stack at the hole-punch
end, and wait_queue item on the stack at the fault end.

This involves further use of i_lock to guard against the races: lockdep
has been happy so far, and I see fs/inode.c:unlock_new_inode() holds
i_lock around wake_up_bit(), which is comparable to what we do here.
i_lock is more convenient, but we could switch to shmem's info->lock.

This issue has been tagged with CVE-2014-4171, which will require commit
f00cdc6df7d7 and this and the following patch to be backported: we
suggest to 3.1+, though in fact the trinity forkbomb effect might go
back as far as 2.6.16, when madvise(,,MADV_REMOVE) came in - or might
not, since much has changed, with i_mmap_mutex a spinlock before 3.0.
Anyone running trinity on 3.0 and earlier? I don't think we need care.

Upstream-Status: Backport

Signed-off-by: Hugh Dickins <hughd@google.com>
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Lukas Czerner <lczerner@redhat.com>
Cc: Dave Jones <davej@redhat.com>
Cc: <stable@vger.kernel.org> [3.1+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Sona Sarmadi <sona.sarmadi@enea.com>
---
 mm/shmem.c | 78 +++++++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 52 insertions(+), 26 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 00d412f..6f5626f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -85,7 +85,7 @@ static struct vfsmount *shm_mnt;
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
 struct shmem_falloc {
- int mode; /* FALLOC_FL mode currently operating */
+ wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
 pgoff_t start; /* start of range currently being fallocated */
 pgoff_t next; /* the next page offset to be fallocated */
 pgoff_t nr_falloced; /* how many new pages have been fallocated */
@@ -827,7 +827,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 spin_lock(&inode->i_lock);
 shmem_falloc = inode->i_private;
 if (shmem_falloc &&
- !shmem_falloc->mode &&
+ !shmem_falloc->waitq &&
 index >= shmem_falloc->start &&
 index < shmem_falloc->next)
 shmem_falloc->nr_unswapped++;
@@ -1306,38 +1306,58 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 * Trinity finds that probing a hole which tmpfs is punching can
 * prevent the hole-punch from ever completing: which in turn
 * locks writers out with its hold on i_mutex. So refrain from
- * faulting pages into the hole while it's being punched, and
- * wait on i_mutex to be released if vmf->flags permits.
+ * faulting pages into the hole while it's being punched. Although
+ * shmem_undo_range() does remove the additions, it may be unable to
+ * keep up, as each new page needs its own unmap_mapping_range() call,
+ * and the i_mmap tree grows ever slower to scan if new vmas are added.
+ *
+ * It does not matter if we sometimes reach this check just before the
+ * hole-punch begins, so that one fault then races with the punch:
+ * we just need to make racing faults a rare case.
+ *
+ * The implementation below would be much simpler if we just used a
+ * standard mutex or completion: but we cannot take i_mutex in fault,
+ * and bloating every shmem inode for this unlikely case would be sad.
 */
 if (unlikely(inode->i_private)) {
 struct shmem_falloc *shmem_falloc;

 spin_lock(&inode->i_lock);
 shmem_falloc = inode->i_private;
- if (!shmem_falloc ||
- shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
- vmf->pgoff < shmem_falloc->start ||
- vmf->pgoff >= shmem_falloc->next)
- shmem_falloc = NULL;
- spin_unlock(&inode->i_lock);
- /*
- * i_lock has protected us from taking shmem_falloc seriously
- * once return from shmem_fallocate() went back up that stack.
- * i_lock does not serialize with i_mutex at all, but it does
- * not matter if sometimes we wait unnecessarily, or sometimes
- * miss out on waiting: we just need to make those cases rare.
- */
- if (shmem_falloc) {
+ if (shmem_falloc &&
+ shmem_falloc->waitq &&
+ vmf->pgoff >= shmem_falloc->start &&
+ vmf->pgoff < shmem_falloc->next) {
+ wait_queue_head_t *shmem_falloc_waitq;
+ DEFINE_WAIT(shmem_fault_wait);
+
+ ret = VM_FAULT_NOPAGE;
 if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
 !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ /* It's polite to up mmap_sem if we can */
 up_read(&vma->vm_mm->mmap_sem);
- mutex_lock(&inode->i_mutex);
- mutex_unlock(&inode->i_mutex);
- return VM_FAULT_RETRY;
+ ret = VM_FAULT_RETRY;
 }
- /* cond_resched? Leave that to GUP or return to user */
- return VM_FAULT_NOPAGE;
+
+ shmem_falloc_waitq = shmem_falloc->waitq;
+ prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
+ TASK_UNINTERRUPTIBLE);
+ spin_unlock(&inode->i_lock);
+ schedule();
+
+ /*
+ * shmem_falloc_waitq points into the shmem_fallocate()
+ * stack of the hole-punching task: shmem_falloc_waitq
+ * is usually invalid by the time we reach here, but
+ * finish_wait() does not dereference it in that case;
+ * though i_lock needed lest racing with wake_up_all().
+ */
+ spin_lock(&inode->i_lock);
+ finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
+ spin_unlock(&inode->i_lock);
+ return ret;
 }
+ spin_unlock(&inode->i_lock);
 }

 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
@@ -1855,13 +1875,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,

 mutex_lock(&inode->i_mutex);

- shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
-
 if (mode & FALLOC_FL_PUNCH_HOLE) {
 struct address_space *mapping = file->f_mapping;
 loff_t unmap_start = round_up(offset, PAGE_SIZE);
 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);

+ shmem_falloc.waitq = &shmem_falloc_waitq;
 shmem_falloc.start = unmap_start >> PAGE_SHIFT;
 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
 spin_lock(&inode->i_lock);
@@ -1873,8 +1893,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 1 + unmap_end - unmap_start, 0);
 shmem_truncate_range(inode, offset, offset + len - 1);
 /* No need to unmap again: hole-punching leaves COWed pages */
+
+ spin_lock(&inode->i_lock);
+ inode->i_private = NULL;
+ wake_up_all(&shmem_falloc_waitq);
+ spin_unlock(&inode->i_lock);
 error = 0;
- goto undone;
+ goto out;
 }

 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
@@ -1890,6 +1915,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 goto out;
 }

+ shmem_falloc.waitq = NULL;
 shmem_falloc.start = start;
 shmem_falloc.next = start;
 shmem_falloc.nr_falloced = 0;
--
1.9.1
diff --git a/recipes-kernel/linux/files/0003-shmem-CVE-2014-4171.patch b/recipes-kernel/linux/files/0003-shmem-CVE-2014-4171.patch
new file mode 100644
index 0000000..2b70ec1
--- /dev/null
+++ b/recipes-kernel/linux/files/0003-shmem-CVE-2014-4171.patch
@@ -0,0 +1,134 @@
From a428dc008e435c5a36b1288fb5b8c4b58472e28c Mon Sep 17 00:00:00 2001
From: Hugh Dickins <hughd@google.com>
Date: Wed, 23 Jul 2014 14:00:13 -0700
Subject: [PATCH 3/3] shmem: fix splicing from a hole while it's punched

commit b1a366500bd537b50c3aad26dc7df083ec03a448 upstream.

shmem_fault() is the actual culprit in trinity's hole-punch starvation,
and the most significant cause of such problems: since a page faulted is
one that then appears page_mapped(), needing unmap_mapping_range() and
i_mmap_mutex to be unmapped again.

But it is not the only way in which a page can be brought into a hole in
the radix_tree while that hole is being punched; and Vlastimil's testing
implies that if enough other processors are busy filling in the hole,
then shmem_undo_range() can be kept from completing indefinitely.

shmem_file_splice_read() is the main other user of SGP_CACHE, which can
instantiate shmem pagecache pages in the read-only case (without holding
i_mutex, so perhaps concurrently with a hole-punch). Probably it's
silly not to use SGP_READ already (using the ZERO_PAGE for holes): which
ought to be safe, but might bring surprises - not a change to be rushed.

shmem_read_mapping_page_gfp() is an internal interface used by
drivers/gpu/drm GEM (and next by uprobes): it should be okay. And
shmem_file_read_iter() uses the SGP_DIRTY variant of SGP_CACHE, when
called internally by the kernel (perhaps for a stacking filesystem,
which might rely on holes to be reserved): it's unclear whether it could
be provoked to keep hole-punch busy or not.

We could apply the same umbrella as now used in shmem_fault() to
shmem_file_splice_read() and the others; but it looks ugly, and use over
a range raises questions - should it actually be per page? can these get
starved themselves?

The origin of this part of the problem is my v3.1 commit d0823576bf4b
("mm: pincer in truncate_inode_pages_range"), once it was duplicated
into shmem.c. It seemed like a nice idea at the time, to ensure
(barring RCU lookup fuzziness) that there's an instant when the entire
hole is empty; but the indefinitely repeated scans to ensure that make
it vulnerable.

Revert that "enhancement" to hole-punch from shmem_undo_range(), but
retain the unproblematic rescanning when it's truncating; add a couple
of comments there.

Remove the "indices[0] >= end" test: that is now handled satisfactorily
by the inner loop, and mem_cgroup_uncharge_start()/end() are too light
to be worth avoiding here.

But if we do not always loop indefinitely, we do need to handle the case
of swap swizzled back to page before shmem_free_swap() gets it: add a
retry for that case, as suggested by Konstantin Khlebnikov; and for the
case of page swizzled back to swap, as suggested by Johannes Weiner.

Upstream-Status: Backport

Signed-off-by: Hugh Dickins <hughd@google.com>
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Lukas Czerner <lczerner@redhat.com>
Cc: Dave Jones <davej@redhat.com>
Cc: <stable@vger.kernel.org> [3.1+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Sona Sarmadi <sona.sarmadi@enea.com>
---
 mm/shmem.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 6f5626f..0da81aa 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -534,22 +534,19 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 return;

 index = start;
- for ( ; ; ) {
+ while (index < end) {
 cond_resched();
 pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
 min(end - index, (pgoff_t)PAGEVEC_SIZE),
 pvec.pages, indices);
 if (!pvec.nr) {
- if (index == start || unfalloc)
+ /* If all gone or hole-punch or unfalloc, we're done */
+ if (index == start || end != -1)
 break;
+ /* But if truncating, restart to make sure all gone */
 index = start;
 continue;
 }
- if ((index == start || unfalloc) && indices[0] >= end) {
- shmem_deswap_pagevec(&pvec);
- pagevec_release(&pvec);
- break;
- }
 mem_cgroup_uncharge_start();
 for (i = 0; i < pagevec_count(&pvec); i++) {
 struct page *page = pvec.pages[i];
@@ -561,8 +558,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 if (radix_tree_exceptional_entry(page)) {
 if (unfalloc)
 continue;
- nr_swaps_freed += !shmem_free_swap(mapping,
- index, page);
+ if (shmem_free_swap(mapping, index, page)) {
+ /* Swap was replaced by page: retry */
+ index--;
+ break;
+ }
+ nr_swaps_freed++;
 continue;
 }

@@ -571,6 +572,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 if (page->mapping == mapping) {
 VM_BUG_ON(PageWriteback(page));
 truncate_inode_page(mapping, page);
+ } else {
+ /* Page was replaced by swap: retry */
+ unlock_page(page);
+ index--;
+ break;
+ }
 }
 unlock_page(page);
--
1.9.1
diff --git a/recipes-kernel/linux/linux-qoriq_3.12.bb b/recipes-kernel/linux/linux-qoriq_3.12.bb
index 7bf8b22..9727a73 100644
--- a/recipes-kernel/linux/linux-qoriq_3.12.bb
+++ b/recipes-kernel/linux/linux-qoriq_3.12.bb
@@ -31,6 +31,9 @@ SRC_URI = "git://git.freescale.com/ppc/sdk/linux.git;nobranch=1 \
     file://0002-ALSA-CVE-2014-4656.patch \
     file://target-CVE-2014-4027.patch \
     file://mm-2014-3122.patch \
+    file://0001-shmem-CVE-2014-4171.patch \
+    file://0002-shmem-CVE-2014-4171.patch \
+    file://0003-shmem-CVE-2014-4171.patch \
 "
 SRCREV = "6619b8b55796cdf0cec04b66a71288edd3057229"
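Note: the hunk above wires the three patch files straight into linux-qoriq_3.12.bb. If the same backports were carried in a separate layer instead of this one, the equivalent wiring could be done from a bbappend; a minimal sketch, assuming a hypothetical custom layer that ships the patches in a files/ directory next to the bbappend (not part of this commit):

# linux-qoriq_3.12.bbappend -- hypothetical, in a custom layer
FILESEXTRAPATHS_prepend := "${THISDIR}/files:"

SRC_URI += " \
    file://0001-shmem-CVE-2014-4171.patch \
    file://0002-shmem-CVE-2014-4171.patch \
    file://0003-shmem-CVE-2014-4171.patch \
"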