Diffstat (limited to 'recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.2/0128-SHM_UNLOCK-fix-long-unpreemptible-section.patch')
-rw-r--r-- recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.2/0128-SHM_UNLOCK-fix-long-unpreemptible-section.patch | 186
1 files changed, 186 insertions, 0 deletions
diff --git a/recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.2/0128-SHM_UNLOCK-fix-long-unpreemptible-section.patch b/recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.2/0128-SHM_UNLOCK-fix-long-unpreemptible-section.patch
new file mode 100644
index 00000000..f6c0efd2
--- /dev/null
+++ b/recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.2/0128-SHM_UNLOCK-fix-long-unpreemptible-section.patch
@@ -0,0 +1,186 @@
From 1dc1d2ffffa4b2e00e43573abdb5db9ad08ce53f Mon Sep 17 00:00:00 2001
From: Hugh Dickins <hughd@google.com>
Date: Fri, 20 Jan 2012 14:34:19 -0800
Subject: [PATCH 128/130] SHM_UNLOCK: fix long unpreemptible section

commit 85046579bde15e532983438f86b36856e358f417 upstream.

scan_mapping_unevictable_pages() is used to make SysV SHM_LOCKed pages
evictable again once the shared memory is unlocked. It does this with
pagevec_lookup()s across the whole object (which might occupy most of
memory), and takes 300ms to unlock 7GB here. A cond_resched() every
PAGEVEC_SIZE pages would be good.

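[Editorial sketch, not part of the original patch: the batched lookup
being described has roughly the following shape. scan_mapping_sketch is
a made-up name; pagevec_init(), pagevec_lookup(), pagevec_count(),
pagevec_release() and PAGEVEC_SIZE (14 in 3.2) are the real
<linux/pagevec.h> interfaces of this era.]

#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

/*
 * Visit every page of a mapping, PAGEVEC_SIZE pages per lookup.
 * Without the cond_resched() between batches, a mapping covering
 * most of memory keeps the CPU in this loop for hundreds of ms,
 * unpreemptibly so if the caller holds a spinlock.
 */
static void scan_mapping_sketch(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t next = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > next)
				next = page->index;
			next++;
			/* ... examine and fix up the page here ... */
		}
		pagevec_release(&pvec);
		cond_resched();	/* let preemption happen between batches */
	}
}
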
However, KOSAKI-san points out that this is called under shmem.c's
info->lock, and it's also under shm.c's shm_lock(), both spinlocks.
There is no strong reason for that: we need to take these pages off the
unevictable list soonish, but those locks are not required for it.

So move the call to scan_mapping_unevictable_pages() from shmem.c's
unlock handling up to shm.c's unlock handling. Remove the recently
added barrier, not needed now we have spin_unlock() before the scan.

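[Editorial sketch of the resulting shape: update the shared state
under the spinlock, drop it, then do the long walk preemptibly. Both
function names here are hypothetical; fix_up_pages_sketch() stands in
for the real scan.]

#include <linux/spinlock.h>

struct address_space;			/* opaque here; see <linux/fs.h> */

static void fix_up_pages_sketch(struct address_space *mapping);

static void unlock_then_scan_sketch(spinlock_t *lock,
				    struct address_space *mapping)
{
	spin_lock(lock);
	/*
	 * Only the state that must change atomically with the lock
	 * (e.g. clearing a "locked" flag) is touched here.
	 */
	spin_unlock(lock);

	/* Preemptible again: safe to spend hundreds of ms scanning. */
	fix_up_pages_sketch(mapping);
}
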
Use get_file(), with subsequent fput(), to make sure we have a reference
to mapping throughout scan_mapping_unevictable_pages(): that's something
that was previously guaranteed by the shm_lock().

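[In the ipc/shm.c hunk below that becomes the following sequence,
sketched here in isolation. shm_unlock() is file-local to ipc/shm.c,
so this compiles only in that context; the wrapper name is made up.]

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/shm.h>
#include <linux/swap.h>

/*
 * The new SHM_UNLOCK tail: pin the file (and with it ->f_mapping),
 * drop the IPC spinlock, scan preemptibly, then unpin.
 */
static void shm_unlock_tail_sketch(struct shmid_kernel *shp,
				   struct file *shm_file)
{
	get_file(shm_file);	/* reference formerly implied by shm_lock() */
	shm_unlock(shp);	/* releases the spinlock */
	scan_mapping_unevictable_pages(shm_file->f_mapping);
	fput(shm_file);		/* may drop the final reference */
}
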
Remove shmctl's lru_add_drain_all(): we don't fault in pages at SHM_LOCK
time, and we lazily discover them to be Unevictable later, so it serves
no purpose for SHM_LOCK; and serves no purpose for SHM_UNLOCK, since
pages still on pagevec are not marked Unevictable.

The original code avoided redundant rescans by checking VM_LOCKED flag
at its level: now avoid them by checking shp's SHM_LOCKED.

The original code called scan_mapping_unevictable_pages() on a locked
area at shm_destroy() time: perhaps we once had accounting cross-checks
which required that, but not now, so skip the overhead and just let
inode eviction deal with them.

Put check_move_unevictable_page() and scan_mapping_unevictable_pages()
under CONFIG_SHMEM (with stub for the TINY case when ramfs is used),
more as comment than to save space; comment them used for SHM_UNLOCK.

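[For comparison, an editorial sketch of the more common header-side
form of the same stub idiom; this is not what the patch does.]

struct address_space;

#ifdef CONFIG_SHMEM
extern void scan_mapping_unevictable_pages(struct address_space *mapping);
#else
static inline void scan_mapping_unevictable_pages(struct address_space *mapping)
{
	/* TINY/ramfs case: nothing is ever on the unevictable list */
}
#endif

[The patch instead keeps both definitions in mm/vmscan.c, so the single
declaration in <linux/swap.h> serves both configurations.]
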
Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Shaohua Li <shaohua.li@intel.com>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
---
 ipc/shm.c   |   37 ++++++++++++++++++++++---------------
 mm/shmem.c  |    7 -------
 mm/vmscan.c |   12 +++++++++++-
 3 files changed, 33 insertions(+), 23 deletions(-)

diff --git a/ipc/shm.c b/ipc/shm.c
index 02ecf2c..854ab58 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -870,9 +870,7 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
 	case SHM_LOCK:
 	case SHM_UNLOCK:
 	{
-		struct file *uninitialized_var(shm_file);
-
-		lru_add_drain_all(); /* drain pagevecs to lru lists */
+		struct file *shm_file;
 
 		shp = shm_lock_check(ns, shmid);
 		if (IS_ERR(shp)) {
@@ -895,22 +893,31 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
 		err = security_shm_shmctl(shp, cmd);
 		if (err)
 			goto out_unlock;
-
-		if(cmd==SHM_LOCK) {
+
+		shm_file = shp->shm_file;
+		if (is_file_hugepages(shm_file))
+			goto out_unlock;
+
+		if (cmd == SHM_LOCK) {
 			struct user_struct *user = current_user();
-			if (!is_file_hugepages(shp->shm_file)) {
-				err = shmem_lock(shp->shm_file, 1, user);
-				if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){
-					shp->shm_perm.mode |= SHM_LOCKED;
-					shp->mlock_user = user;
-				}
+			err = shmem_lock(shm_file, 1, user);
+			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
+				shp->shm_perm.mode |= SHM_LOCKED;
+				shp->mlock_user = user;
 			}
-		} else if (!is_file_hugepages(shp->shm_file)) {
-			shmem_lock(shp->shm_file, 0, shp->mlock_user);
-			shp->shm_perm.mode &= ~SHM_LOCKED;
-			shp->mlock_user = NULL;
+			goto out_unlock;
 		}
+
+		/* SHM_UNLOCK */
+		if (!(shp->shm_perm.mode & SHM_LOCKED))
+			goto out_unlock;
+		shmem_lock(shm_file, 0, shp->mlock_user);
+		shp->shm_perm.mode &= ~SHM_LOCKED;
+		shp->mlock_user = NULL;
+		get_file(shm_file);
 		shm_unlock(shp);
+		scan_mapping_unevictable_pages(shm_file->f_mapping);
+		fput(shm_file);
 		goto out;
 	}
 	case IPC_RMID:
diff --git a/mm/shmem.c b/mm/shmem.c
index d672250..cc6d40b2 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1068,13 +1068,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
 		user_shm_unlock(inode->i_size, user);
 		info->flags &= ~VM_LOCKED;
 		mapping_clear_unevictable(file->f_mapping);
-		/*
-		 * Ensure that a racing putback_lru_page() can see
-		 * the pages of this mapping are evictable when we
-		 * skip them due to !PageLRU during the scan.
-		 */
-		smp_mb__after_clear_bit();
-		scan_mapping_unevictable_pages(file->f_mapping);
 	}
 	retval = 0;
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f54a05b..824676a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3353,6 +3353,7 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
 	return 1;
 }
 
+#ifdef CONFIG_SHMEM
 /**
  * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
  * @page: page to check evictability and move to appropriate lru list
@@ -3363,6 +3364,8 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
  *
  * Restrictions: zone->lru_lock must be held, page must be on LRU and must
  * have PageUnevictable set.
+ *
+ * This function is only used for SysV IPC SHM_UNLOCK.
  */
 static void check_move_unevictable_page(struct page *page, struct zone *zone)
 {
@@ -3396,6 +3399,8 @@ retry:
  *
  * Scan all pages in mapping.  Check unevictable pages for
  * evictability and move them to the appropriate zone lru list.
+ *
+ * This function is only used for SysV IPC SHM_UNLOCK.
  */
 void scan_mapping_unevictable_pages(struct address_space *mapping)
 {
@@ -3441,9 +3446,14 @@ void scan_mapping_unevictable_pages(struct address_space *mapping)
 		pagevec_release(&pvec);
 
 		count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
+		cond_resched();
 	}
-
 }
+#else
+void scan_mapping_unevictable_pages(struct address_space *mapping)
+{
+}
+#endif /* CONFIG_SHMEM */
 
 static void warn_scan_unevictable_pages(void)
 {
--
1.7.7.4
