Diffstat (limited to 'recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.24/0049-PM-Hibernate-Hibernate-thaw-fixes-improvements.patch')
-rw-r--r--  recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.24/0049-PM-Hibernate-Hibernate-thaw-fixes-improvements.patch  |  178
1 file changed, 178 insertions(+), 0 deletions(-)
diff --git a/recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.24/0049-PM-Hibernate-Hibernate-thaw-fixes-improvements.patch b/recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.24/0049-PM-Hibernate-Hibernate-thaw-fixes-improvements.patch
new file mode 100644
index 00000000..b0b708b6
--- /dev/null
+++ b/recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.24/0049-PM-Hibernate-Hibernate-thaw-fixes-improvements.patch
@@ -0,0 +1,178 @@
From b6807062ada796cdfde2c0f5ca59390b0c916aae Mon Sep 17 00:00:00 2001
From: Bojan Smojver <bojan@rexursive.com>
Date: Sun, 29 Apr 2012 22:42:06 +0200
Subject: [PATCH 049/109] PM / Hibernate: Hibernate/thaw fixes/improvements

commit 5a21d489fd9541a4a66b9a500659abaca1b19a51 upstream.

 1. Do not allocate memory for buffers from emergency pools, unless
    absolutely required. Do not warn about and do not retry non-essential
    failed allocations (a user-space sketch of this pattern follows the
    list).

 2. Do not check the amount of free pages left on every single page
    write, but wait until one map is completely populated and then check.

 3. Set maximum number of pages for read buffering consistently, instead
    of inadvertently depending on the size of the sector type.

 4. Fix copyright line, which I missed when I submitted the hibernation
    threading patch.

 5. Dispense with bit shifting arithmetic to improve readability.

 6. Really recalculate the number of pages required to be free after all
    allocations have been done.

 7. Fix calculation of pages required for read buffering. Only count in
    pages that do not belong to high memory.

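A minimal user-space sketch of the policy in points 1 and 2 (illustration
only, not from the patch): malloc() stands in for __get_free_page(), and
wait_on_io(), copy_for_async_write() and PAGE_SIZE are invented for the
example; hib_wait_on_bio_chain() is the real kernel call being mimicked.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    /* Stand-in for hib_wait_on_bio_chain(): in the kernel this blocks
     * until submitted bios complete, releasing their temporary pages. */
    static int wait_on_io(void)
    {
            return 0;
    }

    /* Mirror of the write_page() logic below: return a private copy of
     * buf for asynchronous writing, or NULL if no copy could be made,
     * so the caller can fall back to another path. */
    static void *copy_for_async_write(const void *buf)
    {
            void *src = malloc(PAGE_SIZE);          /* cheap first attempt */

            if (!src) {
                    if (wait_on_io())               /* free pages held by I/O */
                            return NULL;
                    src = malloc(PAGE_SIZE);        /* one retry, no emergency pool */
            }
            if (src)
                    memcpy(src, buf, PAGE_SIZE);
            return src;
    }

    int main(void)
    {
            char page[PAGE_SIZE] = "image data";
            void *copy = copy_for_async_write(page);

            printf("async copy %s\n", copy ? "allocated" : "unavailable");
            free(copy);
            return 0;
    }

The point of the pattern: the first attempt is allowed to fail quietly and
cheaply, and the expensive step (draining in-flight I/O) is paid only when
that failure actually happens.
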
Signed-off-by: Bojan Smojver <bojan@rexursive.com>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
---
 kernel/power/swap.c |   62 ++++++++++++++++++++++++++++++++-------------------
 1 files changed, 39 insertions(+), 23 deletions(-)

diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index b313086..64f8f97 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -6,7 +6,7 @@
  *
  * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
  * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
- * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com>
+ * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
  *
  * This file is released under the GPLv2.
  *
@@ -283,14 +283,17 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
 		return -ENOSPC;
 
 	if (bio_chain) {
-		src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+		src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
+		                              __GFP_NORETRY);
 		if (src) {
 			copy_page(src, buf);
 		} else {
 			ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */
 			if (ret)
 				return ret;
-			src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+			src = (void *)__get_free_page(__GFP_WAIT |
+			                              __GFP_NOWARN |
+			                              __GFP_NORETRY);
 			if (src) {
 				copy_page(src, buf);
 			} else {
@@ -368,12 +371,17 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
 		clear_page(handle->cur);
 		handle->cur_swap = offset;
 		handle->k = 0;
-	}
-	if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
-		error = hib_wait_on_bio_chain(bio_chain);
-		if (error)
-			goto out;
-		handle->reqd_free_pages = reqd_free_pages();
+
+		if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
+			error = hib_wait_on_bio_chain(bio_chain);
+			if (error)
+				goto out;
+			/*
+			 * Recalculate the number of required free pages, to
+			 * make sure we never take more than half.
+			 */
+			handle->reqd_free_pages = reqd_free_pages();
+		}
 	}
 out:
 	return error;
@@ -420,8 +428,9 @@ static int swap_writer_finish(struct swap_map_handle *handle,
 /* Maximum number of threads for compression/decompression. */
 #define LZO_THREADS	3
 
-/* Maximum number of pages for read buffering. */
-#define LZO_READ_PAGES	(MAP_PAGE_ENTRIES * 8)
+/* Minimum/maximum number of pages for read buffering. */
+#define LZO_MIN_RD_PAGES	1024
+#define LZO_MAX_RD_PAGES	8192
 
 
 /**
@@ -632,12 +641,6 @@ static int save_image_lzo(struct swap_map_handle *handle,
 	}
 
 	/*
-	 * Adjust number of free pages after all allocations have been done.
-	 * We don't want to run out of pages when writing.
-	 */
-	handle->reqd_free_pages = reqd_free_pages();
-
-	/*
 	 * Start the CRC32 thread.
 	 */
 	init_waitqueue_head(&crc->go);
@@ -658,6 +661,12 @@ static int save_image_lzo(struct swap_map_handle *handle,
 		goto out_clean;
 	}
 
+	/*
+	 * Adjust the number of required free pages after all allocations have
+	 * been done. We don't want to run out of pages when writing.
+	 */
+	handle->reqd_free_pages = reqd_free_pages();
+
 	printk(KERN_INFO
 		"PM: Using %u thread(s) for compression.\n"
 		"PM: Compressing and saving image data (%u pages) ... ",
@@ -1067,7 +1076,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
 	unsigned i, thr, run_threads, nr_threads;
 	unsigned ring = 0, pg = 0, ring_size = 0,
 	         have = 0, want, need, asked = 0;
-	unsigned long read_pages;
+	unsigned long read_pages = 0;
 	unsigned char **page = NULL;
 	struct dec_data *data = NULL;
 	struct crc_data *crc = NULL;
@@ -1079,7 +1088,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
 	nr_threads = num_online_cpus() - 1;
 	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
 
-	page = vmalloc(sizeof(*page) * LZO_READ_PAGES);
+	page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
 	if (!page) {
 		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
 		ret = -ENOMEM;
@@ -1144,15 +1153,22 @@ static int load_image_lzo(struct swap_map_handle *handle,
 	}
 
 	/*
-	 * Adjust number of pages for read buffering, in case we are short.
+	 * Set the number of pages for read buffering.
+	 * This is complete guesswork, because we'll only know the real
+	 * picture once prepare_image() is called, which is much later on
+	 * during the image load phase. We'll assume the worst case and
+	 * say that none of the image pages are from high memory.
 	 */
-	read_pages = (nr_free_pages() - snapshot_get_image_size()) >> 1;
-	read_pages = clamp_val(read_pages, LZO_CMP_PAGES, LZO_READ_PAGES);
+	if (low_free_pages() > snapshot_get_image_size())
+		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
+	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
 
 	for (i = 0; i < read_pages; i++) {
 		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
 		                                  __GFP_WAIT | __GFP_HIGH :
-		                                  __GFP_WAIT);
+		                                  __GFP_WAIT | __GFP_NOWARN |
+		                                  __GFP_NORETRY);
+
 		if (!page[i]) {
 			if (i < LZO_CMP_PAGES) {
 				ring_size = i;
176--
1771.7.7.6
178
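
The read-buffer sizing described in points 3 and 7 and implemented by the
final load_image_lzo() hunk amounts to the standalone calculation below.
This is a sketch, not kernel code: the page counts are made-up sample
values standing in for low_free_pages() and snapshot_get_image_size(),
and clamp_val() is re-implemented so it compiles outside the kernel.

    #include <stdio.h>

    #define LZO_MIN_RD_PAGES 1024
    #define LZO_MAX_RD_PAGES 8192

    /* User-space re-implementation of the kernel's clamp_val(). */
    static unsigned long clamp_val(unsigned long v, unsigned long lo,
                                   unsigned long hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    int main(void)
    {
            unsigned long low_free = 50000; /* sample low_free_pages() */
            unsigned long image = 30000;    /* sample snapshot_get_image_size() */
            unsigned long read_pages = 0;

            /* Take at most half of the surplus low-memory pages; highmem
             * is never counted, since the buffers cannot live there. */
            if (low_free > image)
                    read_pages = (low_free - image) / 2;
            read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES,
                                   LZO_MAX_RD_PAGES);

            printf("read_pages = %lu\n", read_pages);
            return 0;
    }

With these sample numbers the surplus of 20000 pages is halved to 10000
and then clamped down to LZO_MAX_RD_PAGES, so read_pages comes out as
8192; on a machine with little free low memory the clamp instead raises
the result to LZO_MIN_RD_PAGES.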