path: root/patches/boot_time_opt_guest/0154-sysctl-vm-Fine-grained-cache-shrinking.patch
Diffstat (limited to 'patches/boot_time_opt_guest/0154-sysctl-vm-Fine-grained-cache-shrinking.patch')
-rw-r--r--  patches/boot_time_opt_guest/0154-sysctl-vm-Fine-grained-cache-shrinking.patch | 18
1 file changed, 9 insertions, 9 deletions
diff --git a/patches/boot_time_opt_guest/0154-sysctl-vm-Fine-grained-cache-shrinking.patch b/patches/boot_time_opt_guest/0154-sysctl-vm-Fine-grained-cache-shrinking.patch
index 07d4a83..a5d2b29 100644
--- a/patches/boot_time_opt_guest/0154-sysctl-vm-Fine-grained-cache-shrinking.patch
+++ b/patches/boot_time_opt_guest/0154-sysctl-vm-Fine-grained-cache-shrinking.patch
@@ -1,4 +1,4 @@
-From 2c145b5233b504f5226a0f4bc44baeef33b444d8 Mon Sep 17 00:00:00 2001
+From c7d8564d5d34c615e5ab03aa1e270888f49ff8b5 Mon Sep 17 00:00:00 2001
 From: Sebastien Boeuf <sebastien.boeuf@intel.com>
 Date: Mon, 23 Jan 2017 15:32:39 -0800
 Subject: [PATCH 154/154] sysctl: vm: Fine-grained cache shrinking
@@ -79,10 +79,10 @@ index d72d52b90433..f564dfcc13a4 100644
 + return 0;
 +}
 diff --git a/include/linux/mm.h b/include/linux/mm.h
-index 833f23d98baa..0bb66c1c31c9 100644
+index d8bcf5c4b996..9a1fc3cecac8 100644
 --- a/include/linux/mm.h
 +++ b/include/linux/mm.h
-@@ -2308,6 +2308,10 @@ extern int kvm_ret_mem_advice;
+@@ -2411,6 +2411,10 @@ extern int kvm_ret_mem_advice;
  int kvm_madv_instant_free_sysctl_handler(struct ctl_table *table, int write,
  void __user *buffer, size_t *length,
  loff_t *ppos);
@@ -94,10 +94,10 @@ index 833f23d98baa..0bb66c1c31c9 100644
 
  void drop_slab(void);
 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index d8ae774fa042..5dc9a46ae212 100644
+index 771a930cadfa..3bdd8030b7af 100644
 --- a/kernel/sysctl.c
 +++ b/kernel/sysctl.c
-@@ -1405,6 +1405,14 @@ static struct ctl_table vm_table[] = {
+@@ -1394,6 +1394,14 @@ static struct ctl_table vm_table[] = {
  .mode = 0644,
  .proc_handler = kvm_madv_instant_free_sysctl_handler,
  },
@@ -113,10 +113,10 @@ index d8ae774fa042..5dc9a46ae212 100644
  {
  .procname = "compact_memory",
 diff --git a/mm/vmscan.c b/mm/vmscan.c
-index 30a88b945a44..1198e74d1860 100644
+index 8ad39bbc79e6..d977e489d7f1 100644
 --- a/mm/vmscan.c
 +++ b/mm/vmscan.c
-@@ -3525,7 +3525,6 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
+@@ -3574,7 +3574,6 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
  wake_up_interruptible(&pgdat->kswapd_wait);
  }
 
@@ -124,7 +124,7 @@ index 30a88b945a44..1198e74d1860 100644
  /*
  * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
  * freed pages.
-@@ -3564,7 +3563,6 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
+@@ -3614,7 +3613,6 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 
  return nr_reclaimed;
  }
@@ -133,5 +133,5 @@ index 30a88b945a44..1198e74d1860 100644
  /* It's optimal to keep kswapds on the same CPUs as their memory, but
  not required for correctness. So if the last cpu in a node goes
 --
-2.12.1
+2.13.2
 