path: root/recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.17/0130-sched-Fix-nohz-load-accounting-again.patch
Diffstat (limited to 'recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.17/0130-sched-Fix-nohz-load-accounting-again.patch')
-rw-r--r--  recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.17/0130-sched-Fix-nohz-load-accounting-again.patch  133
1 file changed, 133 insertions(+), 0 deletions(-)
diff --git a/recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.17/0130-sched-Fix-nohz-load-accounting-again.patch b/recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.17/0130-sched-Fix-nohz-load-accounting-again.patch
new file mode 100644
index 00000000..44a254b9
--- /dev/null
+++ b/recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.17/0130-sched-Fix-nohz-load-accounting-again.patch
@@ -0,0 +1,133 @@
From 01347c8bd017f3c031bc472db23953f9695fd65c Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Thu, 1 Mar 2012 15:04:46 +0100
Subject: [PATCH 130/165] sched: Fix nohz load accounting -- again!
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

commit c308b56b5398779cd3da0f62ab26b0453494c3d4 upstream.

Various people reported nohz load tracking still being wrecked, but Doug
spotted the actual problem. We fold the nohz remainder in too soon,
causing us to lose samples and under-account.

So instead of playing catch-up up-front, always do a single load-fold
with whatever state we encounter and only then fold the nohz remainder
and play catch-up.

Reported-by: Doug Smythies <dsmythies@telus.net>
Reported-by: Lesław Kopeć <leslaw.kopec@nasza-klasa.pl>
Reported-by: Aman Gupta <aman@tmm1.net>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-4v31etnhgg9kwd6ocgx3rxl8@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
[bwh: Backported to 3.2: change filename]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
---
 kernel/sched.c | 53 ++++++++++++++++++++++++++---------------------------
 1 files changed, 26 insertions(+), 27 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 106a3b8..299f55c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3538,13 +3538,10 @@ calc_load_n(unsigned long load, unsigned long exp,
 * Once we've updated the global active value, we need to apply the exponential
 * weights adjusted to the number of cycles missed.
 */
-static void calc_global_nohz(unsigned long ticks)
+static void calc_global_nohz(void)
 {
 long delta, active, n;

- if (time_before(jiffies, calc_load_update))
- return;
-
 /*
 * If we crossed a calc_load_update boundary, make sure to fold
 * any pending idle changes, the respective CPUs might have
@@ -3556,31 +3553,25 @@ static void calc_global_nohz(unsigned long ticks)
 atomic_long_add(delta, &calc_load_tasks);

 /*
- * If we were idle for multiple load cycles, apply them.
+ * It could be the one fold was all it took, we done!
 */
- if (ticks >= LOAD_FREQ) {
- n = ticks / LOAD_FREQ;
+ if (time_before(jiffies, calc_load_update + 10))
+ return;

- active = atomic_long_read(&calc_load_tasks);
- active = active > 0 ? active * FIXED_1 : 0;
+ /*
+ * Catch-up, fold however many we are behind still
+ */
+ delta = jiffies - calc_load_update - 10;
+ n = 1 + (delta / LOAD_FREQ);

- avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
- avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
- avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
+ active = atomic_long_read(&calc_load_tasks);
+ active = active > 0 ? active * FIXED_1 : 0;

- calc_load_update += n * LOAD_FREQ;
- }
+ avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
+ avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
+ avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);

- /*
- * Its possible the remainder of the above division also crosses
- * a LOAD_FREQ period, the regular check in calc_global_load()
- * which comes after this will take care of that.
- *
- * Consider us being 11 ticks before a cycle completion, and us
- * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
- * age us 4 cycles, and the test in calc_global_load() will
- * pick up the final one.
- */
+ calc_load_update += n * LOAD_FREQ;
 }
 #else
 static void calc_load_account_idle(struct rq *this_rq)
@@ -3592,7 +3583,7 @@ static inline long calc_load_fold_idle(void)
 return 0;
 }

-static void calc_global_nohz(unsigned long ticks)
+static void calc_global_nohz(void)
 {
 }
 #endif
@@ -3620,8 +3611,6 @@ void calc_global_load(unsigned long ticks)
 {
 long active;

- calc_global_nohz(ticks);
-
 if (time_before(jiffies, calc_load_update + 10))
 return;

@@ -3633,6 +3622,16 @@ void calc_global_load(unsigned long ticks)
 avenrun[2] = calc_load(avenrun[2], EXP_15, active);

 calc_load_update += LOAD_FREQ;
+
+ /*
+ * Account one period with whatever state we found before
+ * folding in the nohz state and ageing the entire idle period.
+ *
+ * This avoids losing a sample when we go idle between
+ * calc_load_account_active() (10 ticks ago) and now and thus
+ * under-accounting.
+ */
+ calc_global_nohz();
 }

 /*
--
1.7.7.6

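For reference, below is a minimal userspace sketch of the fixed-point math behind calc_load()/calc_load_n() and of the ordering this patch establishes: sample the period that actually elapsed first, then age the averages over the missed idle periods. The constants mirror the 3.2-era include/linux/sched.h; calc_load()/calc_load_n() are simplified re-statements, and fixed_power() is a stand-in for the kernel's fixed_power_int(). It is an illustration of the accounting scheme, not the kernel code itself.

/*
 * Simplified sketch of the avenrun[] update that the patch reorders.
 * Constants as in include/linux/sched.h (3.2-era kernels).
 */
#include <stdio.h>

#define FSHIFT   11                  /* bits of fractional precision */
#define FIXED_1  (1UL << FSHIFT)     /* 1.0 in fixed point */
#define EXP_1    1884                /* FIXED_1 * exp(-5s/1min)  */
#define EXP_5    2014                /* FIXED_1 * exp(-5s/5min)  */
#define EXP_15   2037                /* FIXED_1 * exp(-5s/15min) */

/* One exponential-decay step: load = load*e + active*(1 - e). */
static unsigned long calc_load(unsigned long load, unsigned long exp,
			       unsigned long active)
{
	load *= exp;
	load += active * (FIXED_1 - exp);
	return load >> FSHIFT;
}

/* x^n in fixed point by squaring; stand-in for fixed_power_int(). */
static unsigned long fixed_power(unsigned long x, unsigned int n)
{
	unsigned long result = FIXED_1;

	while (n) {
		if (n & 1)
			result = (result * x) >> FSHIFT;
		x = (x * x) >> FSHIFT;
		n >>= 1;
	}
	return result;
}

/* n decay steps against a constant 'active', like calc_load_n(). */
static unsigned long calc_load_n(unsigned long load, unsigned long exp,
				 unsigned long active, unsigned int n)
{
	return calc_load(load, fixed_power(exp, n), active);
}

int main(void)
{
	/*
	 * Say the 1-minute average sits at 2.00, two tasks were runnable
	 * for one more LOAD_FREQ period, and the CPU then went fully idle
	 * for three further periods.
	 */
	unsigned long avenrun = 2 * FIXED_1;
	unsigned long active  = 2 * FIXED_1;

	/* New ordering: fold the busy period we actually observed ... */
	avenrun = calc_load(avenrun, EXP_1, active);
	/* ... then catch up over the idle periods (active == 0). */
	avenrun = calc_load_n(avenrun, EXP_1, 0, 3);

	printf("1-min load: %lu.%02lu\n", avenrun >> FSHIFT,
	       ((avenrun & (FIXED_1 - 1)) * 100) >> FSHIFT);
	return 0;
}

Run standalone, this prints the decayed 1-minute figure. The point of the patch is exactly this ordering: calc_global_load() folds one period with whatever active count it finds, and only then calls calc_global_nohz() to fold the nohz remainder and age avenrun[] over the missed LOAD_FREQ windows, rather than folding the idle remainder up front and losing that last busy sample.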