From 6203cbca5c9df9095882544f1eaa370a9cbd364a Mon Sep 17 00:00:00 2001
From: Koen Kooi
Date: Wed, 8 Aug 2012 10:41:23 +0200
Subject: linux-ti33x-psp 3.2: update to 3.2.25

Signed-off-by: Koen Kooi
Signed-off-by: Denys Dmytriyenko
---
 ...8-sched-nohz-Fix-rq-cpu_load-calculations.patch | 137 +++++++++++++++++++++
 1 file changed, 137 insertions(+)
 create mode 100644 recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.25/0018-sched-nohz-Fix-rq-cpu_load-calculations.patch

diff --git a/recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.25/0018-sched-nohz-Fix-rq-cpu_load-calculations.patch b/recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.25/0018-sched-nohz-Fix-rq-cpu_load-calculations.patch
new file mode 100644
index 00000000..6548d426
--- /dev/null
+++ b/recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.25/0018-sched-nohz-Fix-rq-cpu_load-calculations.patch
@@ -0,0 +1,137 @@
+From af56d9e56ec0729f6aa8c3a51b9bddbdcd8dfcf0 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra
+Date: Fri, 11 May 2012 17:31:26 +0200
+Subject: [PATCH 18/73] sched/nohz: Fix rq->cpu_load[] calculations
+
+commit 556061b00c9f2fd6a5524b6bde823ef12f299ecf upstream.
+
+While investigating why the load-balancer did funny I found that the
+rq->cpu_load[] tables were completely screwy.. a bit more digging
+revealed that the updates that got through were missing ticks followed
+by a catchup of 2 ticks.
+
+The catchup assumes the cpu was idle during that time (since only nohz
+can cause missed ticks and the machine is idle etc..) this means that
+esp. the higher indices were significantly lower than they ought to
+be.
+
+The reason for this is that its not correct to compare against jiffies
+on every jiffy on any other cpu than the cpu that updates jiffies.
+
+This patch cludges around it by only doing the catch-up stuff from
+nohz_idle_balance() and doing the regular stuff unconditionally from
+the tick.
+
+Signed-off-by: Peter Zijlstra
+Cc: pjt@google.com
+Cc: Venkatesh Pallipadi
+Link: http://lkml.kernel.org/n/tip-tp4kj18xdd5aj4vvj0qg55s2@git.kernel.org
+Signed-off-by: Ingo Molnar
+[bwh: Backported to 3.2: adjust filenames and context; keep functions static]
+Signed-off-by: Ben Hutchings
+---
+ kernel/sched.c      |   53 ++++++++++++++++++++++++++++++++++++++------------
+ kernel/sched_fair.c |    2 +-
+ 2 files changed, 41 insertions(+), 14 deletions(-)
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 52ac69b..a409d81 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -1887,7 +1887,7 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
+ 
+ static void update_sysctl(void);
+ static int get_update_sysctl_factor(void);
+-static void update_cpu_load(struct rq *this_rq);
++static void update_idle_cpu_load(struct rq *this_rq);
+ 
+ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+ {
+@@ -3855,22 +3855,13 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
+  * scheduler tick (TICK_NSEC). With tickless idle this will not be called
+  * every tick. We fix it up based on jiffies.
+  */
+-static void update_cpu_load(struct rq *this_rq)
++static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
++			      unsigned long pending_updates)
+ {
+-	unsigned long this_load = this_rq->load.weight;
+-	unsigned long curr_jiffies = jiffies;
+-	unsigned long pending_updates;
+ 	int i, scale;
+ 
+ 	this_rq->nr_load_updates++;
+ 
+-	/* Avoid repeated calls on same jiffy, when moving in and out of idle */
+-	if (curr_jiffies == this_rq->last_load_update_tick)
+-		return;
+-
+-	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
+-	this_rq->last_load_update_tick = curr_jiffies;
+-
+ 	/* Update our load: */
+ 	this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
+ 	for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
+@@ -3895,9 +3886,45 @@ static void update_cpu_load(struct rq *this_rq)
+ 	sched_avg_update(this_rq);
+ }
+ 
++/*
++ * Called from nohz_idle_balance() to update the load ratings before doing the
++ * idle balance.
++ */
++static void update_idle_cpu_load(struct rq *this_rq)
++{
++	unsigned long curr_jiffies = jiffies;
++	unsigned long load = this_rq->load.weight;
++	unsigned long pending_updates;
++
++	/*
++	 * Bloody broken means of dealing with nohz, but better than nothing..
++	 * jiffies is updated by one cpu, another cpu can drift wrt the jiffy
++	 * update and see 0 difference the one time and 2 the next, even though
++	 * we ticked at roughtly the same rate.
++	 *
++	 * Hence we only use this from nohz_idle_balance() and skip this
++	 * nonsense when called from the scheduler_tick() since that's
++	 * guaranteed a stable rate.
++	 */
++	if (load || curr_jiffies == this_rq->last_load_update_tick)
++		return;
++
++	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
++	this_rq->last_load_update_tick = curr_jiffies;
++
++	__update_cpu_load(this_rq, load, pending_updates);
++}
++
++/*
++ * Called from scheduler_tick()
++ */
+ static void update_cpu_load_active(struct rq *this_rq)
+ {
+-	update_cpu_load(this_rq);
++	/*
++	 * See the mess in update_idle_cpu_load().
++	 */
++	this_rq->last_load_update_tick = jiffies;
++	__update_cpu_load(this_rq, this_rq->load.weight, 1);
+ 
+ 	calc_load_account_active(this_rq);
+ }
+diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
+index 8a39fa3..66e4576 100644
+--- a/kernel/sched_fair.c
++++ b/kernel/sched_fair.c
+@@ -4735,7 +4735,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
+ 
+ 	raw_spin_lock_irq(&this_rq->lock);
+ 	update_rq_clock(this_rq);
+-	update_cpu_load(this_rq);
++	update_idle_cpu_load(this_rq);
+ 	raw_spin_unlock_irq(&this_rq->lock);
+ 
+ 	rebalance_domains(balance_cpu, CPU_IDLE);
+-- 
+1.7.7.6
+
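For readers following the scheduler change itself: the catch-up that update_idle_cpu_load() now performs boils down to decaying each cpu_load[] index as if every missed tick had been idle, then folding in the current load. The user-space C sketch below is a minimal model of that arithmetic, not kernel code: the starting loads and tick counts are made up, and the loop-based decay_load_missed() stands in for the kernel's precomputed degrade-factor table, but the per-index update formula matches the one in __update_cpu_load().

	#include <stdio.h>

	#define CPU_LOAD_IDX_MAX 5

	/*
	 * One idle tick decays load by (2^idx - 1) / 2^idx. The kernel
	 * uses a precomputed lookup table for this; the loop here is the
	 * slow but equivalent form.
	 */
	static unsigned long decay_load_missed(unsigned long load,
					       unsigned long missed_updates,
					       int idx)
	{
		while (missed_updates--)
			load -= load >> idx;
		return load;
	}

	int main(void)
	{
		/* illustrative values, not taken from a real runqueue */
		unsigned long cpu_load[CPU_LOAD_IDX_MAX] = { 0, 800, 800, 800, 800 };
		unsigned long this_load = 100;		/* rq load at this update */
		unsigned long pending_updates = 3;	/* ticks missed while nohz-idle */
		int i, scale;

		cpu_load[0] = this_load;		/* fasttrack for idx 0 */
		for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
			unsigned long old_load, new_load;

			/* decay stale history as if the missed ticks were idle */
			old_load = decay_load_missed(cpu_load[i],
						     pending_updates - 1, i);
			new_load = this_load;
			/* round the averaging division up when load increases */
			if (new_load > old_load)
				new_load += scale - 1;
			cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;

			printf("cpu_load[%d] = %lu\n", i, cpu_load[i]);
		}
		return 0;
	}

The higher indices decay more slowly, which is why treating ticks as missed-then-caught-up on the wrong CPU (the jiffies drift described in the commit message) dragged exactly those indices below their true value.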