From cc7c761946c6b9fa820acc90d7514795af3f42f5 Mon Sep 17 00:00:00 2001
From: jplozi <jplozi@unice.fr>
Date: Fri, 11 Mar 2016 15:18:06 +0100
Subject: [PATCH 111/126] overload on wakeup

source https://github.com/jplozi/wastedcores

As an experiment, apply the learnings from the wasted-cores paper and see
how the performance works out. With the data from this, we should be able
to work with Peter and the rest of the scheduler folks on a more
permanent/elegant solution.
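
The added fast path is simple: on every wakeup where prev_cpu already has
something running (and unconditionally on every 16th wakeup), scan all
online CPUs allowed by the task's affinity, pick an idle one with the
highest arch_asym_cpu_priority() (preferring prev_cpu on a priority tie),
and return it directly instead of walking the scheduling domains. Below is
a minimal userspace sketch of that selection loop, for illustration only;
nr_running[], cpu_priority[] and cpu_allowed() are made-up stand-ins for
cpu_rq()->nr_running, arch_asym_cpu_priority() and cpumask_test_cpu(),
not kernel interfaces.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Illustrative stand-ins for kernel state; not real kernel interfaces. */
static int nr_running[NR_CPUS]   = { 2, 0, 0, 0 };     /* like cpu_rq(cpu)->nr_running */
static int cpu_priority[NR_CPUS] = { 10, 20, 30, 30 }; /* like arch_asym_cpu_priority(cpu) */

static bool cpu_allowed(int cpu)
{
	/* like cpumask_test_cpu(cpu, &p->cpus_allowed); allow every CPU here */
	return cpu >= 0 && cpu < NR_CPUS;
}

/*
 * Mirror of the patch's fast path: pick an idle, allowed CPU with the
 * highest priority; on a priority tie, prefer prev_cpu (cache locality).
 * Returns -1 when no idle CPU is found, meaning "fall back to the normal
 * scheduling-domain walk".
 */
static int pick_idle_cpu(int prev_cpu)
{
	int bestprio = -5000;
	int bestcpu = -1;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!cpu_allowed(cpu) || nr_running[cpu])
			continue;
		if (cpu_priority[cpu] > bestprio ||
		    (prev_cpu == cpu && bestprio == cpu_priority[cpu])) {
			bestcpu = cpu;
			bestprio = cpu_priority[cpu];
		}
	}

	/*
	 * Note: the patch itself only takes the shortcut when bestcpu > 0,
	 * so CPU 0 is never returned by its fast path.
	 */
	return bestcpu;
}

int main(void)
{
	/* CPUs 2 and 3 are idle with equal priority; 3 wins because it is prev_cpu. */
	printf("picked CPU %d\n", pick_idle_cpu(3));
	return 0;
}
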
---
 kernel/sched/fair.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

--- linux-4.14/kernel/sched/fair.c.org	2017-11-18 23:01:57.353611850 +0000
+++ linux-4.14/kernel/sched/fair.c	2017-11-19 15:24:38.093637926 +0000
@@ -5925,6 +5925,8 @@
 	return min_cap * 1024 < task_util(p) * capacity_margin;
 }
 
+
+static unsigned int once_in_a_while;
 /*
  * select_task_rq_fair: Select target runqueue for the waking task in domains
  * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
@@ -5953,6 +5955,30 @@
 	}
 
 	rcu_read_lock();
+
+	once_in_a_while++;
+
+	if (cpu_rq(prev_cpu)->nr_running || (once_in_a_while & 15) == 0) {
+		int _cpu;
+		int bestprio = -5000;
+		int bestcpu = -1;
+
+		for_each_online_cpu(_cpu) {
+			if (!cpumask_test_cpu(_cpu, &p->cpus_allowed) ||
+				cpu_rq(_cpu)->nr_running)
+				continue;
+			if (arch_asym_cpu_priority(_cpu) > bestprio || (prev_cpu == _cpu && bestprio == arch_asym_cpu_priority(_cpu))) {
+				bestcpu = _cpu;
+				bestprio = arch_asym_cpu_priority(_cpu);
+			}
+		}
+
+		if (bestcpu > 0) {
+			rcu_read_unlock();
+			return bestcpu;
+		}
+	}
+
 	for_each_domain(cpu, tmp) {
 		if (!(tmp->flags & SD_LOAD_BALANCE))
 			break;