summaryrefslogtreecommitdiffstats
path: root/patches/boot_time_opt_guest/0106-cgroup-delayed-work.patch
diff options
context:
space:
mode:
Diffstat (limited to 'patches/boot_time_opt_guest/0106-cgroup-delayed-work.patch')
-rw-r--r-- patches/boot_time_opt_guest/0106-cgroup-delayed-work.patch | 133
1 files changed, 133 insertions, 0 deletions
diff --git a/patches/boot_time_opt_guest/0106-cgroup-delayed-work.patch b/patches/boot_time_opt_guest/0106-cgroup-delayed-work.patch
new file mode 100644
index 0000000..438ed97
--- /dev/null
+++ b/patches/boot_time_opt_guest/0106-cgroup-delayed-work.patch
@@ -0,0 +1,133 @@
1From f80cc54895e35a762036382c73bc48ac813e05a5 Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Fri, 28 Aug 2015 11:00:36 -0500
4Subject: [PATCH] cgroup: delayed work
5
6---
7 include/linux/cgroup-defs.h | 2 +-
8 kernel/cgroup/cgroup-internal.h | 8 ++++++++
9 kernel/cgroup/cgroup-v1.c | 8 --------
10 kernel/cgroup/cgroup.c | 21 ++++++++++++---------
11 4 files changed, 21 insertions(+), 18 deletions(-)
12
13diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
14index ec47101cb1bf..4827bf0809d7 100644
15--- a/include/linux/cgroup-defs.h
16+++ b/include/linux/cgroup-defs.h
17@@ -136,7 +136,7 @@ struct cgroup_subsys_state {
18
19 /* percpu_ref killing and RCU release */
20 struct rcu_head rcu_head;
21- struct work_struct destroy_work;
22+ struct delayed_work destroy_work;
23
24 /*
25 * PI: the parent css. Placed here for cache proximity to following
26diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
27index 00f4d6bf048f..854ef3216429 100644
28--- a/kernel/cgroup/cgroup-internal.h
29+++ b/kernel/cgroup/cgroup-internal.h
30@@ -8,6 +8,14 @@
31 #include <linux/refcount.h>
32
33 /*
34+ * pidlists linger the following amount before being destroyed. The goal
35+ * is avoiding frequent destruction in the middle of consecutive read calls
36+ * Expiring in the middle is a performance problem not a correctness one.
37+ * 1 sec should be enough.
38+ */
39+#define CGROUP_PIDLIST_DESTROY_DELAY round_jiffies_relative(HZ)
40+
41+/*
42 * A cgroup can be associated with multiple css_sets as different tasks may
43 * belong to different cgroups on different hierarchies. In the other
44 * direction, a css_set is naturally associated with multiple cgroups.
45diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
46index 85d75152402d..60bb59d44d01 100644
47--- a/kernel/cgroup/cgroup-v1.c
48+++ b/kernel/cgroup/cgroup-v1.c
49@@ -16,14 +16,6 @@
50
51 #include <trace/events/cgroup.h>
52
53-/*
54- * pidlists linger the following amount before being destroyed. The goal
55- * is avoiding frequent destruction in the middle of consecutive read calls
56- * Expiring in the middle is a performance problem not a correctness one.
57- * 1 sec should be enough.
58- */
59-#define CGROUP_PIDLIST_DESTROY_DELAY HZ
60-
61 /* Controllers blocked by the commandline in v1 */
62 static u16 cgroup_no_v1_mask;
63
64diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
65index 8d4e85eae42c..7c8294298983 100644
66--- a/kernel/cgroup/cgroup.c
67+++ b/kernel/cgroup/cgroup.c
68@@ -3841,8 +3841,9 @@ static struct cftype cgroup_base_files[] = {
69 */
70 static void css_free_work_fn(struct work_struct *work)
71 {
72+ struct delayed_work *dwork = to_delayed_work(work);
73 struct cgroup_subsys_state *css =
74- container_of(work, struct cgroup_subsys_state, destroy_work);
75+ container_of(dwork, struct cgroup_subsys_state, destroy_work);
76 struct cgroup_subsys *ss = css->ss;
77 struct cgroup *cgrp = css->cgroup;
78
79@@ -3891,14 +3892,15 @@ static void css_free_rcu_fn(struct rcu_head *rcu_head)
80 struct cgroup_subsys_state *css =
81 container_of(rcu_head, struct cgroup_subsys_state, rcu_head);
82
83- INIT_WORK(&css->destroy_work, css_free_work_fn);
84- queue_work(cgroup_destroy_wq, &css->destroy_work);
85+ INIT_DELAYED_WORK(&css->destroy_work, css_free_work_fn);
86+ queue_delayed_work(cgroup_destroy_wq, &css->destroy_work, CGROUP_PIDLIST_DESTROY_DELAY);
87 }
88
89 static void css_release_work_fn(struct work_struct *work)
90 {
91+ struct delayed_work *dwork = to_delayed_work(work);
92 struct cgroup_subsys_state *css =
93- container_of(work, struct cgroup_subsys_state, destroy_work);
94+ container_of(dwork, struct cgroup_subsys_state, destroy_work);
95 struct cgroup_subsys *ss = css->ss;
96 struct cgroup *cgrp = css->cgroup;
97
98@@ -3943,8 +3945,8 @@ static void css_release(struct percpu_ref *ref)
99 struct cgroup_subsys_state *css =
100 container_of(ref, struct cgroup_subsys_state, refcnt);
101
102- INIT_WORK(&css->destroy_work, css_release_work_fn);
103- queue_work(cgroup_destroy_wq, &css->destroy_work);
104+ INIT_DELAYED_WORK(&css->destroy_work, css_release_work_fn);
105+ queue_delayed_work(cgroup_destroy_wq, &css->destroy_work, CGROUP_PIDLIST_DESTROY_DELAY);
106 }
107
108 static void init_and_link_css(struct cgroup_subsys_state *css,
109@@ -4225,8 +4227,9 @@ int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode)
110 */
111 static void css_killed_work_fn(struct work_struct *work)
112 {
113+ struct delayed_work *dwork = to_delayed_work(work);
114 struct cgroup_subsys_state *css =
115- container_of(work, struct cgroup_subsys_state, destroy_work);
116+ container_of(dwork, struct cgroup_subsys_state, destroy_work);
117
118 mutex_lock(&cgroup_mutex);
119
120@@ -4247,8 +4250,8 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
121 container_of(ref, struct cgroup_subsys_state, refcnt);
122
123 if (atomic_dec_and_test(&css->online_cnt)) {
124- INIT_WORK(&css->destroy_work, css_killed_work_fn);
125- queue_work(cgroup_destroy_wq, &css->destroy_work);
126+ INIT_DELAYED_WORK(&css->destroy_work, css_killed_work_fn);
127+ queue_delayed_work(cgroup_destroy_wq, &css->destroy_work, CGROUP_PIDLIST_DESTROY_DELAY);
128 }
129 }
130
131--
1322.13.2
133