From f80cc54895e35a762036382c73bc48ac813e05a5 Mon Sep 17 00:00:00 2001
From: Arjan van de Ven <arjan@linux.intel.com>
Date: Fri, 28 Aug 2015 11:00:36 -0500
Subject: [PATCH] cgroup: delayed work

---
 include/linux/cgroup-defs.h     |  2 +-
 kernel/cgroup/cgroup-internal.h |  8 ++++++++
 kernel/cgroup/cgroup-v1.c       |  8 --------
 kernel/cgroup/cgroup.c          | 21 ++++++++++++---------
 4 files changed, 21 insertions(+), 18 deletions(-)

diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index ec47101cb1bf..4827bf0809d7 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -136,7 +136,7 @@ struct cgroup_subsys_state {
 
 	/* percpu_ref killing and RCU release */
 	struct rcu_head rcu_head;
-	struct work_struct destroy_work;
+	struct delayed_work destroy_work;
 
 	/*
 	 * PI: the parent css.	Placed here for cache proximity to following
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index 00f4d6bf048f..854ef3216429 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -8,6 +8,14 @@
 #include <linux/refcount.h>
 
 /*
+ * pidlists linger the following amount before being destroyed.  The goal
+ * is avoiding frequent destruction in the middle of consecutive read calls
+ * Expiring in the middle is a performance problem not a correctness one.
+ * 1 sec should be enough.
+ */
+#define CGROUP_PIDLIST_DESTROY_DELAY	round_jiffies_relative(HZ)
+
+/*
  * A cgroup can be associated with multiple css_sets as different tasks may
  * belong to different cgroups on different hierarchies.  In the other
  * direction, a css_set is naturally associated with multiple cgroups.
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 85d75152402d..60bb59d44d01 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -16,14 +16,6 @@
 
 #include <trace/events/cgroup.h>
 
-/*
- * pidlists linger the following amount before being destroyed.  The goal
- * is avoiding frequent destruction in the middle of consecutive read calls
- * Expiring in the middle is a performance problem not a correctness one.
- * 1 sec should be enough.
- */
-#define CGROUP_PIDLIST_DESTROY_DELAY	HZ
-
 /* Controllers blocked by the commandline in v1 */
 static u16 cgroup_no_v1_mask;
 
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 8d4e85eae42c..7c8294298983 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -3841,8 +3841,9 @@ static struct cftype cgroup_base_files[] = {
  */
 static void css_free_work_fn(struct work_struct *work)
 {
+	struct delayed_work *dwork = to_delayed_work(work);
 	struct cgroup_subsys_state *css =
-		container_of(work, struct cgroup_subsys_state, destroy_work);
+		container_of(dwork, struct cgroup_subsys_state, destroy_work);
 	struct cgroup_subsys *ss = css->ss;
 	struct cgroup *cgrp = css->cgroup;
 
@@ -3891,14 +3892,15 @@ static void css_free_rcu_fn(struct rcu_head *rcu_head)
 	struct cgroup_subsys_state *css =
 		container_of(rcu_head, struct cgroup_subsys_state, rcu_head);
 
-	INIT_WORK(&css->destroy_work, css_free_work_fn);
-	queue_work(cgroup_destroy_wq, &css->destroy_work);
+	INIT_DELAYED_WORK(&css->destroy_work, css_free_work_fn);
+	queue_delayed_work(cgroup_destroy_wq, &css->destroy_work, CGROUP_PIDLIST_DESTROY_DELAY);
 }
 
 static void css_release_work_fn(struct work_struct *work)
 {
+	struct delayed_work *dwork = to_delayed_work(work);
 	struct cgroup_subsys_state *css =
-		container_of(work, struct cgroup_subsys_state, destroy_work);
+		container_of(dwork, struct cgroup_subsys_state, destroy_work);
 	struct cgroup_subsys *ss = css->ss;
 	struct cgroup *cgrp = css->cgroup;
 
@@ -3943,8 +3945,8 @@ static void css_release(struct percpu_ref *ref)
 	struct cgroup_subsys_state *css =
 		container_of(ref, struct cgroup_subsys_state, refcnt);
 
-	INIT_WORK(&css->destroy_work, css_release_work_fn);
-	queue_work(cgroup_destroy_wq, &css->destroy_work);
+	INIT_DELAYED_WORK(&css->destroy_work, css_release_work_fn);
+	queue_delayed_work(cgroup_destroy_wq, &css->destroy_work, CGROUP_PIDLIST_DESTROY_DELAY);
 }
 
 static void init_and_link_css(struct cgroup_subsys_state *css,
@@ -4225,8 +4227,9 @@ int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode)
  */
 static void css_killed_work_fn(struct work_struct *work)
 {
+	struct delayed_work *dwork = to_delayed_work(work);
 	struct cgroup_subsys_state *css =
-		container_of(work, struct cgroup_subsys_state, destroy_work);
+		container_of(dwork, struct cgroup_subsys_state, destroy_work);
 
 	mutex_lock(&cgroup_mutex);
 
@@ -4247,8 +4250,8 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
 		container_of(ref, struct cgroup_subsys_state, refcnt);
 
 	if (atomic_dec_and_test(&css->online_cnt)) {
-		INIT_WORK(&css->destroy_work, css_killed_work_fn);
-		queue_work(cgroup_destroy_wq, &css->destroy_work);
+		INIT_DELAYED_WORK(&css->destroy_work, css_killed_work_fn);
+		queue_delayed_work(cgroup_destroy_wq, &css->destroy_work, CGROUP_PIDLIST_DESTROY_DELAY);
 	}
 }
 
-- 
2.13.2
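
Editor's note (not part of the patch above): the change converts the css destroy_work from a plain work_struct to a delayed_work, queues it roughly one second out, and reuses the pidlist delay with round_jiffies_relative(HZ) so the expiry lands on a jiffy-aligned second boundary. The sketch below is a minimal, hypothetical kernel module illustrating that same embedded-delayed_work pattern (INIT_DELAYED_WORK, queue_delayed_work, to_delayed_work + container_of in the handler). It is a hand-written illustration under those assumptions, not code taken from the patch or the kernel tree; names such as demo_obj and demo_destroy_fn are invented for the example.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* A larger object with a delayed_work embedded in it, mirroring
 * struct cgroup_subsys_state::destroy_work in the patch. */
struct demo_obj {
	int id;
	struct delayed_work destroy_work;
};

static struct demo_obj demo = { .id = 1 };

static void demo_destroy_fn(struct work_struct *work)
{
	/* The handler receives a work_struct; recover the delayed_work
	 * first, then the enclosing object, as the patch does. */
	struct delayed_work *dwork = to_delayed_work(work);
	struct demo_obj *obj = container_of(dwork, struct demo_obj, destroy_work);

	pr_info("demo: delayed teardown of object %d\n", obj->id);
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&demo.destroy_work, demo_destroy_fn);
	/* ~1 second, rounded to a whole-second jiffy boundary so the
	 * timer can be batched with other rounded timers. */
	queue_delayed_work(system_wq, &demo.destroy_work,
			   round_jiffies_relative(HZ));
	return 0;
}

static void __exit demo_exit(void)
{
	/* Make sure the pending work is gone before the module unloads. */
	cancel_delayed_work_sync(&demo.destroy_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The only structural difference from a plain work_struct is that the delay argument to queue_delayed_work() gives the workqueue a timer to arm; everything else (embedding, container_of recovery, cancellation) follows the usual workqueue pattern.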