Commit | Line | Data |
---|---|---|
e8976aa5 GKH |
1 | From 82d6489d0fed2ec8a8c48c19e8d8a04ac8e5bb26 Mon Sep 17 00:00:00 2001 |
2 | From: Daniel Bristot de Oliveira <bristot@redhat.com> | |
3 | Date: Wed, 22 Jun 2016 17:28:41 -0300 | |
4 | Subject: cgroup: Disable IRQs while holding css_set_lock | |
5 | ||
6 | From: Daniel Bristot de Oliveira <bristot@redhat.com> | |
7 | ||
8 | commit 82d6489d0fed2ec8a8c48c19e8d8a04ac8e5bb26 upstream. | |
9 | ||
10 | While testing the deadline scheduler + cgroup setup I hit this | |
11 | warning. | |
12 | ||
13 | [ 132.612935] ------------[ cut here ]------------ | |
14 | [ 132.612951] WARNING: CPU: 5 PID: 0 at kernel/softirq.c:150 __local_bh_enable_ip+0x6b/0x80 | |
15 | [ 132.612952] Modules linked in: (a ton of modules...) | |
16 | [ 132.612981] CPU: 5 PID: 0 Comm: swapper/5 Not tainted 4.7.0-rc2 #2 | |
17 | [ 132.612981] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.8.2-20150714_191134- 04/01/2014 | |
18 | [ 132.612982] 0000000000000086 45c8bb5effdd088b ffff88013fd43da0 ffffffff813d229e | |
19 | [ 132.612984] 0000000000000000 0000000000000000 ffff88013fd43de0 ffffffff810a652b | |
20 | [ 132.612985] 00000096811387b5 0000000000000200 ffff8800bab29d80 ffff880034c54c00 | |
21 | [ 132.612986] Call Trace: | |
22 | [ 132.612987] <IRQ> [<ffffffff813d229e>] dump_stack+0x63/0x85 | |
23 | [ 132.612994] [<ffffffff810a652b>] __warn+0xcb/0xf0 | |
24 | [ 132.612997] [<ffffffff810e76a0>] ? push_dl_task.part.32+0x170/0x170 | |
25 | [ 132.612999] [<ffffffff810a665d>] warn_slowpath_null+0x1d/0x20 | |
26 | [ 132.613000] [<ffffffff810aba5b>] __local_bh_enable_ip+0x6b/0x80 | |
27 | [ 132.613008] [<ffffffff817d6c8a>] _raw_write_unlock_bh+0x1a/0x20 | |
28 | [ 132.613010] [<ffffffff817d6c9e>] _raw_spin_unlock_bh+0xe/0x10 | |
29 | [ 132.613015] [<ffffffff811388ac>] put_css_set+0x5c/0x60 | |
30 | [ 132.613016] [<ffffffff8113dc7f>] cgroup_free+0x7f/0xa0 | |
31 | [ 132.613017] [<ffffffff810a3912>] __put_task_struct+0x42/0x140 | |
32 | [ 132.613018] [<ffffffff810e776a>] dl_task_timer+0xca/0x250 | |
33 | [ 132.613027] [<ffffffff810e76a0>] ? push_dl_task.part.32+0x170/0x170 | |
34 | [ 132.613030] [<ffffffff8111371e>] __hrtimer_run_queues+0xee/0x270 | |
35 | [ 132.613031] [<ffffffff81113ec8>] hrtimer_interrupt+0xa8/0x190 | |
36 | [ 132.613034] [<ffffffff81051a58>] local_apic_timer_interrupt+0x38/0x60 | |
37 | [ 132.613035] [<ffffffff817d9b0d>] smp_apic_timer_interrupt+0x3d/0x50 | |
38 | [ 132.613037] [<ffffffff817d7c5c>] apic_timer_interrupt+0x8c/0xa0 | |
39 | [ 132.613038] <EOI> [<ffffffff81063466>] ? native_safe_halt+0x6/0x10 | |
40 | [ 132.613043] [<ffffffff81037a4e>] default_idle+0x1e/0xd0 | |
41 | [ 132.613044] [<ffffffff810381cf>] arch_cpu_idle+0xf/0x20 | |
42 | [ 132.613046] [<ffffffff810e8fda>] default_idle_call+0x2a/0x40 | |
43 | [ 132.613047] [<ffffffff810e92d7>] cpu_startup_entry+0x2e7/0x340 | |
44 | [ 132.613048] [<ffffffff81050235>] start_secondary+0x155/0x190 | |
45 | [ 132.613049] ---[ end trace f91934d162ce9977 ]--- | |
46 | ||
47 | The warning is caused by the spin_(lock|unlock)_bh(&css_set_lock) in the | |
48 | interrupt context. Convert the spin_lock_bh to spin_lock_irq(save) to | |
49 | avoid this problem - and other problems of sharing a spinlock with an | |
50 | interrupt. | |
51 | ||
52 | Cc: Tejun Heo <tj@kernel.org> | |
53 | Cc: Li Zefan <lizefan@huawei.com> | |
54 | Cc: Johannes Weiner <hannes@cmpxchg.org> | |
55 | Cc: Juri Lelli <juri.lelli@arm.com> | |
56 | Cc: Steven Rostedt <rostedt@goodmis.org> | |
57 | Cc: cgroups@vger.kernel.org | |
58 | Cc: linux-kernel@vger.kernel.org | |
59 | Reviewed-by: Rik van Riel <riel@redhat.com> | |
60 | Reviewed-by: "Luis Claudio R. Goncalves" <lgoncalv@redhat.com> | |
61 | Signed-off-by: Daniel Bristot de Oliveira <bristot@redhat.com> | |
62 | Acked-by: Zefan Li <lizefan@huawei.com> | |
63 | Signed-off-by: Tejun Heo <tj@kernel.org> | |
64 | Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | |
65 | ||
66 | --- | |
67 | kernel/cgroup.c | 142 +++++++++++++++++++++++++++++--------------------------- | |
68 | 1 file changed, 74 insertions(+), 68 deletions(-) | |
69 | ||
70 | --- a/kernel/cgroup.c | |
71 | +++ b/kernel/cgroup.c | |
72 | @@ -837,6 +837,8 @@ static void put_css_set_locked(struct cs | |
73 | ||
74 | static void put_css_set(struct css_set *cset) | |
75 | { | |
76 | + unsigned long flags; | |
77 | + | |
78 | /* | |
79 | * Ensure that the refcount doesn't hit zero while any readers | |
80 | * can see it. Similar to atomic_dec_and_lock(), but for an | |
81 | @@ -845,9 +847,9 @@ static void put_css_set(struct css_set * | |
82 | if (atomic_add_unless(&cset->refcount, -1, 1)) | |
83 | return; | |
84 | ||
85 | - spin_lock_bh(&css_set_lock); | |
86 | + spin_lock_irqsave(&css_set_lock, flags); | |
87 | put_css_set_locked(cset); | |
88 | - spin_unlock_bh(&css_set_lock); | |
89 | + spin_unlock_irqrestore(&css_set_lock, flags); | |
90 | } | |
91 | ||
92 | /* | |
93 | @@ -1070,11 +1072,11 @@ static struct css_set *find_css_set(stru | |
94 | ||
95 | /* First see if we already have a cgroup group that matches | |
96 | * the desired set */ | |
97 | - spin_lock_bh(&css_set_lock); | |
98 | + spin_lock_irq(&css_set_lock); | |
99 | cset = find_existing_css_set(old_cset, cgrp, template); | |
100 | if (cset) | |
101 | get_css_set(cset); | |
102 | - spin_unlock_bh(&css_set_lock); | |
103 | + spin_unlock_irq(&css_set_lock); | |
104 | ||
105 | if (cset) | |
106 | return cset; | |
107 | @@ -1102,7 +1104,7 @@ static struct css_set *find_css_set(stru | |
108 | * find_existing_css_set() */ | |
109 | memcpy(cset->subsys, template, sizeof(cset->subsys)); | |
110 | ||
111 | - spin_lock_bh(&css_set_lock); | |
112 | + spin_lock_irq(&css_set_lock); | |
113 | /* Add reference counts and links from the new css_set. */ | |
114 | list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) { | |
115 | struct cgroup *c = link->cgrp; | |
116 | @@ -1128,7 +1130,7 @@ static struct css_set *find_css_set(stru | |
117 | css_get(css); | |
118 | } | |
119 | ||
120 | - spin_unlock_bh(&css_set_lock); | |
121 | + spin_unlock_irq(&css_set_lock); | |
122 | ||
123 | return cset; | |
124 | } | |
125 | @@ -1192,7 +1194,7 @@ static void cgroup_destroy_root(struct c | |
126 | * Release all the links from cset_links to this hierarchy's | |
127 | * root cgroup | |
128 | */ | |
129 | - spin_lock_bh(&css_set_lock); | |
130 | + spin_lock_irq(&css_set_lock); | |
131 | ||
132 | list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) { | |
133 | list_del(&link->cset_link); | |
134 | @@ -1200,7 +1202,7 @@ static void cgroup_destroy_root(struct c | |
135 | kfree(link); | |
136 | } | |
137 | ||
138 | - spin_unlock_bh(&css_set_lock); | |
139 | + spin_unlock_irq(&css_set_lock); | |
140 | ||
141 | if (!list_empty(&root->root_list)) { | |
142 | list_del(&root->root_list); | |
143 | @@ -1600,11 +1602,11 @@ static int rebind_subsystems(struct cgro | |
144 | ss->root = dst_root; | |
145 | css->cgroup = dcgrp; | |
146 | ||
147 | - spin_lock_bh(&css_set_lock); | |
148 | + spin_lock_irq(&css_set_lock); | |
149 | hash_for_each(css_set_table, i, cset, hlist) | |
150 | list_move_tail(&cset->e_cset_node[ss->id], | |
151 | &dcgrp->e_csets[ss->id]); | |
152 | - spin_unlock_bh(&css_set_lock); | |
153 | + spin_unlock_irq(&css_set_lock); | |
154 | ||
155 | /* default hierarchy doesn't enable controllers by default */ | |
156 | dst_root->subsys_mask |= 1 << ssid; | |
157 | @@ -1640,10 +1642,10 @@ static int cgroup_show_path(struct seq_f | |
158 | if (!buf) | |
159 | return -ENOMEM; | |
160 | ||
161 | - spin_lock_bh(&css_set_lock); | |
162 | + spin_lock_irq(&css_set_lock); | |
163 | ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot); | |
164 | len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX); | |
165 | - spin_unlock_bh(&css_set_lock); | |
166 | + spin_unlock_irq(&css_set_lock); | |
167 | ||
168 | if (len >= PATH_MAX) | |
169 | len = -ERANGE; | |
170 | @@ -1897,7 +1899,7 @@ static void cgroup_enable_task_cg_lists( | |
171 | { | |
172 | struct task_struct *p, *g; | |
173 | ||
174 | - spin_lock_bh(&css_set_lock); | |
175 | + spin_lock_irq(&css_set_lock); | |
176 | ||
177 | if (use_task_css_set_links) | |
178 | goto out_unlock; | |
179 | @@ -1922,8 +1924,12 @@ static void cgroup_enable_task_cg_lists( | |
180 | * entry won't be deleted though the process has exited. | |
181 | * Do it while holding siglock so that we don't end up | |
182 | * racing against cgroup_exit(). | |
183 | + * | |
184 | + * Interrupts were already disabled while acquiring | |
185 | + * the css_set_lock, so we do not need to disable it | |
186 | + * again when acquiring the sighand->siglock here. | |
187 | */ | |
188 | - spin_lock_irq(&p->sighand->siglock); | |
189 | + spin_lock(&p->sighand->siglock); | |
190 | if (!(p->flags & PF_EXITING)) { | |
191 | struct css_set *cset = task_css_set(p); | |
192 | ||
193 | @@ -1932,11 +1938,11 @@ static void cgroup_enable_task_cg_lists( | |
194 | list_add_tail(&p->cg_list, &cset->tasks); | |
195 | get_css_set(cset); | |
196 | } | |
197 | - spin_unlock_irq(&p->sighand->siglock); | |
198 | + spin_unlock(&p->sighand->siglock); | |
199 | } while_each_thread(g, p); | |
200 | read_unlock(&tasklist_lock); | |
201 | out_unlock: | |
202 | - spin_unlock_bh(&css_set_lock); | |
203 | + spin_unlock_irq(&css_set_lock); | |
204 | } | |
205 | ||
206 | static void init_cgroup_housekeeping(struct cgroup *cgrp) | |
207 | @@ -2043,13 +2049,13 @@ static int cgroup_setup_root(struct cgro | |
208 | * Link the root cgroup in this hierarchy into all the css_set | |
209 | * objects. | |
210 | */ | |
211 | - spin_lock_bh(&css_set_lock); | |
212 | + spin_lock_irq(&css_set_lock); | |
213 | hash_for_each(css_set_table, i, cset, hlist) { | |
214 | link_css_set(&tmp_links, cset, root_cgrp); | |
215 | if (css_set_populated(cset)) | |
216 | cgroup_update_populated(root_cgrp, true); | |
217 | } | |
218 | - spin_unlock_bh(&css_set_lock); | |
219 | + spin_unlock_irq(&css_set_lock); | |
220 | ||
221 | BUG_ON(!list_empty(&root_cgrp->self.children)); | |
222 | BUG_ON(atomic_read(&root->nr_cgrps) != 1); | |
223 | @@ -2256,11 +2262,11 @@ out_mount: | |
224 | struct cgroup *cgrp; | |
225 | ||
226 | mutex_lock(&cgroup_mutex); | |
227 | - spin_lock_bh(&css_set_lock); | |
228 | + spin_lock_irq(&css_set_lock); | |
229 | ||
230 | cgrp = cset_cgroup_from_root(ns->root_cset, root); | |
231 | ||
232 | - spin_unlock_bh(&css_set_lock); | |
233 | + spin_unlock_irq(&css_set_lock); | |
234 | mutex_unlock(&cgroup_mutex); | |
235 | ||
236 | nsdentry = kernfs_node_dentry(cgrp->kn, dentry->d_sb); | |
237 | @@ -2337,11 +2343,11 @@ char *cgroup_path_ns(struct cgroup *cgrp | |
238 | char *ret; | |
239 | ||
240 | mutex_lock(&cgroup_mutex); | |
241 | - spin_lock_bh(&css_set_lock); | |
242 | + spin_lock_irq(&css_set_lock); | |
243 | ||
244 | ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns); | |
245 | ||
246 | - spin_unlock_bh(&css_set_lock); | |
247 | + spin_unlock_irq(&css_set_lock); | |
248 | mutex_unlock(&cgroup_mutex); | |
249 | ||
250 | return ret; | |
251 | @@ -2369,7 +2375,7 @@ char *task_cgroup_path(struct task_struc | |
252 | char *path = NULL; | |
253 | ||
254 | mutex_lock(&cgroup_mutex); | |
255 | - spin_lock_bh(&css_set_lock); | |
256 | + spin_lock_irq(&css_set_lock); | |
257 | ||
258 | root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id); | |
259 | ||
260 | @@ -2382,7 +2388,7 @@ char *task_cgroup_path(struct task_struc | |
261 | path = buf; | |
262 | } | |
263 | ||
264 | - spin_unlock_bh(&css_set_lock); | |
265 | + spin_unlock_irq(&css_set_lock); | |
266 | mutex_unlock(&cgroup_mutex); | |
267 | return path; | |
268 | } | |
269 | @@ -2557,7 +2563,7 @@ static int cgroup_taskset_migrate(struct | |
270 | * the new cgroup. There are no failure cases after here, so this | |
271 | * is the commit point. | |
272 | */ | |
273 | - spin_lock_bh(&css_set_lock); | |
274 | + spin_lock_irq(&css_set_lock); | |
275 | list_for_each_entry(cset, &tset->src_csets, mg_node) { | |
276 | list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) { | |
277 | struct css_set *from_cset = task_css_set(task); | |
278 | @@ -2568,7 +2574,7 @@ static int cgroup_taskset_migrate(struct | |
279 | put_css_set_locked(from_cset); | |
280 | } | |
281 | } | |
282 | - spin_unlock_bh(&css_set_lock); | |
283 | + spin_unlock_irq(&css_set_lock); | |
284 | ||
285 | /* | |
286 | * Migration is committed, all target tasks are now on dst_csets. | |
287 | @@ -2597,13 +2603,13 @@ out_cancel_attach: | |
288 | } | |
289 | } while_each_subsys_mask(); | |
290 | out_release_tset: | |
291 | - spin_lock_bh(&css_set_lock); | |
292 | + spin_lock_irq(&css_set_lock); | |
293 | list_splice_init(&tset->dst_csets, &tset->src_csets); | |
294 | list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) { | |
295 | list_splice_tail_init(&cset->mg_tasks, &cset->tasks); | |
296 | list_del_init(&cset->mg_node); | |
297 | } | |
298 | - spin_unlock_bh(&css_set_lock); | |
299 | + spin_unlock_irq(&css_set_lock); | |
300 | return ret; | |
301 | } | |
302 | ||
303 | @@ -2634,7 +2640,7 @@ static void cgroup_migrate_finish(struct | |
304 | ||
305 | lockdep_assert_held(&cgroup_mutex); | |
306 | ||
307 | - spin_lock_bh(&css_set_lock); | |
308 | + spin_lock_irq(&css_set_lock); | |
309 | list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) { | |
310 | cset->mg_src_cgrp = NULL; | |
311 | cset->mg_dst_cgrp = NULL; | |
312 | @@ -2642,7 +2648,7 @@ static void cgroup_migrate_finish(struct | |
313 | list_del_init(&cset->mg_preload_node); | |
314 | put_css_set_locked(cset); | |
315 | } | |
316 | - spin_unlock_bh(&css_set_lock); | |
317 | + spin_unlock_irq(&css_set_lock); | |
318 | } | |
319 | ||
320 | /** | |
321 | @@ -2783,7 +2789,7 @@ static int cgroup_migrate(struct task_st | |
322 | * already PF_EXITING could be freed from underneath us unless we | |
323 | * take an rcu_read_lock. | |
324 | */ | |
325 | - spin_lock_bh(&css_set_lock); | |
326 | + spin_lock_irq(&css_set_lock); | |
327 | rcu_read_lock(); | |
328 | task = leader; | |
329 | do { | |
330 | @@ -2792,7 +2798,7 @@ static int cgroup_migrate(struct task_st | |
331 | break; | |
332 | } while_each_thread(leader, task); | |
333 | rcu_read_unlock(); | |
334 | - spin_unlock_bh(&css_set_lock); | |
335 | + spin_unlock_irq(&css_set_lock); | |
336 | ||
337 | return cgroup_taskset_migrate(&tset, root); | |
338 | } | |
339 | @@ -2816,7 +2822,7 @@ static int cgroup_attach_task(struct cgr | |
340 | return -EBUSY; | |
341 | ||
342 | /* look up all src csets */ | |
343 | - spin_lock_bh(&css_set_lock); | |
344 | + spin_lock_irq(&css_set_lock); | |
345 | rcu_read_lock(); | |
346 | task = leader; | |
347 | do { | |
348 | @@ -2826,7 +2832,7 @@ static int cgroup_attach_task(struct cgr | |
349 | break; | |
350 | } while_each_thread(leader, task); | |
351 | rcu_read_unlock(); | |
352 | - spin_unlock_bh(&css_set_lock); | |
353 | + spin_unlock_irq(&css_set_lock); | |
354 | ||
355 | /* prepare dst csets and commit */ | |
356 | ret = cgroup_migrate_prepare_dst(&preloaded_csets); | |
357 | @@ -2859,9 +2865,9 @@ static int cgroup_procs_write_permission | |
358 | struct cgroup *cgrp; | |
359 | struct inode *inode; | |
360 | ||
361 | - spin_lock_bh(&css_set_lock); | |
362 | + spin_lock_irq(&css_set_lock); | |
363 | cgrp = task_cgroup_from_root(task, &cgrp_dfl_root); | |
364 | - spin_unlock_bh(&css_set_lock); | |
365 | + spin_unlock_irq(&css_set_lock); | |
366 | ||
367 | while (!cgroup_is_descendant(dst_cgrp, cgrp)) | |
368 | cgrp = cgroup_parent(cgrp); | |
369 | @@ -2962,9 +2968,9 @@ int cgroup_attach_task_all(struct task_s | |
370 | if (root == &cgrp_dfl_root) | |
371 | continue; | |
372 | ||
373 | - spin_lock_bh(&css_set_lock); | |
374 | + spin_lock_irq(&css_set_lock); | |
375 | from_cgrp = task_cgroup_from_root(from, root); | |
376 | - spin_unlock_bh(&css_set_lock); | |
377 | + spin_unlock_irq(&css_set_lock); | |
378 | ||
379 | retval = cgroup_attach_task(from_cgrp, tsk, false); | |
380 | if (retval) | |
381 | @@ -3080,7 +3086,7 @@ static int cgroup_update_dfl_csses(struc | |
382 | percpu_down_write(&cgroup_threadgroup_rwsem); | |
383 | ||
384 | /* look up all csses currently attached to @cgrp's subtree */ | |
385 | - spin_lock_bh(&css_set_lock); | |
386 | + spin_lock_irq(&css_set_lock); | |
387 | cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { | |
388 | struct cgrp_cset_link *link; | |
389 | ||
390 | @@ -3088,14 +3094,14 @@ static int cgroup_update_dfl_csses(struc | |
391 | cgroup_migrate_add_src(link->cset, dsct, | |
392 | &preloaded_csets); | |
393 | } | |
394 | - spin_unlock_bh(&css_set_lock); | |
395 | + spin_unlock_irq(&css_set_lock); | |
396 | ||
397 | /* NULL dst indicates self on default hierarchy */ | |
398 | ret = cgroup_migrate_prepare_dst(&preloaded_csets); | |
399 | if (ret) | |
400 | goto out_finish; | |
401 | ||
402 | - spin_lock_bh(&css_set_lock); | |
403 | + spin_lock_irq(&css_set_lock); | |
404 | list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) { | |
405 | struct task_struct *task, *ntask; | |
406 | ||
407 | @@ -3107,7 +3113,7 @@ static int cgroup_update_dfl_csses(struc | |
408 | list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list) | |
409 | cgroup_taskset_add(task, &tset); | |
410 | } | |
411 | - spin_unlock_bh(&css_set_lock); | |
412 | + spin_unlock_irq(&css_set_lock); | |
413 | ||
414 | ret = cgroup_taskset_migrate(&tset, cgrp->root); | |
415 | out_finish: | |
416 | @@ -3908,10 +3914,10 @@ static int cgroup_task_count(const struc | |
417 | int count = 0; | |
418 | struct cgrp_cset_link *link; | |
419 | ||
420 | - spin_lock_bh(&css_set_lock); | |
421 | + spin_lock_irq(&css_set_lock); | |
422 | list_for_each_entry(link, &cgrp->cset_links, cset_link) | |
423 | count += atomic_read(&link->cset->refcount); | |
424 | - spin_unlock_bh(&css_set_lock); | |
425 | + spin_unlock_irq(&css_set_lock); | |
426 | return count; | |
427 | } | |
428 | ||
429 | @@ -4249,7 +4255,7 @@ void css_task_iter_start(struct cgroup_s | |
430 | ||
431 | memset(it, 0, sizeof(*it)); | |
432 | ||
433 | - spin_lock_bh(&css_set_lock); | |
434 | + spin_lock_irq(&css_set_lock); | |
435 | ||
436 | it->ss = css->ss; | |
437 | ||
438 | @@ -4262,7 +4268,7 @@ void css_task_iter_start(struct cgroup_s | |
439 | ||
440 | css_task_iter_advance_css_set(it); | |
441 | ||
442 | - spin_unlock_bh(&css_set_lock); | |
443 | + spin_unlock_irq(&css_set_lock); | |
444 | } | |
445 | ||
446 | /** | |
447 | @@ -4280,7 +4286,7 @@ struct task_struct *css_task_iter_next(s | |
448 | it->cur_task = NULL; | |
449 | } | |
450 | ||
451 | - spin_lock_bh(&css_set_lock); | |
452 | + spin_lock_irq(&css_set_lock); | |
453 | ||
454 | if (it->task_pos) { | |
455 | it->cur_task = list_entry(it->task_pos, struct task_struct, | |
456 | @@ -4289,7 +4295,7 @@ struct task_struct *css_task_iter_next(s | |
457 | css_task_iter_advance(it); | |
458 | } | |
459 | ||
460 | - spin_unlock_bh(&css_set_lock); | |
461 | + spin_unlock_irq(&css_set_lock); | |
462 | ||
463 | return it->cur_task; | |
464 | } | |
465 | @@ -4303,10 +4309,10 @@ struct task_struct *css_task_iter_next(s | |
466 | void css_task_iter_end(struct css_task_iter *it) | |
467 | { | |
468 | if (it->cur_cset) { | |
469 | - spin_lock_bh(&css_set_lock); | |
470 | + spin_lock_irq(&css_set_lock); | |
471 | list_del(&it->iters_node); | |
472 | put_css_set_locked(it->cur_cset); | |
473 | - spin_unlock_bh(&css_set_lock); | |
474 | + spin_unlock_irq(&css_set_lock); | |
475 | } | |
476 | ||
477 | if (it->cur_task) | |
478 | @@ -4338,10 +4344,10 @@ int cgroup_transfer_tasks(struct cgroup | |
479 | mutex_lock(&cgroup_mutex); | |
480 | ||
481 | /* all tasks in @from are being moved, all csets are source */ | |
482 | - spin_lock_bh(&css_set_lock); | |
483 | + spin_lock_irq(&css_set_lock); | |
484 | list_for_each_entry(link, &from->cset_links, cset_link) | |
485 | cgroup_migrate_add_src(link->cset, to, &preloaded_csets); | |
486 | - spin_unlock_bh(&css_set_lock); | |
487 | + spin_unlock_irq(&css_set_lock); | |
488 | ||
489 | ret = cgroup_migrate_prepare_dst(&preloaded_csets); | |
490 | if (ret) | |
491 | @@ -5449,10 +5455,10 @@ static int cgroup_destroy_locked(struct | |
492 | */ | |
493 | cgrp->self.flags &= ~CSS_ONLINE; | |
494 | ||
495 | - spin_lock_bh(&css_set_lock); | |
496 | + spin_lock_irq(&css_set_lock); | |
497 | list_for_each_entry(link, &cgrp->cset_links, cset_link) | |
498 | link->cset->dead = true; | |
499 | - spin_unlock_bh(&css_set_lock); | |
500 | + spin_unlock_irq(&css_set_lock); | |
501 | ||
502 | /* initiate massacre of all css's */ | |
503 | for_each_css(css, ssid, cgrp) | |
504 | @@ -5723,7 +5729,7 @@ int proc_cgroup_show(struct seq_file *m, | |
505 | goto out; | |
506 | ||
507 | mutex_lock(&cgroup_mutex); | |
508 | - spin_lock_bh(&css_set_lock); | |
509 | + spin_lock_irq(&css_set_lock); | |
510 | ||
511 | for_each_root(root) { | |
512 | struct cgroup_subsys *ss; | |
513 | @@ -5776,7 +5782,7 @@ int proc_cgroup_show(struct seq_file *m, | |
514 | ||
515 | retval = 0; | |
516 | out_unlock: | |
517 | - spin_unlock_bh(&css_set_lock); | |
518 | + spin_unlock_irq(&css_set_lock); | |
519 | mutex_unlock(&cgroup_mutex); | |
520 | kfree(buf); | |
521 | out: | |
522 | @@ -5921,13 +5927,13 @@ void cgroup_post_fork(struct task_struct | |
523 | if (use_task_css_set_links) { | |
524 | struct css_set *cset; | |
525 | ||
526 | - spin_lock_bh(&css_set_lock); | |
527 | + spin_lock_irq(&css_set_lock); | |
528 | cset = task_css_set(current); | |
529 | if (list_empty(&child->cg_list)) { | |
530 | get_css_set(cset); | |
531 | css_set_move_task(child, NULL, cset, false); | |
532 | } | |
533 | - spin_unlock_bh(&css_set_lock); | |
534 | + spin_unlock_irq(&css_set_lock); | |
535 | } | |
536 | ||
537 | /* | |
538 | @@ -5972,9 +5978,9 @@ void cgroup_exit(struct task_struct *tsk | |
539 | cset = task_css_set(tsk); | |
540 | ||
541 | if (!list_empty(&tsk->cg_list)) { | |
542 | - spin_lock_bh(&css_set_lock); | |
543 | + spin_lock_irq(&css_set_lock); | |
544 | css_set_move_task(tsk, cset, NULL, false); | |
545 | - spin_unlock_bh(&css_set_lock); | |
546 | + spin_unlock_irq(&css_set_lock); | |
547 | } else { | |
548 | get_css_set(cset); | |
549 | } | |
550 | @@ -6042,9 +6048,9 @@ static void cgroup_release_agent(struct | |
551 | if (!pathbuf || !agentbuf) | |
552 | goto out; | |
553 | ||
554 | - spin_lock_bh(&css_set_lock); | |
555 | + spin_lock_irq(&css_set_lock); | |
556 | path = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns); | |
557 | - spin_unlock_bh(&css_set_lock); | |
558 | + spin_unlock_irq(&css_set_lock); | |
559 | if (!path) | |
560 | goto out; | |
561 | ||
562 | @@ -6304,12 +6310,12 @@ struct cgroup_namespace *copy_cgroup_ns( | |
563 | return ERR_PTR(-EPERM); | |
564 | ||
565 | mutex_lock(&cgroup_mutex); | |
566 | - spin_lock_bh(&css_set_lock); | |
567 | + spin_lock_irq(&css_set_lock); | |
568 | ||
569 | cset = task_css_set(current); | |
570 | get_css_set(cset); | |
571 | ||
572 | - spin_unlock_bh(&css_set_lock); | |
573 | + spin_unlock_irq(&css_set_lock); | |
574 | mutex_unlock(&cgroup_mutex); | |
575 | ||
576 | new_ns = alloc_cgroup_ns(); | |
577 | @@ -6433,7 +6439,7 @@ static int current_css_set_cg_links_read | |
578 | if (!name_buf) | |
579 | return -ENOMEM; | |
580 | ||
581 | - spin_lock_bh(&css_set_lock); | |
582 | + spin_lock_irq(&css_set_lock); | |
583 | rcu_read_lock(); | |
584 | cset = rcu_dereference(current->cgroups); | |
585 | list_for_each_entry(link, &cset->cgrp_links, cgrp_link) { | |
586 | @@ -6444,7 +6450,7 @@ static int current_css_set_cg_links_read | |
587 | c->root->hierarchy_id, name_buf); | |
588 | } | |
589 | rcu_read_unlock(); | |
590 | - spin_unlock_bh(&css_set_lock); | |
591 | + spin_unlock_irq(&css_set_lock); | |
592 | kfree(name_buf); | |
593 | return 0; | |
594 | } | |
595 | @@ -6455,7 +6461,7 @@ static int cgroup_css_links_read(struct | |
596 | struct cgroup_subsys_state *css = seq_css(seq); | |
597 | struct cgrp_cset_link *link; | |
598 | ||
599 | - spin_lock_bh(&css_set_lock); | |
600 | + spin_lock_irq(&css_set_lock); | |
601 | list_for_each_entry(link, &css->cgroup->cset_links, cset_link) { | |
602 | struct css_set *cset = link->cset; | |
603 | struct task_struct *task; | |
604 | @@ -6478,7 +6484,7 @@ static int cgroup_css_links_read(struct | |
605 | overflow: | |
606 | seq_puts(seq, " ...\n"); | |
607 | } | |
608 | - spin_unlock_bh(&css_set_lock); | |
609 | + spin_unlock_irq(&css_set_lock); | |
610 | return 0; | |
611 | } | |
612 |