/*
 * linux/kernel/softirq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 *
 * Distribute under GPLv2.
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only the local CPU is marked for
     execution. Hence, we get something of a weak CPU binding,
     though it is still not clear whether this results in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */

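/*
 * Worked example of the scheme above (an illustrative sketch; the
 * concrete values assume the usual layout where SOFTIRQ_OFFSET == 0x100
 * and hence SOFTIRQ_DISABLE_OFFSET == 0x200):
 *
 *	local_bh_disable();	// softirq_count() += 0x200: bh disabled
 *	...			// in_serving_softirq() is still false here
 *	local_bh_enable();	// softirq_count() -= 0x200
 *
 * __do_softirq(), by contrast, adds only SOFTIRQ_OFFSET (0x100), so the
 * SOFTIRQ_OFFSET bit means "currently processing softirq" while higher
 * multiples count local_bh_disable() nesting.
 */
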
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == cnt)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	preempt_count_add(cnt);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
	__local_bh_disable(_RET_IP_, SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(local_bh_disable);

static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == cnt)
		trace_softirqs_on(_RET_IP_);
	preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(SOFTIRQ_DISABLE_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirqs if any are pending, and do it on a separate
		 * stack as we may already be deep in a task's call stack.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip(_RET_IP_);
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);

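/*
 * Usage sketch (hypothetical caller, not part of this file): disabling
 * bottom halves is the usual way to protect per-CPU data that is also
 * touched from softirq context. All names below are illustrative.
 *
 *	static DEFINE_PER_CPU(unsigned long, my_counter);
 *
 *	static void my_count_event(void)
 *	{
 *		local_bh_disable();		// no softirq can run locally
 *		__this_cpu_inc(my_counter);
 *		local_bh_enable();		// may run pending softirqs
 *	}
 */
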
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	int cpu;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for
	 * the softirq. A softirq handler, such as network RX, might set
	 * PF_MEMALLOC again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET);
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			unsigned int vec_nr = h - softirq_vec;
			int prev_count = preempt_count();

			kstat_incr_softirqs_this_cpu(vec_nr);

			trace_softirq_entry(vec_nr);
			h->action(h);
			trace_softirq_exit(vec_nr);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %u %s %p "
				       "with preempt_count %08x, "
				       "exited with %08x?\n", vec_nr,
				       softirq_to_name[vec_nr], h->action,
				       prev_count, preempt_count());
				preempt_count_set(prev_count);
			}

			rcu_bh_qs(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_exit();

	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_check_idle();
		_local_bh_enable();
	}

	__irq_enter();
}

static inline void invoke_softirq(void)
{
	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute the softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on a task stack that may
		 * already be deep. So run the softirq on its own stack to
		 * prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_interrupt())
			tick_nohz_irq_exit();
	}
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif

	account_irq_exit_time(current);
	trace_hardirq_exit();
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

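/*
 * Usage sketch (illustrative, not part of this file): a subsystem
 * registers its handler once at init time and raises the softirq later,
 * typically from hardirq context. MY_SOFTIRQ is hypothetical - a real
 * softirq needs its own entry in the NR_SOFTIRQS enum in interrupt.h.
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		// runs on the raising CPU, irqs enabled, bh context
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);	// init time
 *	...
 *	raise_softirq(MY_SOFTIRQ);	// e.g. from an irq handler
 */
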
/*
 * Tasklets
 */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_vec.tail) = t;
	__this_cpu_write(tasklet_vec.tail, &(t->next));
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, t);
	__raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

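/*
 * Usage sketch (hypothetical driver, not part of this file): a tasklet
 * is initialized once, scheduled from the interrupt handler, and killed
 * before the data it references is freed.
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data; // hypothetical type
 *		// deferred work, runs in softirq context
 *	}
 *
 *	tasklet_init(&dev->tasklet, my_tasklet_fn, (unsigned long)dev);
 *	...
 *	tasklet_schedule(&dev->tasklet);	// from the hardirq handler
 *	...
 *	tasklet_kill(&dev->tasklet);		// on teardown, process context
 */
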
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);

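/*
 * Usage sketch (illustrative, not part of this file): the callback is
 * driven by an hrtimer but runs from HI_SOFTIRQ softirq context, so it
 * may do work that is not hardirq-safe. All names below are hypothetical.
 *
 *	static struct tasklet_hrtimer my_ttimer;
 *
 *	static enum hrtimer_restart my_timer_cb(struct hrtimer *t)
 *	{
 *		// softirq context
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&my_ttimer, my_timer_cb,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&my_ttimer, ms_to_ktime(10),
 *			      HRTIMER_MODE_REL);
 */
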
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirqs on the current stack, as we
		 * are not deep in a task stack here.
		 */
		__do_softirq();
		rcu_note_context_switch(cpu);
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which may already
 * be scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int cpu_callback(struct notifier_block *nfb,
			unsigned long action,
			void *hcpu)
{
	switch (action) {
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		takeover_tasklets((unsigned long)hcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block cpu_nfb = {
	.notifier_call = cpu_callback
};

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	register_cpu_notifier(&cpu_nfb);

	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}