/*
 * linux/kernel/softirq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 *
 * Distribute under GPLv2.
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */

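/*
 * Illustrative example of the distinction above (assuming the standard
 * preempt_count bit layout, with the SOFTIRQ bits at SOFTIRQ_SHIFT):
 * after local_bh_disable(), softirq_count() == SOFTIRQ_DISABLE_OFFSET and
 * the low SOFTIRQ_OFFSET bit is clear, so we are "bh disabled" but not
 * serving a softirq; while __do_softirq() runs handlers, the SOFTIRQ_OFFSET
 * bit is set, so we are actually in softirq processing.
 */
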
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_on(_RET_IP_);
	preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any is pending. And do it on its own stack,
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (trace_hardirq_context(current)) {
		in_hardirq = true;
		trace_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage void __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;
	int cpu;

	/*
	 * Mask out PF_MEMALLOC, as the current task context is borrowed for
	 * the softirq. A softirq handler such as network RX might set
	 * PF_MEMALLOC again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

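	/*
	 * The loop below only visits vectors whose bits are set: ffs()
	 * returns the 1-based index of the least significant set bit, or 0
	 * when nothing is pending. Worked example: with pending == 0x09
	 * (HI and NET_RX pending), the first pass gets softirq_bit == 1 and
	 * runs vector 0; after "pending >>= 1" the mask is 0x04, so the next
	 * pass gets softirq_bit == 3 and h advances by 2 to vector 3.
	 */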
	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			printk(KERN_ERR "huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		rcu_bh_qs(cpu);
		h++;
		pending >>= softirq_bit;
	}

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_check_idle();
		_local_bh_enable();
	}

	__irq_enter();
}

static inline void invoke_softirq(void)
{
	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack, which can
		 * already be deep. So run the softirqs on their own stack
		 * to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_interrupt())
			tick_nohz_irq_exit();
	}
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif

	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	trace_hardirq_exit(); /* must be last! */
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

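/*
 * Usage sketch (illustrative only; "MY_SOFTIRQ" and "my_action" are
 * hypothetical names -- the real vectors are the fixed entries listed in
 * softirq_to_name[] above, and the in-tree registrations are done in
 * softirq_init() below):
 *
 *	static void my_action(struct softirq_action *a)
 *	{
 *		// invoked from __do_softirq() with interrupts enabled
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_action);	// typically at init time
 *	raise_softirq(MY_SOFTIRQ);		// mark it pending on this CPU
 */
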
/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_vec.tail) = t;
	__this_cpu_write(tasklet_vec.tail, &(t->next));
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, t);
	__raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}


void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

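/*
 * Usage sketch (illustrative; "my_tasklet" and "my_tasklet_fn" are
 * hypothetical names, and tasklet_schedule() is the <linux/interrupt.h>
 * wrapper that tests TASKLET_STATE_SCHED before __tasklet_schedule()):
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		// deferred work, runs in softirq context
 *	}
 *
 *	static struct tasklet_struct my_tasklet;
 *
 *	tasklet_init(&my_tasklet, my_tasklet_fn, 0);
 *	tasklet_schedule(&my_tasklet);	// e.g. from a hard irq handler
 *	...
 *	tasklet_kill(&my_tasklet);	// before freeing the structure
 */
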
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);

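/*
 * Usage sketch (illustrative; "my_cb" and "my_th" are hypothetical names,
 * and tasklet_hrtimer_start() is assumed to be the starter helper that
 * accompanies tasklet_hrtimer in <linux/interrupt.h>):
 *
 *	static enum hrtimer_restart my_cb(struct hrtimer *timer)
 *	{
 *		// runs via HI_SOFTIRQ, not from hard irq context
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	static struct tasklet_hrtimer my_th;
 *
 *	tasklet_hrtimer_init(&my_th, my_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&my_th, ms_to_ktime(10), HRTIMER_MODE_REL);
 */
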
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on the inline stack here, as we
		 * are not deep in the task stack.
		 */
		__do_softirq();
		rcu_note_context_switch(cpu);
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int cpu_callback(struct notifier_block *nfb,
			unsigned long action,
			void *hcpu)
{
	switch (action) {
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		takeover_tasklets((unsigned long)hcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block cpu_nfb = {
	.notifier_call = cpu_callback
};

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	register_cpu_notifier(&cpu_nfb);

	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}