// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/local_lock.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/wait_bit.h>

#include <asm/softirq_stack.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
 * Softirq design notes:
 *
 * - No shared variables; all data is CPU-local.
 * - If a softirq needs serialization, it must serialize itself with its
 *   own spinlocks.
 * - Even when a softirq is serialized, only the local CPU is marked for
 *   execution, so handlers get a weak form of CPU binding.  Whether this
 *   results in better cache locality is an open question.
 *
 * Examples:
 * - NET RX softirq: multithreaded, requires no global serialization.
 * - NET TX softirq: kicks software netdevice queues, so it is logically
 *   serialized per device, but that serialization is invisible to common
 *   code.
 * - Tasklets: serialized with respect to themselves.
 */

#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance the softirq
 * load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk)
		wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 * unless we're doing some of the synchronous softirqs.
 */
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (pending & SOFTIRQ_NOW_MASK)
		return false;
	return tsk && task_is_running(tsk) && !__kthread_should_park(tsk);
}

#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
#endif

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 *
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable() or local_bh_enable().
 *
 * This lets us distinguish between whether we are currently processing
 * softirqs and whether we just have bottom halves disabled.
 */

#ifdef CONFIG_PREEMPT_RT

/*
 * On PREEMPT_RT, softirq processing must stay preemptible.  Bottom-half
 * protection is therefore implemented with a per-CPU local_lock
 * (softirq_ctrl.lock) plus a per-CPU disable counter (softirq_ctrl.cnt)
 * instead of manipulating the preempt count.  The counter is mirrored
 * into current->softirq_disable_cnt so that softirq_count() works for
 * the current task and so that recursion on the local lock is avoided.
 */
struct softirq_ctrl {
	local_lock_t	lock;
	int		cnt;
};

static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
};

/**
 * local_bh_blocked() - Check whether bottom halves are blocked on this CPU
 *
 * Returns true if softirq_ctrl.cnt is non-zero, i.e. some task currently
 * has bottom halves disabled on this CPU.  This is used to suppress false
 * positive "pending softirq" warnings while the holder of the per-CPU
 * softirq_ctrl lock is blocked.
 */
bool local_bh_blocked(void)
{
	return __this_cpu_read(softirq_ctrl.cnt) != 0;
}

void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;
	int newcnt;

	WARN_ON_ONCE(in_hardirq());

	/* First entry of a task into a BH disabled section? */
	if (!current->softirq_disable_cnt) {
		if (preemptible()) {
			local_lock(&softirq_ctrl.lock);
			/* Required to meet the RCU bottomhalf requirements. */
			rcu_read_lock();
		} else {
			DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
		}
	}

	/*
	 * Track the per-CPU softirq disabled state.  On RT this is per-CPU
	 * state to allow preemption of bottom half disabled sections.
	 */
	newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
	/*
	 * Reflect the result in the task struct to prevent recursion on the
	 * local lock and to make softirq_count() work.
	 */
	current->softirq_disable_cnt = newcnt;

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
		raw_local_irq_save(flags);
		lockdep_softirqs_off(ip);
		raw_local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);

static void __local_bh_enable(unsigned int cnt, bool unlock)
{
	unsigned long flags;
	int newcnt;

	DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
			    this_cpu_read(softirq_ctrl.cnt));

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
		raw_local_irq_save(flags);
		lockdep_softirqs_on(_RET_IP_);
		raw_local_irq_restore(flags);
	}

	newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
	current->softirq_disable_cnt = newcnt;

	if (!newcnt && unlock) {
		rcu_read_unlock();
		local_unlock(&softirq_ctrl.lock);
	}
}

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	bool preempt_on = preemptible();
	unsigned long flags;
	u32 pending;
	int curcnt;

	WARN_ON_ONCE(in_hardirq());
	lockdep_assert_irqs_enabled();

	local_irq_save(flags);
	curcnt = __this_cpu_read(softirq_ctrl.cnt);

	/*
	 * If this is not the outermost enable (other bottom-half disabled
	 * sections are still active), leave softirq processing to the
	 * outermost enable or to ksoftirqd.
	 */
	if (curcnt != cnt)
		goto out;

	pending = local_softirq_pending();
	if (!pending || ksoftirqd_running(pending))
		goto out;

	/*
	 * If this was called from a non-preemptible context, wake up the
	 * softirq daemon instead of processing softirqs here.
	 */
	if (!preempt_on) {
		wakeup_softirqd();
		goto out;
	}

	/*
	 * Adjust softirq count to SOFTIRQ_OFFSET which makes
	 * in_serving_softirq() become true.
	 */
	cnt = SOFTIRQ_OFFSET;
	__local_bh_enable(cnt, false);
	__do_softirq();

out:
	__local_bh_enable(cnt, preempt_on);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__local_bh_enable_ip);

/*
 * Invoked from run_ksoftirqd() outside of the interrupt disabled section
 * to acquire the per-CPU local lock for reentrancy protection.
 */
static inline void ksoftirqd_run_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	local_irq_disable();
}

/* Counterpart to ksoftirqd_run_begin() */
static inline void ksoftirqd_run_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET, true);
	WARN_ON_ONCE(in_interrupt());
	local_irq_enable();
}

static inline void softirq_handle_begin(void) { }
static inline void softirq_handle_end(void) { }

static inline bool should_wake_ksoftirqd(void)
{
	return !this_cpu_read(softirq_ctrl.cnt);
}

static inline void invoke_softirq(void)
{
	if (should_wake_ksoftirqd())
		wakeup_softirqd();
}

/*
 * flush_smp_call_function_queue() can raise a soft interrupt in a function
 * call.  On RT kernels this is undesired and the only known functionality
 * in the block layer which does this is disabled on RT.  If soft interrupts
 * get raised which haven't been raised before the flush, warn so it can be
 * investigated.
 */
void do_softirq_post_smp_call_flush(unsigned int was_pending)
{
	if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
		invoke_softirq();
}

#else /* CONFIG_PREEMPT_RT */

/*
 * This one is for softirq.c-internal use, where hardirqs are disabled
 * legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_hardirq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_hardirq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_hardirq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		lockdep_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	__preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending.  And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

static inline void softirq_handle_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
}

static inline void softirq_handle_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
}

static inline void ksoftirqd_run_begin(void)
{
	local_irq_disable();
}

static inline void ksoftirqd_run_end(void)
{
	local_irq_enable();
}

static inline bool should_wake_ksoftirqd(void)
{
	return true;
}

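/*
 * Invoked on interrupt exit when softirqs are pending: run them inline or
 * on the dedicated softirq stack, unless ksoftirqd is already handling them
 * or forced interrupt threading is enabled, in which case ksoftirqd is
 * woken instead.
 */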
static inline void invoke_softirq(void)
{
	if (ksoftirqd_running(local_softirq_pending()))
		return;

	if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute the softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already.  So call softirq in its own
		 * stack to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running(pending))
		do_softirq_own_stack();

	local_irq_restore(flags);
}

#endif /* !CONFIG_PREEMPT_RT */

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to increment,
 * so we need the MAX_SOFTIRQ_RESTART limit as insurance against
 * starvation.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency against fairness:
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack, we
 * need to keep the lockdep irq context tracking as tight as possible in
 * order not to miss-qualify lock contexts and miss possible deadlocks.
 */
static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (lockdep_hardirq_context()) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

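/*
 * Core softirq processing: run the pending vectors with interrupts enabled,
 * restarting for at most MAX_SOFTIRQ_RESTART rounds or MAX_SOFTIRQ_TIME,
 * then hand any remaining work to ksoftirqd.
 */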
asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for
	 * the softirq.  A softirq handler, such as network RX, might set
	 * PF_MEMALLOC again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();

	softirq_handle_begin();
	in_hardirq = lockdep_softirq_start();
	account_softirq_enter(current);

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
	    __this_cpu_read(ksoftirqd) == current)
		rcu_softirq_qs();

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	account_softirq_exit(current);
	lockdep_softirq_end(in_hardirq);
	softirq_handle_end();
	current_restore_flags(old_flags, PF_MEMALLOC);
}

/**
 * irq_enter_rcu - Enter an interrupt context while RCU is watching
 */
void irq_enter_rcu(void)
{
	__irq_enter_raw();

	if (tick_nohz_full_cpu(smp_processor_id()) ||
	    (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
		tick_irq_enter();

	account_hardirq_enter(current);
}

/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
	ct_irq_enter();
	irq_enter_rcu();
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_hardirq())
			tick_nohz_irq_exit();
	}
#endif
}

static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_hardirq_exit(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
}

/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
	__irq_exit_rcu();
	/* must be last! */
	lockdep_hardirq_exit();
}

/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
	__irq_exit_rcu();
	ct_irq_exit();
	/* must be last! */
	lockdep_hardirq_exit();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code).  We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt() && should_wake_ksoftirqd())
		wakeup_softirqd();
}

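/* Raise a softirq from a context where hard interrupts may be enabled. */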
void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

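/*
 * Mark softirq @nr pending on this CPU without waking ksoftirqd.
 * The caller must have hard interrupts disabled.
 */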
void __raise_softirq_irqoff(unsigned int nr)
{
	lockdep_assert_irqs_disabled();
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

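/* Register the handler for softirq vector @nr. */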
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

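/*
 * Append the tasklet to the tail of this CPU's list for @softirq_nr and
 * raise the corresponding softirq.
 */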
static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

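/*
 * Clear TASKLET_STATE_SCHED and wake up waiters such as tasklet_kill().
 * Returns false and warns if the tasklet was not scheduled.
 */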
static bool tasklet_clear_sched(struct tasklet_struct *t)
{
	if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
		wake_up_var(&t->state);
		return true;
	}

	WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
		  t->use_callback ? "callback" : "func",
		  t->use_callback ? (void *)t->callback : (void *)t->func);

	return false;
}

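/*
 * Run all tasklets queued on this CPU for the given vector.  Tasklets that
 * are disabled or already running on another CPU are requeued and the
 * softirq is raised again.
 */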
static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (tasklet_clear_sched(t)) {
					if (t->use_callback)
						t->callback(t);
					else
						t->func(t->data);
				}
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_setup(struct tasklet_struct *t,
		   void (*callback)(struct tasklet_struct *))
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->callback = callback;
	t->use_callback = true;
	t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->use_callback = false;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * Do not use in new code.  Waiting for tasklets from atomic contexts is
 * error prone and should be avoided.
 */
void tasklet_unlock_spin_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
			/*
			 * Prevent a live lock when current preempted soft
			 * interrupt processing or prevents ksoftirqd from
			 * running.  If the tasklet runs on a different CPU
			 * then this has no effect other than doing the BH
			 * disable/enable dance for nothing.
			 */
			local_bh_disable();
			local_bh_enable();
		} else {
			cpu_relax();
		}
	}
}
EXPORT_SYMBOL(tasklet_unlock_spin_wait);
#endif

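/*
 * Wait until the tasklet is neither scheduled nor running.  Must not be
 * called from interrupt context.
 */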
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));

	tasklet_unlock_wait(t);
	tasklet_clear_sched(t);
}
EXPORT_SYMBOL(tasklet_kill);

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
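/* Clear TASKLET_STATE_RUN and wake up tasklet_unlock_wait() waiters. */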
void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &t->state);
	smp_mb__after_atomic();
	wake_up_var(&t->state);
}
EXPORT_SYMBOL_GPL(tasklet_unlock);

void tasklet_unlock_wait(struct tasklet_struct *t)
{
	wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
}
EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
#endif

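/* Initialize the per-CPU tasklet lists and register the tasklet softirqs. */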
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

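/* smpboot callback: ksoftirqd has work whenever softirqs are pending locally. */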
static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	ksoftirqd_run_begin();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on the inline stack here, as we
		 * are not deep in the task stack.
		 */
		__do_softirq();
		ksoftirqd_run_end();
		cond_resched();
		return;
	}
	ksoftirqd_run_end();
}

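/*
 * CPU hotplug: after a CPU died, move its queued tasklets over to the CPU
 * running the teardown callback and re-raise the tasklet softirqs there.
 */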
#ifdef CONFIG_HOTPLUG_CPU
static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

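/*
 * Register the CPUHP_SOFTIRQ_DEAD callback and spawn one ksoftirqd thread
 * per CPU via the smpboot infrastructure.
 */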
static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */
int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}