// SPDX-License-Identifier: GPL-2.0
/*
 *  Kernel internal timers
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  This file contains the jiffies based timer wheel implementation
 *  used for timeout style timers (struct timer_list).
 */
#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched/signal.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/nohz.h>
#include <linux/sched/debug.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/random.h>
#include <linux/sysctl.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#include "tick-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * The timer wheel has LVL_DEPTH array levels. Each level provides an
 * array of LVL_SIZE buckets. Each level is driven by its own clock and
 * therefore each level has a different granularity.
 *
 * The level granularity is:		LVL_CLK_DIV ^ level
 * The level clock frequency is:	HZ / (LVL_CLK_DIV ^ level)
 *
 * The array level of a newly armed timer depends on the relative expiry
 * time. The farther the expiry time is away, the higher the array level
 * and therefore the granularity becomes.
 *
 * Contrary to the original timer wheel implementation, which aims for
 * 'exact' expiry of the timers, this implementation removes the need for
 * recascading the timers into the lower array levels. The granularity
 * levels provide implicit batching instead: the vast majority of timer
 * wheel timers are timeouts which are canceled before they expire, and
 * for the rest a slightly delayed expiry does not matter.
 *
 * Example granularity with HZ = 1000 (LVL_CLK_SHIFT = 3, LVL_BITS = 6):
 *
 * Level Offset  Granularity	Range
 *  0      0        1 ms	   0 ms -    63 ms
 *  1     64        8 ms	  64 ms -   511 ms
 *  2    128       64 ms	 512 ms -  4095 ms (512 ms - ~4 s)
 *  3    192      512 ms	4096 ms - 32767 ms (~4 s - ~32 s)
 *
 * Higher levels continue this pattern up to a total wheel capacity of
 * roughly 12 days with HZ = 1000. Timeouts beyond WHEEL_TIMEOUT_CUTOFF
 * are capped to WHEEL_TIMEOUT_MAX and expire at the capacity limit.
 */
/* Clock divisor for the next level */
#define LVL_CLK_SHIFT	3
#define LVL_CLK_DIV	(1UL << LVL_CLK_SHIFT)
#define LVL_CLK_MASK	(LVL_CLK_DIV - 1)
#define LVL_SHIFT(n)	((n) * LVL_CLK_SHIFT)
#define LVL_GRAN(n)	(1UL << LVL_SHIFT(n))

/*
 * The time start value for each level to select the bucket at enqueue
 * time.
 */
#define LVL_START(n)	((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))

/* Size of each clock level */
#define LVL_BITS	6
#define LVL_SIZE	(1UL << LVL_BITS)
#define LVL_MASK	(LVL_SIZE - 1)
#define LVL_OFFS(n)	((n) * LVL_SIZE)

/* Level depth */
#if HZ > 100
# define LVL_DEPTH	9
# else
# define LVL_DEPTH	8
#endif

/* The cutoff (max. capacity of the wheel) */
#define WHEEL_TIMEOUT_CUTOFF	(LVL_START(LVL_DEPTH))
#define WHEEL_TIMEOUT_MAX	(WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))

/*
 * The resulting wheel size. If NOHZ is configured we allocate two
 * wheels so we have a separate storage for the deferrable timers.
 */
#define WHEEL_SIZE	(LVL_SIZE * LVL_DEPTH)

#ifdef CONFIG_NO_HZ_COMMON
# define NR_BASES	2
# define BASE_STD	0
# define BASE_DEF	1
#else
# define NR_BASES	1
# define BASE_STD	0
# define BASE_DEF	0
#endif

struct timer_base {
	raw_spinlock_t		lock;
	struct timer_list	*running_timer;
#ifdef CONFIG_PREEMPT_RT
	spinlock_t		expiry_lock;
	atomic_t		timer_waiters;
#endif
	unsigned long		clk;
	unsigned long		next_expiry;
	unsigned int		cpu;
	bool			next_expiry_recalc;
	bool			is_idle;
	bool			timers_pending;
	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
	struct hlist_head	vectors[WHEEL_SIZE];
} ____cacheline_aligned;

static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);

#ifdef CONFIG_NO_HZ_COMMON

static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
static DEFINE_MUTEX(timer_keys_mutex);

static void timer_update_keys(struct work_struct *work);
static DECLARE_WORK(timer_update_work, timer_update_keys);

#ifdef CONFIG_SMP
static unsigned int sysctl_timer_migration = 1;

DEFINE_STATIC_KEY_FALSE(timers_migration_enabled);

static void timers_update_migration(void)
{
	if (sysctl_timer_migration && tick_nohz_active)
		static_branch_enable(&timers_migration_enabled);
	else
		static_branch_disable(&timers_migration_enabled);
}

#ifdef CONFIG_SYSCTL
static int timer_migration_handler(struct ctl_table *table, int write,
				   void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	mutex_lock(&timer_keys_mutex);
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (!ret && write)
		timers_update_migration();
	mutex_unlock(&timer_keys_mutex);
	return ret;
}

static struct ctl_table timer_sysctl[] = {
	{
		.procname	= "timer_migration",
		.data		= &sysctl_timer_migration,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= timer_migration_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{}
};

static int __init timer_sysctl_init(void)
{
	register_sysctl("kernel", timer_sysctl);
	return 0;
}
device_initcall(timer_sysctl_init);
#endif /* CONFIG_SYSCTL */
#else
static inline void timers_update_migration(void) { }
#endif /* CONFIG_SMP */

static void timer_update_keys(struct work_struct *work)
{
	mutex_lock(&timer_keys_mutex);
	timers_update_migration();
	static_branch_enable(&timers_nohz_active);
	mutex_unlock(&timer_keys_mutex);
}

void timers_update_nohz(void)
{
	schedule_work(&timer_update_work);
}

static inline bool is_timers_nohz_active(void)
{
	return static_branch_unlikely(&timers_nohz_active);
}
#else
static inline bool is_timers_nohz_active(void) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

static unsigned long round_jiffies_common(unsigned long j, int cpu,
					  bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * uses some per cpu variable to skew the per cpu timer tick.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffie is just after a whole second (which can
	 * happen due to delays of the timer irq, long irq off times etc etc)
	 * then we should round down to the whole second, not up. Use 1/4th
	 * second as cutoff for this rounding as an extreme upper bound for
	 * this. But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up)
		j = j - rem;
	else
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	/*
	 * Make sure j is still in the future. Otherwise return the
	 * unmodified value.
	 */
	return time_is_after_jiffies(j) ? j : original;
}

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long
 * as they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead to lock
 * contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * Same as __round_jiffies(), but @j is a relative timeout; the rounding
 * is skewed per CPU to avoid all processors firing at the same time.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds, using the local CPU for
 * the skew. See __round_jiffies() for details.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * Same as round_jiffies(), but for a relative timeout.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will
 * never round down.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will
 * never round down.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
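
/*
 * Illustrative usage of the rounding helpers (hypothetical driver code,
 * not part of this file): a periodic housekeeping timer that does not
 * need exact expiry can be rounded to a full second so that many such
 * timers fire together and the CPU can stay asleep longer:
 *
 *	mod_timer(&mydev->gc_timer, round_jiffies(jiffies + 5 * HZ));
 *
 * For relative delays, e.g. with delayed work, the _relative variants
 * keep the rounding correct:
 *
 *	queue_delayed_work(wq, &mydev->gc_work, round_jiffies_relative(HZ));
 */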

static inline unsigned int timer_get_idx(struct timer_list *timer)
{
	return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
}

static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
{
	timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
			idx << TIMER_ARRAYSHIFT;
}

/*
 * Helper function to calculate the array index for a given expiry
 * time.
 */
static inline unsigned calc_index(unsigned long expires, unsigned lvl,
				  unsigned long *bucket_expiry)
{
	/*
	 * The timer wheel has to guarantee that a timer does not fire
	 * early. Early expiry can happen due to:
	 * - Timer is armed at the edge of a tick
	 * - Truncation of the expiry time in the outer wheel levels
	 *
	 * Round up with level granularity to prevent this.
	 */
	expires = (expires >> LVL_SHIFT(lvl)) + 1;
	*bucket_expiry = expires << LVL_SHIFT(lvl);
	return LVL_OFFS(lvl) + (expires & LVL_MASK);
}

static int calc_wheel_index(unsigned long expires, unsigned long clk,
			    unsigned long *bucket_expiry)
{
	unsigned long delta = expires - clk;
	unsigned int idx;

	if (delta < LVL_START(1)) {
		idx = calc_index(expires, 0, bucket_expiry);
	} else if (delta < LVL_START(2)) {
		idx = calc_index(expires, 1, bucket_expiry);
	} else if (delta < LVL_START(3)) {
		idx = calc_index(expires, 2, bucket_expiry);
	} else if (delta < LVL_START(4)) {
		idx = calc_index(expires, 3, bucket_expiry);
	} else if (delta < LVL_START(5)) {
		idx = calc_index(expires, 4, bucket_expiry);
	} else if (delta < LVL_START(6)) {
		idx = calc_index(expires, 5, bucket_expiry);
	} else if (delta < LVL_START(7)) {
		idx = calc_index(expires, 6, bucket_expiry);
	} else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
		idx = calc_index(expires, 7, bucket_expiry);
	} else if ((long) delta < 0) {
		idx = clk & LVL_MASK;
		*bucket_expiry = clk;
	} else {
		/*
		 * Force expire obscene large timeouts to expire at the
		 * capacity limit of the wheel.
		 */
		if (delta >= WHEEL_TIMEOUT_CUTOFF)
			expires = clk + WHEEL_TIMEOUT_MAX;

		idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry);
	}
	return idx;
}
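
/*
 * Worked example (LVL_CLK_SHIFT = 3, LVL_BITS = 6): a timer armed 200
 * jiffies ahead has delta >= LVL_START(1) == 63 and delta < LVL_START(2)
 * == 504, so it is queued in level 1 where the granularity is
 * LVL_GRAN(1) == 8 jiffies. calc_index() advances the expiry to the next
 * level-1 bucket boundary, which guarantees the timer cannot fire early.
 */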

static void
trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
{
	if (!is_timers_nohz_active())
		return;

	/*
	 * TODO: This wants some optimizing similar to the code below, but we
	 * will do that when we switch from push to pull for deferrable timers.
	 */
	if (timer->flags & TIMER_DEFERRABLE) {
		if (tick_nohz_full_cpu(base->cpu))
			wake_up_nohz_cpu(base->cpu);
		return;
	}

	/*
	 * We might have to IPI the remote CPU if the base is idle and the
	 * timer is not deferrable. If the other CPU is on the way to idle
	 * then it can't set base->is_idle as we hold the base lock:
	 */
	if (base->is_idle)
		wake_up_nohz_cpu(base->cpu);
}

/*
 * Enqueue the timer into the hash bucket, mark it pending in
 * the bitmap, store the index in the timer flags then wake up
 * the target CPU if needed.
 */
static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
			  unsigned int idx, unsigned long bucket_expiry)
{
	hlist_add_head(&timer->entry, base->vectors + idx);
	__set_bit(idx, base->pending_map);
	timer_set_idx(timer, idx);

	trace_timer_start(timer, timer->expires, timer->flags);

	/*
	 * Check whether this is the new first expiring timer. The
	 * effective expiry time of the timer is required here
	 * (bucket_expiry) instead of timer->expires.
	 */
	if (time_before(bucket_expiry, base->next_expiry)) {
		/*
		 * Set the next expiry time and kick the CPU so it
		 * can reevaluate the wheel:
		 */
		base->next_expiry = bucket_expiry;
		base->timers_pending = true;
		base->next_expiry_recalc = false;
		trigger_dyntick_cpu(base, timer);
	}
}

static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
{
	unsigned long bucket_expiry;
	unsigned int idx;

	idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry);
	enqueue_timer(base, timer, idx, bucket_expiry);
}

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static const struct debug_obj_descr timer_debug_descr;

struct timer_hint {
	void	(*function)(struct timer_list *t);
	long	offset;
};

#define TIMER_HINT(fn, container, timr, hintfn)			\
	{							\
		.function = fn,					\
		.offset	  = offsetof(container, hintfn) -	\
			    offsetof(container, timr)		\
	}

static const struct timer_hint timer_hints[] = {
	TIMER_HINT(delayed_work_timer_fn,
		   struct delayed_work, timer, work.func),
	TIMER_HINT(kthread_delayed_work_timer_fn,
		   struct kthread_delayed_work, timer, work.func),
};

static void *timer_debug_hint(void *addr)
{
	struct timer_list *timer = addr;
	int i;

	for (i = 0; i < ARRAY_SIZE(timer_hints); i++) {
		if (timer_hints[i].function == timer->function) {
			void (**fn)(void) = addr + timer_hints[i].offset;

			return *fn;
		}
	}

	return timer->function;
}

static bool timer_is_static_object(void *addr)
{
	struct timer_list *timer = addr;

	return (timer->entry.pprev == NULL &&
		timer->entry.next == TIMER_ENTRY_STATIC);
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return true;
	default:
		return false;
	}
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(struct timer_list *unused)
{
	WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		timer_setup(timer, stub_timer, 0);
		return true;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);
		fallthrough;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninitialized object is found
 */
static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		timer_setup(timer, stub_timer, 0);
		return true;
	default:
		return false;
	}
}

static const struct debug_obj_descr timer_debug_descr = {
	.name			= "timer_list",
	.debug_hint		= timer_debug_hint,
	.is_static_object	= timer_is_static_object,
	.fixup_init		= timer_fixup_init,
	.fixup_activate		= timer_fixup_activate,
	.fixup_free		= timer_fixup_free,
	.fixup_assert_init	= timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
	debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer,
			  void (*func)(struct timer_list *),
			  unsigned int flags,
			  const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer,
			     void (*func)(struct timer_list *),
			     unsigned int flags,
			     const char *name, struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	do_init_timer(timer, func, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer,
			  void (*func)(struct timer_list *),
			  unsigned int flags,
			  const char *name, struct lock_class_key *key)
{
	timer->entry.pprev = NULL;
	timer->function = func;
	if (WARN_ON_ONCE(flags & ~TIMER_INIT_FLAGS))
		flags &= TIMER_INIT_FLAGS;
	timer->flags = flags | raw_smp_processor_id();
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @func: timer callback function
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the timer's lock
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer,
		    void (*func)(struct timer_list *), unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, func, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);

static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
	struct hlist_node *entry = &timer->entry;

	debug_deactivate(timer);

	__hlist_del(entry);
	if (clear_pending)
		entry->pprev = NULL;
	entry->next = LIST_POISON2;
}

static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
			     bool clear_pending)
{
	unsigned idx = timer_get_idx(timer);

	if (!timer_pending(timer))
		return 0;

	if (hlist_is_singular_node(&timer->entry, base->vectors + idx)) {
		__clear_bit(idx, base->pending_map);
		base->next_expiry_recalc = true;
	}

	detach_timer(timer, clear_pending);
	return 1;
}

static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
{
	struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);

	/*
	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
	 * to use the deferrable base.
	 */
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
		base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
	return base;
}

static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	/*
	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
	 * to use the deferrable base.
	 */
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
		base = this_cpu_ptr(&timer_bases[BASE_DEF]);
	return base;
}

static inline struct timer_base *get_timer_base(u32 tflags)
{
	return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
}

static inline struct timer_base *
get_target_base(struct timer_base *base, unsigned tflags)
{
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
	if (static_branch_likely(&timers_migration_enabled) &&
	    !(tflags & TIMER_PINNED))
		return get_timer_cpu_base(tflags, get_nohz_timer_target());
#endif
	return get_timer_this_cpu_base(tflags);
}

static inline void forward_timer_base(struct timer_base *base)
{
	unsigned long jnow = READ_ONCE(jiffies);

	/*
	 * No need to forward if we are close enough below jiffies.
	 * Also while executing timers, base->clk is 1 offset ahead
	 * of jiffies to avoid endless requeuing to current jiffies.
	 */
	if ((long)(jnow - base->clk) < 1)
		return;

	/*
	 * If the next expiry value is > jiffies, then we fast forward to
	 * jiffies otherwise we forward to the next expiry value.
	 */
	if (time_after(base->next_expiry, jnow)) {
		base->clk = jnow;
	} else {
		if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk)))
			return;
		base->clk = base->next_expiry;
	}
}

/*
 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
 * that all timers which are tied to this base are locked, and the base
 * itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found in the base->vectors array.
 *
 * When a timer is migrating then the TIMER_MIGRATING flag is set and we
 * need to wait until the migration is done.
 */
static struct timer_base *lock_timer_base(struct timer_list *timer,
					  unsigned long *flags)
	__acquires(timer->base->lock)
{
	for (;;) {
		struct timer_base *base;
		u32 tf;

		/*
		 * We need to use READ_ONCE() here, otherwise the compiler
		 * might re-read @tf between the check for TIMER_MIGRATING
		 * and spin_lock().
		 */
		tf = READ_ONCE(timer->flags);

		if (!(tf & TIMER_MIGRATING)) {
			base = get_timer_base(tf);
			raw_spin_lock_irqsave(&base->lock, *flags);
			if (timer->flags == tf)
				return base;
			raw_spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

#define MOD_TIMER_PENDING_ONLY		0x01
#define MOD_TIMER_REDUCE		0x02
#define MOD_TIMER_NOTPENDING		0x04

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options)
{
	unsigned long clk = 0, flags, bucket_expiry;
	struct timer_base *base, *new_base;
	unsigned int idx = UINT_MAX;
	int ret = 0;

	BUG_ON(!timer->function);

	/*
	 * This is a common optimization triggered by the networking code - if
	 * the timer is re-modified to have the same timeout or ends up in the
	 * same array bucket then just return:
	 */
	if (!(options & MOD_TIMER_NOTPENDING) && timer_pending(timer)) {
		/*
		 * The downside of this optimization is that it can result in
		 * larger granularity than you would get from adding a new
		 * timer with this expiry.
		 */
		long diff = timer->expires - expires;

		if (!diff)
			return 1;
		if (options & MOD_TIMER_REDUCE && diff <= 0)
			return 1;

		/*
		 * We lock timer base and calculate the bucket index right
		 * here. If the timer ends up in the same bucket, then we
		 * just update the expiry time and avoid the whole
		 * dequeue/enqueue dance.
		 */
		base = lock_timer_base(timer, &flags);
		forward_timer_base(base);

		if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) &&
		    time_before_eq(timer->expires, expires)) {
			ret = 1;
			goto out_unlock;
		}

		clk = base->clk;
		idx = calc_wheel_index(expires, clk, &bucket_expiry);

		/*
		 * Retrieve and compare the array index of the pending
		 * timer. If it matches set the expiry to the new value so a
		 * subsequent call will exit in the expires check above.
		 */
		if (idx == timer_get_idx(timer)) {
			if (!(options & MOD_TIMER_REDUCE))
				timer->expires = expires;
			else if (time_after(timer->expires, expires))
				timer->expires = expires;
			ret = 1;
			goto out_unlock;
		}
	} else {
		base = lock_timer_base(timer, &flags);
		forward_timer_base(base);
	}

	ret = detach_if_pending(timer, base, false);
	if (!ret && (options & MOD_TIMER_PENDING_ONLY))
		goto out_unlock;

	new_base = get_target_base(base, timer->flags);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the new base.
		 * However we can't change the base while the timer is
		 * running, otherwise del_timer_sync() can't detect that the
		 * timer's handler yet has not finished. This also guarantees
		 * that the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer->flags |= TIMER_MIGRATING;

			raw_spin_unlock(&base->lock);
			base = new_base;
			raw_spin_lock(&base->lock);
			WRITE_ONCE(timer->flags,
				   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
			forward_timer_base(base);
		}
	}

	debug_timer_activate(timer);

	timer->expires = expires;
	/*
	 * If 'idx' was calculated above and the base time did not change
	 * since then, we can expedite the enqueue into the hash bucket
	 * directly. Otherwise we need to (re)calculate the wheel index via
	 * internal_add_timer().
	 */
	if (idx != UINT_MAX && clk == base->clk)
		enqueue_timer(base, timer, idx, bucket_expiry);
	else
		internal_add_timer(base, timer);

out_unlock:
	raw_spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not activate inactive timers.
 *
 * Return: 1 if the timer was pending and its expiry was modified,
 * 0 otherwise.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, MOD_TIMER_PENDING_ONLY);
}
EXPORT_SYMBOL(mod_timer_pending);

/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated).
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * but more efficient: if the timer is already pending and ends up in
 * the same bucket, it is left queued where it is.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_timer);

/**
 * timer_reduce - Modify a timer's timeout if it would reduce the timeout
 * @timer: The timer to be modified
 * @expires: New timeout in jiffies
 *
 * timer_reduce() is very similar to mod_timer(), except that it will
 * only modify a pending timer if that would reduce the expiration time.
 */
int timer_reduce(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, MOD_TIMER_REDUCE);
}
EXPORT_SYMBOL(timer_reduce);

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(@timer) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires and ->function fields must be set prior to
 * calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the
 * next timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	__mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING);
}
EXPORT_SYMBOL(add_timer);
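
/*
 * Illustrative usage (hypothetical 'foo' driver code, not part of this
 * file): set up the timer once with timer_setup(), then (re)arm it with
 * mod_timer(), which handles both inactive and pending timers:
 *
 *	timer_setup(&foo->timeout, foo_timeout_fn, 0);
 *	mod_timer(&foo->timeout, jiffies + msecs_to_jiffies(100));
 *
 * add_timer() may only be used when the timer is known not to be pending.
 */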

/**
 * add_timer_on - Start a timer on a particular CPU
 * @timer: The timer to be started
 * @cpu: The CPU to start it on
 *
 * Same as add_timer() except that it starts the timer on the given CPU.
 *
 * See add_timer() for further details.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct timer_base *new_base, *base;
	unsigned long flags;

	BUG_ON(timer_pending(timer) || !timer->function);

	new_base = get_timer_cpu_base(timer->flags, cpu);

	/*
	 * If @timer was on a different CPU, it must be migrated with the
	 * old base locked to prevent other operations proceeding with the
	 * wrong base locked.  See lock_timer_base().
	 */
	base = lock_timer_base(timer, &flags);
	if (base != new_base) {
		timer->flags |= TIMER_MIGRATING;

		raw_spin_unlock(&base->lock);
		base = new_base;
		raw_spin_lock(&base->lock);
		WRITE_ONCE(timer->flags,
			   (timer->flags & ~TIMER_BASEMASK) | cpu);
	}
	forward_timer_base(base);

	debug_timer_activate(timer);
	internal_add_timer(base, timer);
	raw_spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and
 * inactive timers.
 *
 * The function returns whether it has deactivated a pending timer or
 * not. (ie. del_timer() of an inactive timer returns 0, del_timer() of
 * an active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		raw_spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: Timer to deactivate
 *
 * This function tries to deactivate a timer. On success the timer is
 * not queued and the timer callback function is not running on any CPU.
 *
 * A negative return value means the timer callback is currently running
 * and the timer could not be deactivated.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer)
		ret = detach_if_pending(timer, base, true);

	raw_spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

#ifdef CONFIG_PREEMPT_RT
static __init void timer_base_init_expiry_lock(struct timer_base *base)
{
	spin_lock_init(&base->expiry_lock);
}

static inline void timer_base_lock_expiry(struct timer_base *base)
{
	spin_lock(&base->expiry_lock);
}

static inline void timer_base_unlock_expiry(struct timer_base *base)
{
	spin_unlock(&base->expiry_lock);
}

/*
 * The counterpart to del_timer_wait_running().
 *
 * If there is a waiter for base->expiry_lock, then it was waiting for
 * the currently running timer callback to finish. Drop the lock and
 * reacquire it so the waiter can acquire it and make progress.
 */
static void timer_sync_wait_running(struct timer_base *base)
{
	if (atomic_read(&base->timer_waiters)) {
		raw_spin_unlock_irq(&base->lock);
		spin_unlock(&base->expiry_lock);
		spin_lock(&base->expiry_lock);
		raw_spin_lock_irq(&base->lock);
	}
}

/*
 * This function is called on PREEMPT_RT kernels when the fast path
 * deletion of a timer failed because the timer callback function was
 * running.
 *
 * This prevents priority inversion and a live lock when the task which
 * tries to delete the timer preempted the softirq thread which is
 * running the timer callback function.
 */
static void del_timer_wait_running(struct timer_list *timer)
{
	u32 tf;

	tf = READ_ONCE(timer->flags);
	if (!(tf & (TIMER_MIGRATING | TIMER_IRQSAFE))) {
		struct timer_base *base = get_timer_base(tf);

		/*
		 * Mark the base as contended and grab the expiry lock,
		 * which is held by the softirq across the timer callback.
		 * Drop the lock immediately so the softirq can expire the
		 * next timer. In theory the timer could already be running
		 * again, but that's more than unlikely and just causes
		 * another wait loop.
		 */
		atomic_inc(&base->timer_waiters);
		spin_lock_bh(&base->expiry_lock);
		atomic_dec(&base->timer_waiters);
		spin_unlock_bh(&base->expiry_lock);
	}
}
#else
static inline void timer_base_init_expiry_lock(struct timer_base *base) { }
static inline void timer_base_lock_expiry(struct timer_base *base) { }
static inline void timer_base_unlock_expiry(struct timer_base *base) { }
static inline void timer_sync_wait_running(struct timer_base *base) { }
static inline void del_timer_wait_running(struct timer_list *timer) { }
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() in that it also waits for
 * a possibly running timer callback function on another CPU to finish.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's callback
 * function. The timer's handler must not call add_timer_on(). Upon exit
 * the timer is not queued and the handler is not running on any CPU.
 *
 * For !irqsafe timers, the caller must also not hold locks that are
 * taken in interrupt context, even if the lock has nothing to do with
 * the timer in question, because the softirq which runs the callback
 * could deadlock against it.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
	int ret;

#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * don't use it in hardirq context, because it
	 * could lead to deadlock.
	 */
	WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));

	/*
	 * Must be able to sleep on PREEMPT_RT because of the slowpath in
	 * del_timer_wait_running().
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(timer->flags & TIMER_IRQSAFE))
		lockdep_assert_preemption_enabled();

	do {
		ret = try_to_del_timer_sync(timer);

		if (unlikely(ret < 0)) {
			del_timer_wait_running(timer);
			cpu_relax();
		}
	} while (ret < 0);

	return ret;
}
EXPORT_SYMBOL(del_timer_sync);
#endif

static void call_timer_fn(struct timer_list *timer,
			  void (*fn)(struct timer_list *),
			  unsigned long baseclk)
{
	int count = preempt_count();

#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the timer from inside the
	 * function that is called from it, this we need to take into
	 * account for lockdep too.  To avoid bogus "held lock freed"
	 * warnings as well as problems when looking into
	 * timer->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
	/*
	 * Couple the lock chain with the lock chain at
	 * del_timer_sync() by acquiring the lock_map around the fn()
	 * call here and in del_timer_sync().
	 */
	lock_map_acquire(&lockdep_map);

	trace_timer_expire_entry(timer, baseclk);
	fn(timer);
	trace_timer_expire_exit(timer);

	lock_map_release(&lockdep_map);

	if (count != preempt_count()) {
		WARN_ONCE(1, "timer: %pS preempt leak: %08x -> %08x\n",
			  fn, count, preempt_count());
		/*
		 * Restore the preempt count. That gives us a decent
		 * chance to survive and extract information. If the
		 * callback kept a lock held, bad luck, but not worse
		 * than the BUG() we had.
		 */
		preempt_count_set(count);
	}
}

static void expire_timers(struct timer_base *base, struct hlist_head *head)
{
	/*
	 * This value is required only for tracing. base->clk was
	 * incremented directly before expire_timers was called. But expiry
	 * is related to the original clk value which was before the
	 * increment.
	 */
	unsigned long baseclk = base->clk - 1;

	while (!hlist_empty(head)) {
		struct timer_list *timer;
		void (*fn)(struct timer_list *);

		timer = hlist_entry(head->first, struct timer_list, entry);

		base->running_timer = timer;
		detach_timer(timer, true);

		fn = timer->function;

		if (timer->flags & TIMER_IRQSAFE) {
			raw_spin_unlock(&base->lock);
			call_timer_fn(timer, fn, baseclk);
			raw_spin_lock(&base->lock);
			base->running_timer = NULL;
		} else {
			raw_spin_unlock_irq(&base->lock);
			call_timer_fn(timer, fn, baseclk);
			raw_spin_lock_irq(&base->lock);
			base->running_timer = NULL;
			timer_sync_wait_running(base);
		}
	}
}

static int collect_expired_timers(struct timer_base *base,
				  struct hlist_head *heads)
{
	unsigned long clk = base->clk = base->next_expiry;
	struct hlist_head *vec;
	int i, levels = 0;
	unsigned int idx;

	for (i = 0; i < LVL_DEPTH; i++) {
		idx = (clk & LVL_MASK) + i * LVL_SIZE;

		if (__test_and_clear_bit(idx, base->pending_map)) {
			vec = base->vectors + idx;
			hlist_move_list(vec, heads++);
			levels++;
		}
		/* Is it time to look at the next level? */
		if (clk & LVL_CLK_MASK)
			break;
		/* Shift clock for the next level granularity */
		clk >>= LVL_CLK_SHIFT;
	}
	return levels;
}

/*
 * Find the next pending bucket of a level. Search from level start (@offset)
 * + @clk upwards and if nothing there, search from start of the level
 * (@offset) up to @clk.
 */
static int next_pending_bucket(struct timer_base *base, unsigned offset,
			       unsigned clk)
{
	unsigned pos, start = offset + clk;
	unsigned end = offset + LVL_SIZE;

	pos = find_next_bit(base->pending_map, end, start);
	if (pos < end)
		return pos - start;

	pos = find_next_bit(base->pending_map, start, offset);
	return pos < start ? pos + LVL_SIZE - start : -1;
}

/*
 * Search the first expiring timer in the various clock levels. Caller must
 * hold base->lock.
 */
static unsigned long __next_timer_interrupt(struct timer_base *base)
{
	unsigned long clk, next, adj;
	unsigned lvl, offset = 0;

	next = base->clk + NEXT_TIMER_MAX_DELTA;
	clk = base->clk;
	for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
		int pos = next_pending_bucket(base, offset, clk & LVL_MASK);
		unsigned long lvl_clk = clk & LVL_CLK_MASK;

		if (pos >= 0) {
			unsigned long tmp = clk + (unsigned long) pos;

			tmp <<= LVL_SHIFT(lvl);
			if (time_before(tmp, next))
				next = tmp;

			/*
			 * If the next expiration happens before we reach
			 * the next level, no need to check further.
			 */
			if (pos <= ((LVL_CLK_DIV - lvl_clk) & LVL_CLK_MASK))
				break;
		}

		/*
		 * Clock for the next level. If the current level clock lower
		 * bits are zero, we look at the next level as is. If not we
		 * need to advance it by one because that's going to be the
		 * next expiring bucket in that level. base->clk is the next
		 * expiring jiffie. So in case of:
		 *
		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
		 *  0    0    0    0    0    0
		 *
		 * we have to look at all levels @index 0. With
		 *
		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
		 *  0    0    0    0    0    2
		 *
		 * LVL0 has the next expiring bucket @index 2. The upper
		 * levels have the next expiring bucket @index 1.
		 *
		 * If the propagation wraps the next level, the same rules
		 * apply and the carry propagates into the upper level as
		 * well.
		 */
		adj = lvl_clk ? 1 : 0;
		clk >>= LVL_CLK_SHIFT;
		clk += adj;
	}

	base->next_expiry_recalc = false;
	base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA);

	return next;
}

#ifdef CONFIG_NO_HZ_COMMON
/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
{
	u64 nextevt = hrtimer_get_next_event();

	/*
	 * If high resolution timers are enabled
	 * hrtimer_get_next_event() returns KTIME_MAX.
	 */
	if (expires <= nextevt)
		return expires;

	/*
	 * If the next timer is already expired, return the tick base
	 * time so the tick is fired immediately.
	 */
	if (nextevt <= basem)
		return basem;

	/*
	 * Round up to the next jiffie. High resolution timers are
	 * off, so the hrtimers are expired in the tick and we need to
	 * make sure that this tick really expires the timer to avoid
	 * a ping pong of the nohz stop code.
	 *
	 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
	 */
	return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
}

/**
 * get_next_timer_interrupt - return the time (clock mono) of the next timer
 * @basej:	base time jiffies
 * @basem:	base time clock monotonic
 *
 * Returns the tick aligned clock monotonic time of the next pending
 * timer or KTIME_MAX if no timer is pending.
 */
u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
	u64 expires = KTIME_MAX;
	unsigned long nextevt;

	/*
	 * Pretend that there is no timer pending if the cpu is offline.
	 * Possible pending timers will be migrated later to an active cpu.
	 */
	if (cpu_is_offline(smp_processor_id()))
		return expires;

	raw_spin_lock(&base->lock);
	if (base->next_expiry_recalc)
		base->next_expiry = __next_timer_interrupt(base);
	nextevt = base->next_expiry;

	/*
	 * We have a fresh next event. Check whether we can forward the
	 * base. We can only do that when @basej is past base->clk
	 * otherwise we might rewind base->clk.
	 */
	if (time_after(basej, base->clk)) {
		if (time_after(nextevt, basej))
			base->clk = basej;
		else if (time_after(nextevt, base->clk))
			base->clk = nextevt;
	}

	if (time_before_eq(nextevt, basej)) {
		expires = basem;
		base->is_idle = false;
	} else {
		if (base->timers_pending)
			expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
		/*
		 * If we expect to sleep more than a tick, mark the base idle.
		 * Also the tick is stopped so any added timer must forward
		 * the base clk itself to keep granularity small. This idle
		 * logic is only maintained for the BASE_STD base, deferrable
		 * timers may still see large granularity skew (by design).
		 */
		if ((expires - basem) > TICK_NSEC)
			base->is_idle = true;
	}
	raw_spin_unlock(&base->lock);

	return cmp_next_hrtimer_event(basem, expires);
}

/**
 * timer_clear_idle - Clear the idle state of the timer base
 *
 * Called with interrupts disabled
 */
void timer_clear_idle(void)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	/*
	 * We do this unlocked. The worst outcome is a remote enqueue sending
	 * a pointless IPI, but taking the lock would just make the window for
	 * sending the IPI a few instructions smaller for the cost of taking
	 * the lock in the exit from idle path.
	 */
	base->is_idle = false;
}
#endif

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 */
static inline void __run_timers(struct timer_base *base)
{
	struct hlist_head heads[LVL_DEPTH];
	int levels;

	if (time_before(jiffies, base->next_expiry))
		return;

	timer_base_lock_expiry(base);
	raw_spin_lock_irq(&base->lock);

	while (time_after_eq(jiffies, base->clk) &&
	       time_after_eq(jiffies, base->next_expiry)) {
		levels = collect_expired_timers(base, heads);
		/*
		 * Finding no expired timer here is only legitimate when all
		 * matching timers have been dequeued (which flags a recalc)
		 * or when no timer has been queued since base::next_expiry
		 * was set to base::clk + NEXT_TIMER_MAX_DELTA.
		 */
		WARN_ON_ONCE(!levels && !base->next_expiry_recalc
			     && base->timers_pending);
		base->clk++;
		base->next_expiry = __next_timer_interrupt(base);

		while (levels--)
			expire_timers(base, heads + levels);
	}
	raw_spin_unlock_irq(&base->lock);
	timer_base_unlock_expiry(base);
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static __latent_entropy void run_timer_softirq(struct softirq_action *h)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	__run_timers(base);
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
static void run_local_timers(void)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	hrtimer_run_queues();
	/* Raise the softirq only if required. */
	if (time_before(jiffies, base->next_expiry)) {
		if (!IS_ENABLED(CONFIG_NO_HZ_COMMON))
			return;
		/* CPU is awake, so check the deferrable base. */
		base++;
		if (time_before(jiffies, base->next_expiry))
			return;
	}
	raise_softirq(TIMER_SOFTIRQ);
}

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	rcu_sched_clock_irq(user_tick);
#ifdef CONFIG_IRQ_WORK
	if (in_irq())
		irq_work_tick();
#endif
	scheduler_tick();
	if (IS_ENABLED(CONFIG_POSIX_TIMERS))
		run_posix_cpu_timers();
}

/*
 * Since schedule_timeout()'s timer is defined on the stack, it must store
 * the target task on the stack as well.
 */
struct process_timer {
	struct timer_list timer;
	struct task_struct *task;
};

static void process_timeout(struct timer_list *t)
{
	struct process_timer *timeout = from_timer(timeout, t, timer);

	wake_up_process(timeout->task);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have elapsed.
 * The function behaviour depends on the current task state
 * (see also set_current_state() description):
 *
 * %TASK_RUNNING - the scheduler is called, but the task does not sleep
 * at all. That happens because sched_submit_work() does nothing for
 * tasks in %TASK_RUNNING state.
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns unless the current task is explicitly
 * woken up, (e.g. by wake_up_process()).
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task or the current task is explicitly woken
 * up.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * Returns 0 when the timer has expired otherwise the remaining time in
 * jiffies will be returned. In all cases the return value is guaranteed
 * to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct process_timer timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * This special case is useful to be comfortable in the
		 * caller. Nothing more: schedule without arming a timer
		 * and return a valid (non-negative) value.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). The printk() below tells
		 * you if something went wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			__set_current_state(TASK_RUNNING);
			goto out;
		}
	}

	expire = timeout + jiffies;

	timer.task = current;
	timer_setup_on_stack(&timer.timer, process_timeout, 0);
	__mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING);
	schedule();
	del_singleshot_timer_sync(&timer.timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer.timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);

/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/*
 * Like schedule_timeout_uninterruptible(), except this task will not
 * contribute to load average.
 */
signed long __sched schedule_timeout_idle(signed long timeout)
{
	__set_current_state(TASK_IDLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_idle);
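
/*
 * Illustrative usage (hypothetical wait, not part of this file): callers
 * of plain schedule_timeout() must set the task state first, e.g. to
 * sleep for up to one second unless woken earlier:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *
 * The helpers above set the state internally and are preferred for the
 * common cases.
 */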

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
{
	struct timer_list *timer;
	int cpu = new_base->cpu;

	while (!hlist_empty(head)) {
		timer = hlist_entry(head->first, struct timer_list, entry);
		detach_timer(timer, false);
		timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
		internal_add_timer(new_base, timer);
	}
}

int timers_prepare_cpu(unsigned int cpu)
{
	struct timer_base *base;
	int b;

	for (b = 0; b < NR_BASES; b++) {
		base = per_cpu_ptr(&timer_bases[b], cpu);
		base->clk = jiffies;
		base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
		base->next_expiry_recalc = false;
		base->timers_pending = false;
		base->is_idle = false;
	}
	return 0;
}

int timers_dead_cpu(unsigned int cpu)
{
	struct timer_base *old_base;
	struct timer_base *new_base;
	int b, i;

	BUG_ON(cpu_online(cpu));

	for (b = 0; b < NR_BASES; b++) {
		old_base = per_cpu_ptr(&timer_bases[b], cpu);
		new_base = get_cpu_ptr(&timer_bases[b]);
		/*
		 * The caller is globally serialized and nobody else
		 * takes two locks at once, deadlock is not possible.
		 */
		raw_spin_lock_irq(&new_base->lock);
		raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

		/*
		 * The current CPUs base clock might be stale. Update it
		 * before moving the timers over.
		 */
		forward_timer_base(new_base);

		BUG_ON(old_base->running_timer);

		for (i = 0; i < WHEEL_SIZE; i++)
			migrate_timer_list(new_base, old_base->vectors + i);

		raw_spin_unlock(&old_base->lock);
		raw_spin_unlock_irq(&new_base->lock);
		put_cpu_ptr(&timer_bases);
	}
	return 0;
}

#endif /* CONFIG_HOTPLUG_CPU */

static void __init init_timer_cpu(int cpu)
{
	struct timer_base *base;
	int i;

	for (i = 0; i < NR_BASES; i++) {
		base = per_cpu_ptr(&timer_bases[i], cpu);
		base->cpu = cpu;
		raw_spin_lock_init(&base->lock);
		base->clk = jiffies;
		base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
		timer_base_init_expiry_lock(base);
	}
}

static void __init init_timer_cpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		init_timer_cpu(cpu);
}

void __init init_timers(void)
{
	init_timer_cpus();
	posix_cputimers_init_work();
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);

/**
 * usleep_range_state - Sleep for an approximate time in a given state
 * @min:	Minimum time in usecs to sleep
 * @max:	Maximum time in usecs to sleep
 * @state:	State of the current task that will be while sleeping
 *
 * In non-atomic context where the exact wakeup time is flexible, use
 * usleep_range_state() instead of udelay(). The sleep improves
 * responsiveness by avoiding the CPU-hogging busy-wait of udelay(), and
 * the range reduces power usage by allowing hrtimers to take advantage
 * of an already scheduled interrupt instead of scheduling a new one just
 * for this sleep.
 */
void __sched usleep_range_state(unsigned long min, unsigned long max,
				unsigned int state)
{
	ktime_t exp = ktime_add_us(ktime_get(), min);
	u64 delta = (u64)(max - min) * NSEC_PER_USEC;

	for (;;) {
		__set_current_state(state);
		/* Do not return before the requested sleep time has elapsed */
		if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
			break;
	}
}
EXPORT_SYMBOL(usleep_range_state);
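
/*
 * Illustrative usage: drivers normally call the usleep_range() wrapper
 * from <linux/delay.h>, e.g. to wait 100-200 us for (hypothetical)
 * hardware to settle in non-atomic context:
 *
 *	usleep_range(100, 200);
 */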