0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include <linux/cpu.h>
0011 #include <linux/err.h>
0012 #include <linux/hrtimer.h>
0013 #include <linux/interrupt.h>
0014 #include <linux/nmi.h>
0015 #include <linux/percpu.h>
0016 #include <linux/profile.h>
0017 #include <linux/sched.h>
0018 #include <linux/module.h>
0019 #include <trace/events/power.h>
0020
0021 #include <asm/irq_regs.h>
0022
0023 #include "tick-internal.h"
0024
0025
0026
0027
/*
 * Tick devices: per-CPU clock event device used to drive the periodic tick.
 */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
0029
0030
0031
0032
0033
/*
 * Expiry time of the next periodic tick. Updated under jiffies_seq in
 * tick_periodic() and read under the same seqcount in tick_setup_periodic().
 */
ktime_t tick_next_period;
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
/*
 * tick_do_timer_cpu is the CPU which is responsible for the jiffies and
 * wall-time update (see tick_periodic()). TICK_DO_TIMER_BOOT means the
 * duty has not been claimed yet; the first CPU that sets up a tick device
 * takes it over in tick_setup_device().
 */
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
#ifdef CONFIG_NO_HZ_FULL
/*
 * If the boot CPU is nohz_full it keeps the do_timer duty only until the
 * first housekeeping CPU comes up and takes it over via
 * tick_take_do_timer_from_boot(). -1 means no such hand-over is pending.
 */
static int tick_do_timer_boot_cpu __read_mostly = -1;
#endif
0059
0060
0061
0062
0063 struct tick_device *tick_get_device(int cpu)
0064 {
0065 return &per_cpu(tick_cpu_device, cpu);
0066 }
0067
0068
0069
0070
0071 int tick_is_oneshot_available(void)
0072 {
0073 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
0074
0075 if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
0076 return 0;
0077 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
0078 return 1;
0079 return tick_broadcast_oneshot_available();
0080 }
0081
0082
0083
0084
/*
 * Periodic tick handler: the do_timer CPU advances jiffies and wall time
 * under jiffies_lock/jiffies_seq; every CPU then runs its local periodic
 * housekeeping (process times, profiling).
 */
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
		raw_spin_lock(&jiffies_lock);
		write_seqcount_begin(&jiffies_seq);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add_ns(tick_next_period, TICK_NSEC);

		do_timer(1);
		write_seqcount_end(&jiffies_seq);
		raw_spin_unlock(&jiffies_lock);
		/* Deliberately outside the lock/seqcount section. */
		update_wall_time();
	}

	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}
0103
0104
0105
0106
/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next = dev->next_event;

	tick_periodic(cpu);

#if defined(CONFIG_HIGH_RES_TIMERS) || defined(CONFIG_NO_HZ_COMMON)
	/*
	 * The CPU may have switched to highres/nohz mode inside
	 * tick_periodic() above, replacing dev->event_handler.
	 * Bail out if the handler is no longer us.
	 */
	if (dev->event_handler != tick_handle_periodic)
		return;
#endif

	if (!clockevent_state_oneshot(dev))
		return;
	for (;;) {
		/*
		 * Set up the next tick for devices which do not have a
		 * hardware periodic mode:
		 */
		next = ktime_add_ns(next, TICK_NSEC);

		if (!clockevents_program_event(dev, next, false))
			return;
		/*
		 * Have to be careful here. If we're in oneshot mode,
		 * before we call tick_periodic() in a loop, we need
		 * to be sure we're using a real hardware clocksource.
		 * Otherwise we could get trapped in an infinite loop, as
		 * tick_periodic() increments jiffies, which then will
		 * increment time, possibly causing the loop to trigger
		 * again and again.
		 */
		if (timekeeping_valid_for_hres())
			tick_periodic(cpu);
	}
}
0148
0149
0150
0151
/*
 * Setup the device for a periodic tick
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup ? */
	if (!tick_device_is_functional(dev))
		return;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !tick_broadcast_oneshot_active()) {
		clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);
	} else {
		unsigned int seq;
		ktime_t next;

		/* Snapshot tick_next_period consistently vs. the updater. */
		do {
			seq = read_seqcount_begin(&jiffies_seq);
			next = tick_next_period;
		} while (read_seqcount_retry(&jiffies_seq, seq));

		clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);

		/* Advance until the event can be programmed in the future. */
		for (;;) {
			if (!clockevents_program_event(dev, next, false))
				return;
			next = ktime_add_ns(next, TICK_NSEC);
		}
	}
}
0181
0182 #ifdef CONFIG_NO_HZ_FULL
0183 static void giveup_do_timer(void *info)
0184 {
0185 int cpu = *(unsigned int *)info;
0186
0187 WARN_ON(tick_do_timer_cpu != smp_processor_id());
0188
0189 tick_do_timer_cpu = cpu;
0190 }
0191
0192 static void tick_take_do_timer_from_boot(void)
0193 {
0194 int cpu = smp_processor_id();
0195 int from = tick_do_timer_boot_cpu;
0196
0197 if (from >= 0 && from != cpu)
0198 smp_call_function_single(from, giveup_do_timer, &cpu, 1);
0199 }
0200 #endif
0201
0202
0203
0204
/*
 * Setup the tick device
 */
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
			      const struct cpumask *cpumask)
{
	void (*handler)(struct clock_event_device *) = NULL;
	ktime_t next_event = 0;

	/*
	 * First device setup ?
	 */
	if (!td->evtdev) {
		/*
		 * If no cpu took the do_timer update, assign it to
		 * this cpu:
		 */
		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
			tick_do_timer_cpu = cpu;

			tick_next_period = ktime_get();
#ifdef CONFIG_NO_HZ_FULL
			/*
			 * If the boot CPU is nohz_full, remember it so that
			 * the first housekeeping CPU coming up (else-if
			 * branch below) can take the duty away from it.
			 */
			if (tick_nohz_full_cpu(cpu))
				tick_do_timer_boot_cpu = cpu;

		} else if (tick_do_timer_boot_cpu != -1 &&
			   !tick_nohz_full_cpu(cpu)) {
			tick_take_do_timer_from_boot();
			tick_do_timer_boot_cpu = -1;
			/* The synchronous hand-over must have worked. */
			WARN_ON(tick_do_timer_cpu != cpu);
#endif
		}

		/*
		 * Startup in periodic mode first.
		 */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
		/* Preserve old handler/expiry for the hand-over below. */
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
		td->evtdev->event_handler = clockevents_handle_noop;
	}

	td->evtdev = newdev;

	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */
	if (!cpumask_equal(newdev->cpumask, cpumask))
		irq_set_affinity(newdev->irq, cpumask);

	/*
	 * When the new device is used by the broadcast machinery
	 * (tick_device_uses_broadcast() returns true), it must not be
	 * set up as the local tick device here.
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
	else
		tick_setup_oneshot(newdev, handler, next_event);
}
0276
/*
 * Install a replacement clock event device as the per-CPU tick device of
 * the current CPU, exchanging it with the clockevents core first.
 */
void tick_install_replacement(struct clock_event_device *newdev)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	int cpu = smp_processor_id();

	clockevents_exchange_device(td->evtdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	/* Notify the oneshot machinery if the new device supports it. */
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
}
0287
0288 static bool tick_check_percpu(struct clock_event_device *curdev,
0289 struct clock_event_device *newdev, int cpu)
0290 {
0291 if (!cpumask_test_cpu(cpu, newdev->cpumask))
0292 return false;
0293 if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
0294 return true;
0295
0296 if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq))
0297 return false;
0298
0299 if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
0300 return false;
0301 return true;
0302 }
0303
0304 static bool tick_check_preferred(struct clock_event_device *curdev,
0305 struct clock_event_device *newdev)
0306 {
0307
0308 if (!(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) {
0309 if (curdev && (curdev->features & CLOCK_EVT_FEAT_ONESHOT))
0310 return false;
0311 if (tick_oneshot_mode_active())
0312 return false;
0313 }
0314
0315
0316
0317
0318
0319 return !curdev ||
0320 newdev->rating > curdev->rating ||
0321 !cpumask_equal(curdev->cpumask, newdev->cpumask);
0322 }
0323
0324
0325
0326
0327
0328 bool tick_check_replacement(struct clock_event_device *curdev,
0329 struct clock_event_device *newdev)
0330 {
0331 if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
0332 return false;
0333
0334 return tick_check_preferred(curdev, newdev);
0335 }
0336
0337
0338
0339
0340
/*
 * Check whether the newly registered clock event device is usable as the
 * per-CPU tick device; install it if so, otherwise offer it to the
 * broadcast machinery.
 */
void tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu;

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	if (!tick_check_replacement(curdev, newdev))
		goto out_bc;

	/* Pin the owning module while the device is in use. */
	if (!try_module_get(newdev->owner))
		return;

	/*
	 * Replace the eventually existing device by the new device. If the
	 * current device is the broadcast device, do not give it back to
	 * the clockevents layer: shut it down and keep it for broadcast.
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
	return;

out_bc:
	/*
	 * Can the new device be used as a broadcast device ?
	 */
	tick_install_broadcast_device(newdev, cpu);
}
0378
0379
0380
0381
0382
0383
0384
0385
0386
0387
0388
0389
0390 int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
0391 {
0392 struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
0393
0394 if (!(td->evtdev->features & CLOCK_EVT_FEAT_C3STOP))
0395 return 0;
0396
0397 return __tick_broadcast_oneshot_control(state);
0398 }
0399 EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);
0400
0401 #ifdef CONFIG_HOTPLUG_CPU
0402
0403
0404
0405
0406
0407
0408 void tick_handover_do_timer(void)
0409 {
0410 if (tick_do_timer_cpu == smp_processor_id())
0411 tick_do_timer_cpu = cpumask_first(cpu_online_mask);
0412 }
0413
0414
0415
0416
0417
0418
0419
0420
/*
 * Shutdown the tick device of @cpu — presumably called on a live CPU
 * during hotplug teardown of @cpu (CONFIG_HOTPLUG_CPU); confirm against
 * callers. Detaches the event device and neutralizes its handler so a
 * stale interrupt cannot run old code.
 */
void tick_shutdown(unsigned int cpu)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
	struct clock_event_device *dev = td->evtdev;

	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent that the clock events layer tries to call
		 * the set mode function!
		 */
		clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
		clockevents_exchange_device(dev, NULL);
		dev->event_handler = clockevents_handle_noop;
		td->evtdev = NULL;
	}
}
0438 #endif
0439
0440
0441
0442
0443
0444
0445
0446
0447 void tick_suspend_local(void)
0448 {
0449 struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
0450
0451 clockevents_shutdown(td->evtdev);
0452 }
0453
0454
0455
0456
0457
0458
0459
0460
/**
 * tick_resume_local - Resume the local tick device
 *
 * Resumes the clock event device and, unless broadcast took over while
 * suspended (tick_resume_check_broadcast()), reprograms it according to
 * the tick device's mode.
 */
void tick_resume_local(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	bool broadcast = tick_resume_check_broadcast();

	clockevents_tick_resume(td->evtdev);
	if (!broadcast) {
		if (td->mode == TICKDEV_MODE_PERIODIC)
			tick_setup_periodic(td->evtdev, 0);
		else
			tick_resume_oneshot();
	}

	/*
	 * Re-arm per-CPU hrtimer state now that the tick device is working
	 * again.
	 */
	hrtimers_resume_local();
}
0481
0482
0483
0484
0485
0486
0487
0488
0489
0490
/**
 * tick_suspend - Suspend the local tick and the broadcast device
 *
 * Suspends the local tick first, then the broadcast machinery — the
 * mirror order of tick_resume(). Presumably called late in the suspend
 * path; confirm against callers.
 */
void tick_suspend(void)
{
	tick_suspend_local();
	tick_suspend_broadcast();
}
0496
0497
0498
0499
0500
0501
0502
0503
0504
/**
 * tick_resume - Resume the broadcast device and the local tick
 *
 * Counterpart to tick_suspend(). The broadcast device is resumed first so
 * tick_resume_local() sees the correct tick_resume_check_broadcast()
 * result.
 */
void tick_resume(void)
{
	tick_resume_broadcast();
	tick_resume_local();
}
0510
0511 #ifdef CONFIG_SUSPEND
/* Serializes tick_freeze()/tick_unfreeze() and protects tick_freeze_depth. */
static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
/* Number of CPUs that entered tick_freeze() and have not yet unfrozen. */
static unsigned int tick_freeze_depth;
0514
0515
0516
0517
0518
0519
0520
0521
0522
0523
/**
 * tick_freeze - Suspend the local tick and (possibly) timekeeping
 *
 * Checks whether this is the last online CPU entering here (tracked via
 * tick_freeze_depth) and if so suspends timekeeping entirely; otherwise
 * only the local tick is suspended. Counterpart: tick_unfreeze().
 */
void tick_freeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	tick_freeze_depth++;
	if (tick_freeze_depth == num_online_cpus()) {
		/* Last CPU in: stop global timekeeping as well. */
		trace_suspend_resume(TPS("timekeeping_freeze"),
				     smp_processor_id(), true);
		system_state = SYSTEM_SUSPEND;
		sched_clock_suspend();
		timekeeping_suspend();
	} else {
		tick_suspend_local();
	}

	raw_spin_unlock(&tick_freeze_lock);
}
0541
0542
0543
0544
0545
0546
0547
0548
0549
0550
/**
 * tick_unfreeze - Resume the local tick and (possibly) timekeeping
 *
 * Checks whether this is the first CPU executing the function after
 * timekeeping was frozen by tick_freeze() (depth still equals the number
 * of online CPUs) and if so resumes timekeeping; otherwise only resumes
 * the local tick.
 */
void tick_unfreeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	if (tick_freeze_depth == num_online_cpus()) {
		/* First CPU out: bring global timekeeping back. */
		timekeeping_resume();
		sched_clock_resume();
		system_state = SYSTEM_RUNNING;
		trace_suspend_resume(TPS("timekeeping_freeze"),
				     smp_processor_id(), false);
	} else {
		/* Avoid false soft-lockup reports after the long pause. */
		touch_softlockup_watchdog();
		tick_resume_local();
	}

	tick_freeze_depth--;

	raw_spin_unlock(&tick_freeze_lock);
}
0570 #endif
0571
0572
0573
0574
/**
 * tick_init - initialize the tick control
 *
 * Sets up the broadcast machinery and the NOHZ infrastructure at boot.
 */
void __init tick_init(void)
{
	tick_broadcast_init();
	tick_nohz_init();
}