// SPDX-License-Identifier: GPL-2.0
/*
 *  hrtimers - High-resolution kernel timers
 *
 *   Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 *   Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
 *
 *  data type definitions, declarations, prototypes
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 */
#ifndef _LINUX_HRTIMER_H
#define _LINUX_HRTIMER_H

#include <linux/hrtimer_defs.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/seqlock.h>
#include <linux/timer.h>
#include <linux/timerqueue.h>

struct hrtimer_clock_base;
struct hrtimer_cpu_base;

/*
 * Mode arguments of xxx_hrtimer functions:
 *
 * HRTIMER_MODE_ABS     - Time value is absolute
 * HRTIMER_MODE_REL     - Time value is relative to now
 * HRTIMER_MODE_PINNED  - Timer is bound to CPU (is only considered
 *                        when starting the timer)
 * HRTIMER_MODE_SOFT    - Timer callback function will be executed in
 *                        soft irq context
 * HRTIMER_MODE_HARD    - Timer callback function will be executed in
 *                        hard irq context even on PREEMPT_RT.
 */
enum hrtimer_mode {
    HRTIMER_MODE_ABS    = 0x00,
    HRTIMER_MODE_REL    = 0x01,
    HRTIMER_MODE_PINNED = 0x02,
    HRTIMER_MODE_SOFT   = 0x04,
    HRTIMER_MODE_HARD   = 0x08,

    HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
    HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,

    HRTIMER_MODE_ABS_SOFT   = HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT,
    HRTIMER_MODE_REL_SOFT   = HRTIMER_MODE_REL | HRTIMER_MODE_SOFT,

    HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT,
    HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT,

    HRTIMER_MODE_ABS_HARD   = HRTIMER_MODE_ABS | HRTIMER_MODE_HARD,
    HRTIMER_MODE_REL_HARD   = HRTIMER_MODE_REL | HRTIMER_MODE_HARD,

    HRTIMER_MODE_ABS_PINNED_HARD = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_HARD,
    HRTIMER_MODE_REL_PINNED_HARD = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_HARD,
};
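
/*
 * A minimal usage sketch (not part of the upstream header; my_timer and
 * my_timer_fn are hypothetical names): the composite HRTIMER_MODE_* values
 * above are plain ORs of the base flags, and HRTIMER_MODE_PINNED is only
 * honoured when the timer is started, as noted in the comment above:
 *
 *    hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *    my_timer.function = my_timer_fn;
 *    hrtimer_start(&my_timer, ms_to_ktime(10), HRTIMER_MODE_REL_PINNED);
 */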

/*
 * Return values for the callback function
 */
enum hrtimer_restart {
    HRTIMER_NORESTART,  /* Timer is not restarted */
    HRTIMER_RESTART,    /* Timer must be restarted */
};

/*
 * Values to track state of the timer
 *
 * Possible states:
 *
 * 0x00     inactive
 * 0x01     enqueued into rbtree
 *
 * The callback state is not part of the timer->state because clearing it would
 * mean touching the timer after the callback, which would make it impossible
 * to free the timer from the callback function.
 *
 * Therefore we track the callback state in:
 *
 *  timer->base->cpu_base->running == timer
 *
 * On SMP it is possible to have a "callback function running and enqueued"
 * status. It happens for example when a posix timer expired and the callback
 * queued a signal. Between dropping the lock which protects the posix timer
 * and reacquiring the base lock of the hrtimer, another CPU can deliver the
 * signal and rearm the timer.
 *
 * All state transitions are protected by cpu_base->lock.
 */
#define HRTIMER_STATE_INACTIVE  0x00
#define HRTIMER_STATE_ENQUEUED  0x01

/**
 * struct hrtimer - the basic hrtimer structure
 * @node:   timerqueue node, which also manages node.expires,
 *      the absolute expiry time in the hrtimers internal
 *      representation. The time is related to the clock on
 *      which the timer is based. It is set up by adding
 *      slack to the _softexpires value. For non-range timers
 *      it is identical to _softexpires.
 * @_softexpires: the absolute earliest expiry time of the hrtimer.
 *      The time which was given as expiry time when the timer
 *      was armed.
 * @function:   timer expiry callback function
 * @base:   pointer to the timer base (per cpu and per clock)
 * @state:  state information (See bit values above)
 * @is_rel: Set if the timer was armed relative
 * @is_soft:    Set if hrtimer will be expired in soft interrupt context.
 * @is_hard:    Set if hrtimer will be expired in hard interrupt context
 *      even on RT.
 *
 * The hrtimer structure must be initialized by hrtimer_init()
 */
struct hrtimer {
    struct timerqueue_node      node;
    ktime_t             _softexpires;
    enum hrtimer_restart        (*function)(struct hrtimer *);
    struct hrtimer_clock_base   *base;
    u8              state;
    u8              is_rel;
    u8              is_soft;
    u8              is_hard;
};
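
/*
 * A minimal usage sketch (not part of the upstream header): a hrtimer is
 * usually embedded in a larger, caller-private structure and recovered with
 * container_of() from the callback. struct my_dev and my_dev_tick() are
 * hypothetical names.
 *
 *    struct my_dev {
 *        struct hrtimer tick;
 *        ...
 *    };
 *
 *    static enum hrtimer_restart my_dev_tick(struct hrtimer *t)
 *    {
 *        struct my_dev *dev = container_of(t, struct my_dev, tick);
 *
 *        ...handle the expiry for dev...
 *        return HRTIMER_NORESTART;
 *    }
 */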

/**
 * struct hrtimer_sleeper - simple sleeper structure
 * @timer:  embedded timer structure
 * @task:   task to wake up
 *
 * task is set to NULL when the timer expires.
 */
struct hrtimer_sleeper {
    struct hrtimer timer;
    struct task_struct *task;
};
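
/*
 * A minimal usage sketch (not part of the upstream header), loosely modelled
 * on the kernel's nanosleep path; sl and timeout are hypothetical names:
 *
 *    struct hrtimer_sleeper sl;
 *
 *    hrtimer_init_sleeper_on_stack(&sl, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *    hrtimer_set_expires(&sl.timer, timeout);
 *    set_current_state(TASK_INTERRUPTIBLE);
 *    hrtimer_sleeper_start_expires(&sl, HRTIMER_MODE_REL);
 *    if (sl.task)
 *        schedule();
 *    hrtimer_cancel(&sl.timer);
 *    __set_current_state(TASK_RUNNING);
 *    destroy_hrtimer_on_stack(&sl.timer);
 */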

#ifdef CONFIG_64BIT
# define __hrtimer_clock_base_align ____cacheline_aligned
#else
# define __hrtimer_clock_base_align
#endif

/**
 * struct hrtimer_clock_base - the timer base for a specific clock
 * @cpu_base:       per cpu clock base
 * @index:      clock type index for per_cpu support when moving a
 *          timer to a base on another cpu.
 * @clockid:        clock id for per_cpu support
 * @seq:        seqcount around __run_hrtimer
 * @running:        pointer to the currently running hrtimer
 * @active:     red black tree root node for the active timers
 * @get_time:       function to retrieve the current time of the clock
 * @offset:     offset of this clock to the monotonic base
 */
struct hrtimer_clock_base {
    struct hrtimer_cpu_base *cpu_base;
    unsigned int        index;
    clockid_t       clockid;
    seqcount_raw_spinlock_t seq;
    struct hrtimer      *running;
    struct timerqueue_head  active;
    ktime_t         (*get_time)(void);
    ktime_t         offset;
} __hrtimer_clock_base_align;

enum  hrtimer_base_type {
    HRTIMER_BASE_MONOTONIC,
    HRTIMER_BASE_REALTIME,
    HRTIMER_BASE_BOOTTIME,
    HRTIMER_BASE_TAI,
    HRTIMER_BASE_MONOTONIC_SOFT,
    HRTIMER_BASE_REALTIME_SOFT,
    HRTIMER_BASE_BOOTTIME_SOFT,
    HRTIMER_BASE_TAI_SOFT,
    HRTIMER_MAX_CLOCK_BASES,
};

/**
 * struct hrtimer_cpu_base - the per cpu clock bases
 * @lock:       lock protecting the base and associated clock bases
 *          and timers
 * @cpu:        cpu number
 * @active_bases:   Bitfield to mark bases with active timers
 * @clock_was_set_seq:  Sequence counter of clock was set events
 * @hres_active:    State of high resolution mode
 * @in_hrtirq:      hrtimer_interrupt() is currently executing
 * @hang_detected:  The last hrtimer interrupt detected a hang
 * @softirq_activated:  Set if the softirq was raised; an update of softirq
 *          related settings is not required in that case.
 * @nr_events:      Total number of hrtimer interrupt events
 * @nr_retries:     Total number of hrtimer interrupt retries
 * @nr_hangs:       Total number of hrtimer interrupt hangs
 * @max_hang_time:  Maximum time spent in hrtimer_interrupt
 * @softirq_expiry_lock: Lock which is taken while softirq based hrtimers are
 *           expired
 * @timer_waiters:  A hrtimer_cancel() invocation waits for the timer
 *          callback to finish.
 * @expires_next:   absolute time of the next event; it is required for
 *          remote hrtimer enqueue and is the overall first expiry
 *          time (hard and soft hrtimers are taken into account)
 * @next_timer:     Pointer to the first expiring timer
 * @softirq_expires_next: Time at which the soft queues also need to be
 *            checked for expiry
 * @softirq_next_timer: Pointer to the first expiring softirq based timer
 * @clock_base:     array of clock bases for this cpu
 *
 * Note: next_timer is just an optimization for __remove_hrtimer().
 *   Do not dereference the pointer because it is not reliable on
 *   cross cpu removals.
 */
struct hrtimer_cpu_base {
    raw_spinlock_t          lock;
    unsigned int            cpu;
    unsigned int            active_bases;
    unsigned int            clock_was_set_seq;
    unsigned int            hres_active     : 1,
                    in_hrtirq       : 1,
                    hang_detected       : 1,
                    softirq_activated   : 1;
#ifdef CONFIG_HIGH_RES_TIMERS
    unsigned int            nr_events;
    unsigned short          nr_retries;
    unsigned short          nr_hangs;
    unsigned int            max_hang_time;
#endif
#ifdef CONFIG_PREEMPT_RT
    spinlock_t          softirq_expiry_lock;
    atomic_t            timer_waiters;
#endif
    ktime_t             expires_next;
    struct hrtimer          *next_timer;
    ktime_t             softirq_expires_next;
    struct hrtimer          *softirq_next_timer;
    struct hrtimer_clock_base   clock_base[HRTIMER_MAX_CLOCK_BASES];
} ____cacheline_aligned;

static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
    timer->node.expires = time;
    timer->_softexpires = time;
}

static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta)
{
    timer->_softexpires = time;
    timer->node.expires = ktime_add_safe(time, delta);
}

static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, u64 delta)
{
    timer->_softexpires = time;
    timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
}
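
/*
 * For example (illustrative only; "timer" and "now" are hypothetical names),
 * a range timer that may fire at any point between 10ms and 11ms after "now"
 * can be set up as:
 *
 *    hrtimer_set_expires_range_ns(timer, ktime_add_ms(now, 10), NSEC_PER_MSEC);
 *
 * which leaves _softexpires at now + 10ms and node.expires at now + 11ms.
 */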

static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
{
    timer->node.expires = tv64;
    timer->_softexpires = tv64;
}

static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
{
    timer->node.expires = ktime_add_safe(timer->node.expires, time);
    timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
}

static inline void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns)
{
    timer->node.expires = ktime_add_ns(timer->node.expires, ns);
    timer->_softexpires = ktime_add_ns(timer->_softexpires, ns);
}

static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer)
{
    return timer->node.expires;
}

static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
{
    return timer->_softexpires;
}

static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
{
    return timer->node.expires;
}

static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
{
    return timer->_softexpires;
}

static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
{
    return ktime_to_ns(timer->node.expires);
}

static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
{
    return ktime_sub(timer->node.expires, timer->base->get_time());
}

static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
{
    return timer->base->get_time();
}

static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
    return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
        timer->base->cpu_base->hres_active : 0;
}

#ifdef CONFIG_HIGH_RES_TIMERS
struct clock_event_device;

extern void hrtimer_interrupt(struct clock_event_device *dev);

extern unsigned int hrtimer_resolution;

#else

#define hrtimer_resolution  (unsigned int)LOW_RES_NSEC

#endif

static inline ktime_t
__hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
{
    ktime_t rem = ktime_sub(timer->node.expires, now);

    /*
     * Adjust relative timers for the extra we added in
     * hrtimer_start_range_ns() to prevent short timeouts.
     */
    if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel)
        rem -= hrtimer_resolution;
    return rem;
}

static inline ktime_t
hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
{
    return __hrtimer_expires_remaining_adjusted(timer,
                            timer->base->get_time());
}

#ifdef CONFIG_TIMERFD
extern void timerfd_clock_was_set(void);
extern void timerfd_resume(void);
#else
static inline void timerfd_clock_was_set(void) { }
static inline void timerfd_resume(void) { }
#endif

DECLARE_PER_CPU(struct tick_device, tick_cpu_device);

#ifdef CONFIG_PREEMPT_RT
void hrtimer_cancel_wait_running(const struct hrtimer *timer);
#else
static inline void hrtimer_cancel_wait_running(struct hrtimer *timer)
{
    cpu_relax();
}
#endif

/* Exported timer functions: */

/* Initialize timers: */
extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
             enum hrtimer_mode mode);
extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id,
                 enum hrtimer_mode mode);

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock,
                  enum hrtimer_mode mode);
extern void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
                      clockid_t clock_id,
                      enum hrtimer_mode mode);

extern void destroy_hrtimer_on_stack(struct hrtimer *timer);
#else
static inline void hrtimer_init_on_stack(struct hrtimer *timer,
                     clockid_t which_clock,
                     enum hrtimer_mode mode)
{
    hrtimer_init(timer, which_clock, mode);
}

static inline void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
                         clockid_t clock_id,
                         enum hrtimer_mode mode)
{
    hrtimer_init_sleeper(sl, clock_id, mode);
}

static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
#endif
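
/*
 * A minimal on-stack usage sketch (not part of the upstream header); the
 * timer, expiry and callback names are hypothetical:
 *
 *    struct hrtimer t;
 *
 *    hrtimer_init_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *    t.function = my_wakeup_fn;
 *    hrtimer_start(&t, timeout, HRTIMER_MODE_REL);
 *    ...wait for the wakeup...
 *    hrtimer_cancel(&t);
 *    destroy_hrtimer_on_stack(&t);
 */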

/* Basic timer operations: */
extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
                   u64 range_ns, const enum hrtimer_mode mode);

/**
 * hrtimer_start - (re)start an hrtimer
 * @timer:  the timer to be added
 * @tim:    expiry time
 * @mode:   timer mode: absolute (HRTIMER_MODE_ABS) or
 *      relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
 *      softirq based mode is considered for debug purposes only!
 */
static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
                 const enum hrtimer_mode mode)
{
    hrtimer_start_range_ns(timer, tim, 0, mode);
}
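
/*
 * Illustrative only (not part of the upstream header): an absolute-mode
 * start expresses the expiry in the timer's own clock, e.g. for a
 * CLOCK_MONOTONIC based timer "t":
 *
 *    hrtimer_start(&t, ktime_add_us(ktime_get(), 500), HRTIMER_MODE_ABS);
 */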

extern int hrtimer_cancel(struct hrtimer *timer);
extern int hrtimer_try_to_cancel(struct hrtimer *timer);

static inline void hrtimer_start_expires(struct hrtimer *timer,
                     enum hrtimer_mode mode)
{
    u64 delta;
    ktime_t soft, hard;

    soft = hrtimer_get_softexpires(timer);
    hard = hrtimer_get_expires(timer);
    delta = ktime_to_ns(ktime_sub(hard, soft));
    hrtimer_start_range_ns(timer, soft, delta, mode);
}

void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl,
                   enum hrtimer_mode mode);

static inline void hrtimer_restart(struct hrtimer *timer)
{
    hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}

/* Query timers: */
extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);

/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:  the timer to read
 */
static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
    return __hrtimer_get_remaining(timer, false);
}

extern u64 hrtimer_get_next_event(void);
extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);

extern bool hrtimer_active(const struct hrtimer *timer);

/**
 * hrtimer_is_queued - check whether the timer is on one of the queues
 * @timer:  Timer to check
 *
 * Returns: True if the timer is queued, false otherwise
 *
 * The function can be used lockless, but it gives only a current snapshot.
 */
static inline bool hrtimer_is_queued(struct hrtimer *timer)
{
    /* The READ_ONCE pairs with the update functions of timer->state */
    return !!(READ_ONCE(timer->state) & HRTIMER_STATE_ENQUEUED);
}

/*
 * Helper function to check whether the timer is running the callback
 * function
 */
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
    return timer->base->running == timer;
}

/* Forward a hrtimer so it expires after now: */
extern u64
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);

/**
 * hrtimer_forward_now - forward the timer expiry so it expires after now
 * @timer:  hrtimer to forward
 * @interval:   the interval to forward
 *
 * Forward the timer expiry so it will expire after the current time
 * of the hrtimer clock base. Returns the number of overruns.
 *
 * Can be safely called from the callback function of @timer. If
 * called from other contexts @timer must neither be enqueued nor
 * running the callback and the caller needs to take care of
 * serialization.
 *
 * Note: This only updates the timer expiry value and does not requeue
 * the timer.
 */
static inline u64 hrtimer_forward_now(struct hrtimer *timer,
                      ktime_t interval)
{
    return hrtimer_forward(timer, timer->base->get_time(), interval);
}
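
/*
 * A minimal periodic-timer sketch (not part of the upstream header): the
 * callback pushes its own expiry forward and asks to be restarted. The
 * function name and the 100ms period are hypothetical.
 *
 *    static enum hrtimer_restart my_period_fn(struct hrtimer *t)
 *    {
 *        ...do the periodic work...
 *
 *        hrtimer_forward_now(t, ms_to_ktime(100));
 *        return HRTIMER_RESTART;
 *    }
 */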

/* Precise sleep: */

extern int nanosleep_copyout(struct restart_block *, struct timespec64 *);
extern long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
                  const clockid_t clockid);

extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
                    const enum hrtimer_mode mode);
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
                      u64 delta,
                      const enum hrtimer_mode mode,
                      clockid_t clock_id);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
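
/*
 * A minimal usage sketch (not part of the upstream header): the caller sets
 * its task state before calling and is back in TASK_RUNNING on return. The
 * 5ms timeout is hypothetical.
 *
 *    ktime_t timeout = ms_to_ktime(5);
 *
 *    set_current_state(TASK_UNINTERRUPTIBLE);
 *    schedule_hrtimeout(&timeout, HRTIMER_MODE_REL);
 */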

/* Soft interrupt function to run the hrtimer queues: */
extern void hrtimer_run_queues(void);

/* Bootup initialization: */
extern void __init hrtimers_init(void);

/* Show pending timers: */
extern void sysrq_timer_list_show(void);

int hrtimers_prepare_cpu(unsigned int cpu);
#ifdef CONFIG_HOTPLUG_CPU
int hrtimers_dead_cpu(unsigned int cpu);
#else
#define hrtimers_dead_cpu   NULL
#endif

#endif