Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * cpuidle.h - a generic framework for CPU idle power management
0003  *
0004  * (C) 2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
0005  *          Shaohua Li <shaohua.li@intel.com>
0006  *          Adam Belay <abelay@novell.com>
0007  *
0008  * This code is licenced under the GPL.
0009  */
0010 
0011 #ifndef _LINUX_CPUIDLE_H
0012 #define _LINUX_CPUIDLE_H
0013 
0014 #include <linux/percpu.h>
0015 #include <linux/list.h>
0016 #include <linux/hrtimer.h>
0017 
/* Upper bounds used to size the fixed arrays and name/desc buffers below. */
#define CPUIDLE_STATE_MAX   10
#define CPUIDLE_NAME_LEN    16
#define CPUIDLE_DESC_LEN    32

struct module;

/*
 * Forward declarations: device and driver reference each other through the
 * callback signatures declared further down in this header.
 */
struct cpuidle_device;
struct cpuidle_driver;
0027 
0028 /****************************
0029  * CPUIDLE DEVICE INTERFACE *
0030  ****************************/
0031 
/* Bit values stored in cpuidle_state_usage.disable: who disabled the state. */
#define CPUIDLE_STATE_DISABLED_BY_USER      BIT(0)
#define CPUIDLE_STATE_DISABLED_BY_DRIVER    BIT(1)

/*
 * Per-CPU, per-state usage statistics; one entry per idle state in
 * cpuidle_device.states_usage[].
 */
struct cpuidle_state_usage {
    unsigned long long  disable;   /* CPUIDLE_STATE_DISABLED_BY_* bits */
    unsigned long long  usage;     /* presumably entry count — maintained by the core */
    u64         time_ns;           /* presumably total residency in this state */
    unsigned long long  above; /* Number of times it's been too deep */
    unsigned long long  below; /* Number of times it's been too shallow */
    unsigned long long  rejected; /* Number of times idle entry was rejected */
#ifdef CONFIG_SUSPEND
    /* Separate counters for suspend-to-idle entries via ->enter_s2idle. */
    unsigned long long  s2idle_usage;
    unsigned long long  s2idle_time; /* in US */
#endif
};
0047 
/*
 * Description of one idle state a driver exposes.  The *_ns fields carry
 * exit latency and target residency in nanoseconds alongside the legacy
 * microsecond fields of the same names.
 */
struct cpuidle_state {
    char        name[CPUIDLE_NAME_LEN];
    char        desc[CPUIDLE_DESC_LEN];

    s64     exit_latency_ns;
    s64     target_residency_ns;
    unsigned int    flags;            /* CPUIDLE_FLAG_* bits (see below) */
    unsigned int    exit_latency; /* in US */
    int     power_usage; /* in mW */
    unsigned int    target_residency; /* in US */

    /*
     * Enter the idle state.  By the convention encoded in
     * __CPU_PM_CPU_IDLE_ENTER() below, returns the index of the state
     * actually entered, or a negative value on failure.
     */
    int (*enter)    (struct cpuidle_device *dev,
            struct cpuidle_driver *drv,
            int index);

    /* Enter the state on a CPU that is going offline/dead. */
    int (*enter_dead) (struct cpuidle_device *dev, int index);

    /*
     * CPUs execute ->enter_s2idle with the local tick or entire timekeeping
     * suspended, so it must not re-enable interrupts at any point (even
     * temporarily) or attempt to change states of clock event devices.
     *
     * This callback may point to the same function as ->enter if all of
     * the above requirements are met by it.
     */
    int (*enter_s2idle)(struct cpuidle_device *dev,
                struct cpuidle_driver *drv,
                int index);
};
0077 
/* Idle State Flags — stored in cpuidle_state.flags */
#define CPUIDLE_FLAG_NONE           (0x00)
#define CPUIDLE_FLAG_POLLING        BIT(0) /* polling state */
#define CPUIDLE_FLAG_COUPLED        BIT(1) /* state applies to multiple cpus */
#define CPUIDLE_FLAG_TIMER_STOP     BIT(2) /* timer is stopped on this state */
#define CPUIDLE_FLAG_UNUSABLE       BIT(3) /* avoid using this state */
#define CPUIDLE_FLAG_OFF        BIT(4) /* disable this state by default */
#define CPUIDLE_FLAG_TLB_FLUSHED    BIT(5) /* idle-state flushes TLBs */
#define CPUIDLE_FLAG_RCU_IDLE       BIT(6) /* idle-state takes care of RCU */

/* Opaque kobject wrappers, defined in the cpuidle core (presumably sysfs). */
struct cpuidle_device_kobj;
struct cpuidle_state_kobj;
struct cpuidle_driver_kobj;
0091 
/* Per-CPU cpuidle state: registration status, last-entry bookkeeping,
 * per-state usage stats and the kobjects exposing them. */
struct cpuidle_device {
    unsigned int        registered:1;
    unsigned int        enabled:1;
    unsigned int        poll_time_limit:1;
    unsigned int        cpu;              /* CPU number this device belongs to */
    ktime_t         next_hrtimer;

    int         last_state_idx;           /* index of the state last entered */
    u64         last_residency_ns;
    u64         poll_limit_ns;
    u64         forced_idle_latency_limit_ns;
    struct cpuidle_state_usage  states_usage[CPUIDLE_STATE_MAX];
    struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
    struct cpuidle_driver_kobj *kobj_driver;
    struct cpuidle_device_kobj *kobj_dev;
    struct list_head    device_list;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
    /* CPUs that must enter/exit coupled states together. */
    cpumask_t       coupled_cpus;
    struct cpuidle_coupled  *coupled;
#endif
};

/* Per-CPU pointer to the registered device, and per-CPU backing storage. */
DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
0117 
0118 /****************************
0119  * CPUIDLE DRIVER INTERFACE *
0120  ****************************/
0121 
/* A cpuidle driver: the table of idle states it provides and the set of
 * CPUs it handles. */
struct cpuidle_driver {
    const char      *name;
    struct module       *owner;   /* module providing the driver, for refcounting */

        /* used by the cpuidle framework to setup the broadcast timer */
    unsigned int            bctimer:1;
    /* states array must be ordered in decreasing power consumption */
    struct cpuidle_state    states[CPUIDLE_STATE_MAX];
    int         state_count;          /* number of valid entries in states[] */
    int         safe_state_index;

    /* the driver handles the cpus in cpumask */
    struct cpumask      *cpumask;

    /* preferred governor to switch at register time */
    const char      *governor;
};
0139 
#ifdef CONFIG_CPU_IDLE
/* Core cpuidle API, implemented when CONFIG_CPU_IDLE=y. */
extern void disable_cpuidle(void);
extern bool cpuidle_not_available(struct cpuidle_driver *drv,
                  struct cpuidle_device *dev);

/* Governor-driven state selection / entry / feedback cycle. */
extern int cpuidle_select(struct cpuidle_driver *drv,
              struct cpuidle_device *dev,
              bool *stop_tick);
extern int cpuidle_enter(struct cpuidle_driver *drv,
             struct cpuidle_device *dev, int index);
extern void cpuidle_reflect(struct cpuidle_device *dev, int index);
extern u64 cpuidle_poll_time(struct cpuidle_driver *drv,
                 struct cpuidle_device *dev);

/* Driver and device registration. */
extern int cpuidle_register_driver(struct cpuidle_driver *drv);
extern struct cpuidle_driver *cpuidle_get_driver(void);
extern void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
                    bool disable);
extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
extern int cpuidle_register_device(struct cpuidle_device *dev);
extern void cpuidle_unregister_device(struct cpuidle_device *dev);
extern int cpuidle_register(struct cpuidle_driver *drv,
                const struct cpumask *const coupled_cpus);
extern void cpuidle_unregister(struct cpuidle_driver *drv);
extern void cpuidle_pause_and_lock(void);
extern void cpuidle_resume_and_unlock(void);
extern void cpuidle_pause(void);
extern void cpuidle_resume(void);
extern int cpuidle_enable_device(struct cpuidle_device *dev);
extern void cpuidle_disable_device(struct cpuidle_device *dev);
extern int cpuidle_play_dead(void);

extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
/* Device registered for the current CPU (may be NULL before registration). */
static inline struct cpuidle_device *cpuidle_get_device(void)
{return __this_cpu_read(cpuidle_devices); }
#else
/*
 * Stubs for CONFIG_CPU_IDLE=n: operations that can fail return -ENODEV,
 * queries return NULL/0/true, and the rest are no-ops, so callers need no
 * #ifdefs of their own.
 */
static inline void disable_cpuidle(void) { }
static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
                     struct cpuidle_device *dev)
{return true; }
static inline int cpuidle_select(struct cpuidle_driver *drv,
                 struct cpuidle_device *dev, bool *stop_tick)
{return -ENODEV; }
static inline int cpuidle_enter(struct cpuidle_driver *drv,
                struct cpuidle_device *dev, int index)
{return -ENODEV; }
static inline void cpuidle_reflect(struct cpuidle_device *dev, int index) { }
static inline u64 cpuidle_poll_time(struct cpuidle_driver *drv,
                 struct cpuidle_device *dev)
{return 0; }
static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
{return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
static inline void cpuidle_driver_state_disabled(struct cpuidle_driver *drv,
                           int idx, bool disable) { }
static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
static inline int cpuidle_register_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { }
static inline int cpuidle_register(struct cpuidle_driver *drv,
                   const struct cpumask *const coupled_cpus)
{return -ENODEV; }
static inline void cpuidle_unregister(struct cpuidle_driver *drv) { }
static inline void cpuidle_pause_and_lock(void) { }
static inline void cpuidle_resume_and_unlock(void) { }
static inline void cpuidle_pause(void) { }
static inline void cpuidle_resume(void) { }
static inline int cpuidle_enable_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
static inline int cpuidle_play_dead(void) {return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
    struct cpuidle_device *dev) {return NULL; }
static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#endif
0215 
#ifdef CONFIG_CPU_IDLE
/* Forced-idle / suspend-to-idle entry points, used by the scheduler and PM
 * core to bypass the governor and pick the deepest suitable state. */
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
                      struct cpuidle_device *dev,
                      u64 latency_limit_ns);
extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
                struct cpuidle_device *dev);
extern void cpuidle_use_deepest_state(u64 latency_limit_ns);
#else
/* CONFIG_CPU_IDLE=n stubs: lookups fail with -ENODEV, the hint is a no-op. */
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
                         struct cpuidle_device *dev,
                         u64 latency_limit_ns)
{return -ENODEV; }
static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
                       struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_use_deepest_state(u64 latency_limit_ns)
{
}
#endif
0235 
/* kernel/sched/idle.c */
extern void sched_idle_set_state(struct cpuidle_state *idle_state);
extern void default_idle_call(void);

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
/* Barrier used by coupled-state drivers to rendezvous the coupled CPUs. */
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
#else
/* No coupled states configured: the barrier degenerates to a no-op. */
static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
{
}
#endif
0247 
#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_ARCH_HAS_CPU_RELAX)
/* Install the polling state into the driver's state table (state 0). */
void cpuidle_poll_state_init(struct cpuidle_driver *drv);
#else
static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {}
#endif
0253 
/******************************
 * CPUIDLE GOVERNOR INTERFACE *
 ******************************/

/*
 * A governor decides which idle state to enter.  ->select chooses a state
 * index (and may ask to keep the tick via *stop_tick); ->reflect feeds the
 * measured outcome back after wakeup.
 */
struct cpuidle_governor {
    char            name[CPUIDLE_NAME_LEN];
    struct list_head    governor_list;     /* linkage in the global governor list */
    unsigned int        rating;            /* presumably used to pick the default governor */

    int  (*enable)      (struct cpuidle_driver *drv,
                    struct cpuidle_device *dev);
    void (*disable)     (struct cpuidle_driver *drv,
                    struct cpuidle_device *dev);

    int  (*select)      (struct cpuidle_driver *drv,
                    struct cpuidle_device *dev,
                    bool *stop_tick);
    void (*reflect)     (struct cpuidle_device *dev, int index);
};

extern int cpuidle_register_governor(struct cpuidle_governor *gov);
/* Current latency constraint (ns) that governors must honor for @cpu. */
extern s64 cpuidle_governor_latency_req(unsigned int cpu);
0276 
/*
 * Helper for implementing cpuidle_state.enter callbacks: enter @state via
 * @low_level_idle_enter, bracketing the call with cpu_pm_enter()/cpu_pm_exit()
 * unless @is_retention is non-zero (retention states keep CPU context, so no
 * PM notifications are needed).
 *
 * NOTE: index 0 short-circuits to cpu_do_idle() and executes `return idx;`
 * from the ENCLOSING function, so this macro may only be used directly inside
 * a function returning int.  Otherwise the statement expression evaluates to
 * @idx on success and -1 on failure, matching the ->enter convention.
 */
#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter,           \
                idx,                    \
                state,                  \
                is_retention)               \
({                                  \
    int __ret = 0;                          \
                                    \
    if (!idx) {                         \
        cpu_do_idle();                      \
        return idx;                     \
    }                               \
                                    \
    if (!is_retention)                      \
        __ret =  cpu_pm_enter();                \
    if (!__ret) {                           \
        __ret = low_level_idle_enter(state);            \
        if (!is_retention)                  \
            cpu_pm_exit();                  \
    }                               \
                                    \
    __ret ? -1 : idx;                       \
})

/* Non-retention entry; the state index doubles as the low-level argument. */
#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx)    \
    __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0)

/* Retention entry (no cpu_pm notifications); index doubles as the argument. */
#define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx)  \
    __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1)

/* Non-retention entry with a distinct low-level @state parameter. */
#define CPU_PM_CPU_IDLE_ENTER_PARAM(low_level_idle_enter, idx, state)   \
    __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0)

/* Retention entry with a distinct low-level @state parameter. */
#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(low_level_idle_enter, idx, state) \
    __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1)
0311 
0312 #endif /* _LINUX_CPUIDLE_H */