// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 */
#define pr_fmt(fmt) "PM: " fmt

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include "power.h"

#define GENPD_RETRY_MAX_MS	250

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})
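
/*
 * For example, GENPD_DEV_CALLBACK(genpd, int, stop, dev) evaluates to
 * genpd->dev_ops.stop(dev) when the ->stop() callback is assigned and to
 * (int)0 otherwise; see genpd_stop_dev()/genpd_start_dev() below.
 */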

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
				  int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
				   int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)

#define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
						const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a domain that can't
	 * be powered off in atomic context, as such a domain will then stay
	 * powered on. Skip the warning for always-on domains, as those never
	 * power off anyway.
	 */
	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
		return ret;

	if (ret)
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
			      genpd->name);

	return ret;
}

static int genpd_runtime_suspend(struct device *dev);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	/* A genpd always has its ->runtime_suspend() callback assigned. */
	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
		return pd_to_genpd(dev->pm_domain);

	return NULL;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}
#ifdef CONFIG_DEBUG_FS
static struct dentry *genpd_debugfs_dir;

static void genpd_debug_add(struct generic_pm_domain *genpd);

static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
	struct dentry *d;

	if (!genpd_debugfs_dir)
		return;

	d = debugfs_lookup(genpd->name, genpd_debugfs_dir);
	debugfs_remove(d);
}

static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	u64 delta, now;

	now = ktime_get_mono_fast_ns();
	if (now <= genpd->accounting_time)
		return;

	delta = now - genpd->accounting_time;

	/*
	 * If genpd->status is active, it means we are just
	 * out of off and so update the idle time and vice
	 * versa.
	 */
	if (genpd->status == GENPD_STATE_ON)
		genpd->states[genpd->state_idx].idle_time += delta;
	else
		genpd->on_time += delta;

	genpd->accounting_time = now;
}
#else
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif

static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* New requested state is same as Max requested state */
	if (state == genpd->performance_state)
		return state;

	/* New requested state is higher than Max requested state */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking as the link->performance_state
	 * field is protected by the parent genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (subdomain's performance state
	 * requirement to parent domain) is different from
	 * link->child->performance_state (current performance state requirement
	 * of the devices/sub-domains of the subdomain) and so can have a
	 * different value.
	 *
	 * Note that we also take vote from powered-off sub-domains into account
	 * as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}

static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
					 struct generic_pm_domain *parent,
					 unsigned int pstate)
{
	if (!parent->set_performance_state)
		return pstate;

	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
						  parent->opp_table,
						  pstate);
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct generic_pm_domain *parent;
	struct gpd_link *link;
	int parent_state, ret;

	if (state == genpd->performance_state)
		return 0;

	/* Propagate to parents of genpd */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		parent = link->parent;

		/* Find parent's performance state */
		ret = genpd_xlate_performance_state(genpd, parent, state);
		if (unlikely(ret < 0))
			goto err;

		parent_state = ret;

		genpd_lock_nested(parent, depth + 1);

		link->prev_performance_state = link->performance_state;
		link->performance_state = parent_state;
		parent_state = _genpd_reeval_performance_state(parent,
							       parent_state);
		ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
		if (ret)
			link->performance_state = link->prev_performance_state;

		genpd_unlock(parent);

		if (ret)
			goto err;
	}

	if (genpd->set_performance_state) {
		ret = genpd->set_performance_state(genpd, state);
		if (ret)
			goto err;
	}

	genpd->performance_state = state;
	return 0;

err:
	/* Encountered an error, lets rollback */
	list_for_each_entry_continue_reverse(link, &genpd->child_links,
					     child_node) {
		parent = link->parent;

		genpd_lock_nested(parent, depth + 1);

		parent_state = link->prev_performance_state;
		link->performance_state = parent_state;

		parent_state = _genpd_reeval_performance_state(parent,
							       parent_state);
		if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
			pr_err("%s: Failed to roll back to %d performance state\n",
			       parent->name, parent_state);
		}

		genpd_unlock(parent);
	}

	return ret;
}

static int genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	unsigned int prev_state;
	int ret;

	prev_state = gpd_data->performance_state;
	if (prev_state == state)
		return 0;

	gpd_data->performance_state = state;
	state = _genpd_reeval_performance_state(genpd, state);

	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev_state;

	return ret;
}

static int genpd_drop_performance_state(struct device *dev)
{
	unsigned int prev_state = dev_gpd_data(dev)->performance_state;

	if (!genpd_set_performance_state(dev, 0))
		return prev_state;

	return 0;
}

static void genpd_restore_performance_state(struct device *dev,
					    unsigned int state)
{
	if (state)
		genpd_set_performance_state(dev, state);
}

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance-state needs to be set.
 * @state: Target performance state of the device. This can be set as 0 when
 *	   the device doesn't have any performance state constraints left
 *	   (and so the device wouldn't participate anymore to find the target
 *	   performance state of the PD).
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		    !dev->power.subsys_data->domain_data))
		return -EINVAL;

	genpd_lock(genpd);
	if (pm_runtime_suspended(dev)) {
		dev_gpd_data(dev)->rpm_pstate = state;
	} else {
		ret = genpd_set_performance_state(dev, state);
		if (!ret)
			dev_gpd_data(dev)->rpm_pstate = 0;
	}
	genpd_unlock(genpd);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
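
/*
 * Illustrative sketch (hypothetical consumer; error handling elided): vote
 * for a performance state before heavy work and drop the vote with 0 when
 * done, so the domain can settle to the lowest state other votes allow.
 *
 *	ret = dev_pm_genpd_set_performance_state(dev, pstate);
 *	if (ret)
 *		return ret;
 *	// ...do the performance-sensitive work...
 *	dev_pm_genpd_set_performance_state(dev, 0);
 */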

/**
 * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
 *
 * @dev: Device to handle
 * @next: impending interrupt/wakeup for the device
 *
 * Allow devices to inform of the next wakeup. It's assumed that the users
 * guarantee that the genpd wouldn't be detached while this routine is getting
 * called.
 *
 * The information is stored in the per-device timing data, if such data has
 * been allocated, so that the governor can take the next wakeup into account
 * when selecting a domain idle state.
 */
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
	if (td)
		td->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
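
/*
 * Illustrative sketch: a driver that can predict its next wakeup (e.g. from a
 * periodic interrupt) may pass that time to help the governor choose an idle
 * state. The 10ms horizon below is a made-up example value.
 *
 *	dev_pm_genpd_set_next_wakeup(dev, ktime_add_ms(ktime_get(), 10));
 */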

static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power on. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_ON,
					     GENPD_NOTIFY_OFF, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_on)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		goto err;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		goto out;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return 0;
err:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power off. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_OFF,
					     GENPD_NOTIFY_ON, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_off)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_off(genpd);
		if (ret)
			goto busy;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		goto busy;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		goto out;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return 0;
busy:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the related device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be suspended to satisfy the devices_count == suspended_count (else we'd fail
 * to power off the PM domain).
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;
	int ret;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
		return 0;

	/*
	 * Abort power off for the PM domain in the following situations:
	 * (1) The domain is configured as always on.
	 * (2) When the domain has a subdomain being powered on.
	 */
	if (genpd_is_always_on(genpd) ||
	    genpd_is_rpm_always_on(genpd) ||
	    atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	/*
	 * The children must be in their deepest (powered-off) states to allow
	 * the parent to be powered off. Note that, there's no need for
	 * additional locking, as powering on a child, requires the parent's
	 * lock to be acquired first.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;
		if (child->state_idx < child->state_count - 1)
			return -EBUSY;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		/*
		 * Do not allow PM domain to be powered off, when an IRQ safe
		 * device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
		    irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	/* Don't power off, if a child domain is waiting to power on. */
	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	ret = _genpd_power_off(genpd, true);
	if (ret) {
		genpd->states[genpd->state_idx].rejected++;
		return ret;
	}

	genpd->status = GENPD_STATE_OFF;
	genpd_update_accounting(genpd);
	genpd->states[genpd->state_idx].usage++;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return 0;
}

/**
 * genpd_power_on - Restore power to a given PM domain and its parents.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the parents' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_sd_counter_inc(parent);

		genpd_lock_nested(parent, depth + 1);
		ret = genpd_power_on(parent, depth + 1);
		genpd_unlock(parent);

		if (ret) {
			genpd_sd_counter_dec(parent);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GENPD_STATE_ON;
	genpd_update_accounting(genpd);

	return 0;

err:
	list_for_each_entry_continue_reverse(link,
					     &genpd->child_links,
					     child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return ret;
}

static int genpd_dev_pm_start(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	return genpd_start_dev(genpd, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
		struct pm_domain_data *pdd;
		struct gpd_timing_data *td;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			td = to_gpd_data(pdd)->td;
			if (td) {
				td->constraint_changed = true;
				genpd = dev_to_genpd(dev);
			}
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->gd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	if (td && runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (td && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so power off only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
	genpd_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool timed = td && pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non IRQ safe domain, which holds
	 * an IRQ safe device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		goto out;

	genpd_lock(genpd);
	ret = genpd_power_on(genpd, 0);
	if (!ret)
		genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
	genpd_unlock(genpd);

	if (ret)
		return ret;

out:
	/* Measure resume latency. */
	if (timed)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
		genpd_lock(genpd);
		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
		genpd_power_off(genpd, true, 0);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/*
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_power_off_unused);

#ifdef CONFIG_PM_SLEEP

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its parents.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * this case the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Check that the children are in their deepest (powered-off) state. */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;
		if (child->state_idx < child->state_count - 1)
			return;
	}

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false))
		return;

	genpd->status = GENPD_STATE_OFF;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_off(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * this case the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_inc(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_on(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}

	_genpd_power_on(genpd, false);
	genpd->status = GENPD_STATE_ON;
}

/**
 * genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a power domain
 * consisting of I/O devices.
 */
static int genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->prepared_count++ == 0)
		genpd->suspended_count = 0;

	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd don't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}

/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to suspend.
 * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_finish_suspend(struct device *dev, bool poweroff)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (poweroff)
		ret = pm_generic_poweroff_noirq(dev);
	else
		ret = pm_generic_suspend_noirq(dev);
	if (ret)
		return ret;

	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_stop_dev(genpd, dev);
		if (ret) {
			if (poweroff)
				pm_generic_restore_noirq(dev);
			else
				pm_generic_resume_noirq(dev);
			return ret;
		}
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, false);
}

/**
 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
		return pm_generic_resume_noirq(dev);

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_resume_noirq(dev);
}

/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int genpd_freeze_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = pm_generic_freeze_noirq(dev);
	if (ret)
		return ret;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev))
		ret = genpd_stop_dev(genpd, dev);

	return ret;
}

/**
 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int genpd_thaw_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_thaw_noirq(dev);
}

/**
 * genpd_poweroff_noirq - Completion of hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, true);
}

/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	genpd_lock(genpd);
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to genpd_sync_power_on(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GENPD_STATE_OFF;
	}

	genpd_sync_power_on(genpd, true, 0);
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_restore_noirq(dev);
}

/**
 * genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

static void genpd_switch_state(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;
	bool use_lock;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	use_lock = genpd_is_irq_safe(genpd);

	if (use_lock)
		genpd_lock(genpd);

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, use_lock, 0);
	} else {
		genpd_sync_power_on(genpd, use_lock, 0);
		genpd->suspended_count--;
	}

	if (use_lock)
		genpd_unlock(genpd);
}

/**
 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
 * @dev: The device that is attached to the genpd, that can be suspended.
 *
 * This routine should typically be called for a device that needs to be
 * suspended during the syscore suspend phase. It may also be called during
 * suspend-to-idle to suspend a corresponding CPU device that is attached to a
 * genpd.
 */
void dev_pm_genpd_suspend(struct device *dev)
{
	genpd_switch_state(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);

/**
 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
 * @dev: The device that is attached to the genpd, which needs to be resumed.
 *
 * This routine should typically be called for a device that needs to be
 * resumed during the syscore resume phase. It may also be called during
 * suspend-to-idle to resume a corresponding CPU device that is attached to a
 * genpd.
 */
void dev_pm_genpd_resume(struct device *dev)
{
	genpd_switch_state(dev, false);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);

#else /* !CONFIG_PM_SLEEP */

#define genpd_prepare		NULL
#define genpd_suspend_noirq	NULL
#define genpd_resume_noirq	NULL
#define genpd_freeze_noirq	NULL
#define genpd_thaw_noirq	NULL
#define genpd_poweroff_noirq	NULL
#define genpd_restore_noirq	NULL
#define genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
							    bool has_governor)
{
	struct generic_pm_domain_data *gpd_data;
	struct gpd_timing_data *td;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	gpd_data->base.dev = dev;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	/* Allocate data used by a governor. */
	if (has_governor) {
		td = kzalloc(sizeof(*td), GFP_KERNEL);
		if (!td) {
			ret = -ENOMEM;
			goto err_free;
		}

		td->constraint_changed = true;
		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
		td->next_wakeup = KTIME_MAX;
		gpd_data->td = td;
	}

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data)
		ret = -EINVAL;
	else
		dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	if (ret)
		goto err_free;

	return gpd_data;

err_free:
	kfree(gpd_data->td);
	kfree(gpd_data);
err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data->td);
	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

static void genpd_update_cpumask(struct generic_pm_domain *genpd,
				 int cpu, bool set, unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_is_cpu_domain(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_lock_nested(parent, depth + 1);
		genpd_update_cpumask(parent, cpu, set, depth + 1);
		genpd_unlock(parent);
	}

	if (set)
		cpumask_set_cpu(cpu, genpd->cpus);
	else
		cpumask_clear_cpu(cpu, genpd->cpus);
}

static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, true, 0);
}

static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, false, 0);
}

static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
{
	int cpu;

	if (!genpd_is_cpu_domain(genpd))
		return -1;

	for_each_possible_cpu(cpu) {
		if (get_cpu_device(cpu) == dev)
			return cpu;
	}

	return -1;
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct device *base_dev)
{
	struct genpd_governor_data *gd = genpd->gd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev, gd);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd_lock(genpd);

	genpd_set_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
	if (gd)
		gd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

	genpd_unlock(genpd);
out:
	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
					DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, dev);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_device);
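
/*
 * Illustrative sketch (hypothetical names): after pm_genpd_init() has set up
 * "foo_pd", a platform device can be attached to it.
 *
 *	ret = pm_genpd_add_device(&foo_pd, &pdev->dev);
 *	if (ret)
 *		return ret;
 */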

static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
				   DEV_PM_QOS_RESUME_LATENCY);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	if (genpd->gd)
		genpd->gd->max_off_time_changed = true;

	genpd_clear_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

out:
	genpd_unlock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);

	if (!genpd)
		return -EINVAL;

	return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

/**
 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
 *
 * @dev: Device that is associated with the notifier
 * @nb: The notifier block to register
 *
 * Users may call this function to add a genpd power on/off notifier for an
 * attached @dev. Only one notifier per device is allowed. The notifier is
 * sent when genpd is powering on/off the PM domain.
 *
 * It is assumed that the user guarantees that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		    !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (gpd_data->power_nb)
		return -EEXIST;

	genpd_lock(genpd);
	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to add notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = nb;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
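
/*
 * Illustrative sketch (hypothetical callback and data): save/restore device
 * context around the domain's power off/on transitions.
 *
 *	static int foo_pd_notify(struct notifier_block *nb,
 *				 unsigned long action, void *data)
 *	{
 *		struct foo *foo = container_of(nb, struct foo, pd_nb);
 *
 *		if (action == GENPD_NOTIFY_PRE_OFF)
 *			foo_save_context(foo);
 *		else if (action == GENPD_NOTIFY_ON)
 *			foo_restore_context(foo);
 *		return NOTIFY_OK;
 *	}
 *
 *	foo->pd_nb.notifier_call = foo_pd_notify;
 *	ret = dev_pm_genpd_add_notifier(dev, &foo->pd_nb);
 */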

/**
 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
 *
 * @dev: Device that is associated with the notifier
 *
 * Users may call this function to remove a genpd power on/off notifier for an
 * attached @dev.
 *
 * It is assumed that the user guarantees that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_remove_notifier(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		    !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (!gpd_data->power_nb)
		return -ENODEV;

	genpd_lock(genpd);
	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
					    gpd_data->power_nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);

static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the subdomain can also be
	 * powered on/off in that context.
	 */
	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
		     genpd->name, subdomain->name);
		return -EINVAL;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
		if (itr->child == subdomain && itr->parent == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->parent = genpd;
	list_add_tail(&link->parent_node, &genpd->parent_links);
	link->child = subdomain;
	list_add_tail(&link->child_node, &subdomain->child_links);
	if (genpd_status_on(subdomain))
		genpd_sd_counter_inc(genpd);

out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);
	if (ret)
		kfree(link);
	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Leader PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_subdomain(genpd, subdomain);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Leader PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *l, *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n",
			genpd->name, subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
		if (link->child != subdomain)
			continue;

		list_del(&link->parent_node);
		list_del(&link->child_node);
		kfree(link);
		if (genpd_status_on(subdomain))
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

static void genpd_free_default_power_state(struct genpd_power_state *states,
					   unsigned int state_count)
{
	kfree(states);
}

static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
	struct genpd_power_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	genpd->states = state;
	genpd->state_count = 1;
	genpd->free_states = genpd_free_default_power_state;

	return 0;
}

static int genpd_alloc_data(struct generic_pm_domain *genpd)
{
	struct genpd_governor_data *gd = NULL;
	int ret;

	if (genpd_is_cpu_domain(genpd) &&
	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
		return -ENOMEM;

	if (genpd->gov) {
		gd = kzalloc(sizeof(*gd), GFP_KERNEL);
		if (!gd) {
			ret = -ENOMEM;
			goto free;
		}

		gd->max_off_time_ns = -1;
		gd->max_off_time_changed = true;
		gd->next_wakeup = KTIME_MAX;
	}

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0) {
		ret = genpd_set_default_power_state(genpd);
		if (ret)
			goto free;
	}

	genpd->gd = gd;
	return 0;

free:
	if (genpd_is_cpu_domain(genpd))
		free_cpumask_var(genpd->cpus);
	kfree(gd);
	return ret;
}

static void genpd_free_data(struct generic_pm_domain *genpd)
{
	if (genpd_is_cpu_domain(genpd))
		free_cpumask_var(genpd->cpus);
	if (genpd->free_states)
		genpd->free_states(genpd->states, genpd->state_count);
	kfree(genpd->gd);
}

static void genpd_lock_init(struct generic_pm_domain *genpd)
{
	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
		spin_lock_init(&genpd->slock);
		genpd->lock_ops = &genpd_spin_ops;
	} else {
		mutex_init(&genpd->mlock);
		genpd->lock_ops = &genpd_mtx_ops;
	}
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 *
 * Returns 0 on successful initialization, else a negative error code.
 */
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off)
{
	int ret;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	INIT_LIST_HEAD(&genpd->parent_links);
	INIT_LIST_HEAD(&genpd->child_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
	genpd_lock_init(genpd);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
	genpd->device_count = 0;
	genpd->provider = NULL;
	genpd->has_provider = false;
	genpd->accounting_time = ktime_get_mono_fast_ns();
	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
	genpd->domain.ops.prepare = genpd_prepare;
	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
	genpd->domain.ops.complete = genpd_complete;
	genpd->domain.start = genpd_dev_pm_start;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	/* The always-on governor works better with the corresponding flag. */
	if (gov == &pm_domain_always_on_gov)
		genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;

	/* Always-on domains must be powered on at initialization. */
	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
	    !genpd_status_on(genpd))
		return -EINVAL;

	/* Multiple states but no governor doesn't make sense. */
	if (!gov && genpd->state_count > 1)
		pr_warn("%s: no governor for states\n", genpd->name);

	ret = genpd_alloc_data(genpd);
	if (ret)
		return ret;

	device_initialize(&genpd->dev);
	dev_set_name(&genpd->dev, "%s", genpd->name);

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
	genpd_debug_add(genpd);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
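
/*
 * Illustrative sketch (hypothetical provider driver): fill in a
 * generic_pm_domain and register it, initially powered off.
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *pd)
 *	{
 *		// Ungate the "foo" power island here.
 *		return 0;
 *	}
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&foo_pd, NULL, true);
 */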

static int genpd_remove(struct generic_pm_domain *genpd)
{
	struct gpd_link *l, *link;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->has_provider) {
		genpd_unlock(genpd);
		pr_err("Provider present, unable to remove %s\n", genpd->name);
		return -EBUSY;
	}

	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
		genpd_unlock(genpd);
		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
		return -EBUSY;
	}

	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
		list_del(&link->parent_node);
		list_del(&link->child_node);
		kfree(link);
	}

	list_del(&genpd->gpd_list_node);
	genpd_unlock(genpd);
	genpd_debug_remove(genpd);
	cancel_work_sync(&genpd->power_off_work);
	genpd_free_data(genpd);

	pr_debug("%s: removed %s\n", __func__, genpd->name);

	return 0;
}

/**
 * pm_genpd_remove - Remove a generic I/O PM domain
 * @genpd: Pointer to PM domain that is to be removed.
 *
 * To remove the PM domain, this function:
 *  - Removes the PM domain as a subdomain to any parent domains,
 *    if it was added.
 *  - Removes the PM domain from the list of registered PM domains.
 *
 * The PM domain will only be removed if it has no associated provider,
 * is not a parent to any other PM domain and has no devices associated
 * with it.
 */
int pm_genpd_remove(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_remove(genpd);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove);

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF

/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in this driver.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);

/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of xlate function needs
 * to be a valid pointer to struct generic_pm_domain.
 */
static struct generic_pm_domain *genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	return data;
}

/**
 * genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
static struct generic_pm_domain *genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}

/**
 * genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			      void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;
	fwnode_dev_initialized(&np->fwnode, true);

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %pOF\n", np);

	return 0;
}

static bool genpd_present(const struct generic_pm_domain *genpd)
{
	bool ret = false;
	const struct generic_pm_domain *gpd;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (gpd == genpd) {
			ret = true;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}

/**
 * of_genpd_add_provider_simple() - Register a simple PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @genpd: Pointer to PM domain associated with the PM domain provider.
 */
int of_genpd_add_provider_simple(struct device_node *np,
				 struct generic_pm_domain *genpd)
{
	int ret;

	if (!np || !genpd)
		return -EINVAL;

	if (!genpd_present(genpd))
		return -EINVAL;

	genpd->dev.of_node = np;

	/* Parse genpd OPP table */
	if (genpd->set_performance_state) {
		ret = dev_pm_opp_of_add_table(&genpd->dev);
		if (ret)
			return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");

		/*
		 * Save table for faster processing while setting performance
		 * state.
		 */
		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
		WARN_ON(IS_ERR(genpd->opp_table));
	}

	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
	if (ret) {
		if (genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}

		return ret;
	}

	genpd->provider = &np->fwnode;
	genpd->has_provider = true;

	return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
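
/*
 * Illustrative device tree sketch for a simple (single domain) provider and a
 * consumer, assuming a hypothetical "vendor,foo-pd" binding:
 *
 *	foo_pd: power-controller {
 *		compatible = "vendor,foo-pd";
 *		#power-domain-cells = <0>;
 *	};
 *
 *	foo_device {
 *		power-domains = <&foo_pd>;
 *	};
 *
 * The provider driver would then register with:
 *	ret = of_genpd_add_provider_simple(np, &foo_pd_genpd);
 */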

/**
 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @data: Pointer to the data associated with the PM domain provider.
 */
int of_genpd_add_provider_onecell(struct device_node *np,
				  struct genpd_onecell_data *data)
{
	struct generic_pm_domain *genpd;
	unsigned int i;
	int ret = -EINVAL;

	if (!np || !data)
		return -EINVAL;

	if (!data->xlate)
		data->xlate = genpd_xlate_onecell;

	for (i = 0; i < data->num_domains; i++) {
		genpd = data->domains[i];

		if (!genpd)
			continue;
		if (!genpd_present(genpd))
			goto error;

		genpd->dev.of_node = np;

		/* Parse genpd OPP table */
		if (genpd->set_performance_state) {
			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
			if (ret) {
				dev_err_probe(&genpd->dev, ret,
					      "Failed to add OPP table for index %d\n", i);
				goto error;
			}

			/*
			 * Save table for faster processing while setting
			 * performance state.
			 */
			genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
			WARN_ON(IS_ERR(genpd->opp_table));
		}

		genpd->provider = &np->fwnode;
		genpd->has_provider = true;
	}

	ret = genpd_add_provider(np, data->xlate, data);
	if (ret < 0)
		goto error;

	return 0;

error:
	while (i--) {
		genpd = data->domains[i];

		if (!genpd)
			continue;

		genpd->provider = NULL;
		genpd->has_provider = false;

		if (genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);

/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp, *tmp;
	struct generic_pm_domain *gpd;

	mutex_lock(&gpd_list_lock);
	mutex_lock(&of_genpd_mutex);
	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
		if (cp->node == np) {
			/*
			 * For each PM domain associated with the
			 * provider, set the 'has_provider' to false
			 * so that the PM domain will be safely removed.
			 */
			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
				if (gpd->provider == &np->fwnode) {
					gpd->has_provider = false;

					if (!gpd->set_performance_state)
						continue;

					dev_pm_opp_put_opp_table(gpd->opp_table);
					dev_pm_opp_of_remove_table(&gpd->dev);
				}
			}

			fwnode_dev_initialized(&cp->node->fwnode, false);
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and
 * if found, uses xlate function of the provider to map phandle args to a PM
 * domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
static struct generic_pm_domain *genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	if (!genpdspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}

/**
 * of_genpd_add_device() - Add a device to an I/O PM domain
 * @genpdspec: OF phandle args to use for look-up PM domain
 * @dev: Device to be added.
 *
 * Looks-up an I/O PM domain based upon phandle args provided and adds
 * the device to the PM domain. Returns a negative error code on failure.
 */
int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	mutex_lock(&gpd_list_lock);

	genpd = genpd_get_from_provider(genpdspec);
	if (IS_ERR(genpd)) {
		ret = PTR_ERR(genpd);
		goto out;
	}

	ret = genpd_add_device(genpd, dev, dev);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_device);

/**
 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks-up a parent PM domain and subdomain based upon phandle args
 * provided and adds the subdomain to the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
			   struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = genpd_add_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret == -ENOENT ? -EPROBE_DEFER : ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);

/**
 * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks-up a parent PM domain and subdomain based upon phandle args
 * provided and removes the subdomain from the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
			      struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = pm_genpd_remove_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);

/**
 * of_genpd_remove_last() - Remove the last PM domain registered for a provider
 * @np: Pointer to device node associated with provider
 *
 * Find the last PM domain that was added by a particular provider and
 * remove this PM domain from the list of PM domains. The provider is
 * expected to remove the PM domains in reverse order of adding them.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or
 * ERR_PTR() on failure.
 */
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
	int ret;

	if (IS_ERR_OR_NULL(np))
		return ERR_PTR(-EINVAL);

	mutex_lock(&gpd_list_lock);
	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
		if (gpd->provider == &np->fwnode) {
			ret = genpd_remove(gpd);
			genpd = ret ? ERR_PTR(ret) : gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_last);
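
/*
 * Example (an illustrative teardown sketch for a provider's remove path):
 * drop the provider first so no new lookups succeed, then remove the
 * domains in reverse order of their creation:
 *
 *	of_genpd_del_provider(pdev->dev.of_node);
 *
 *	do {
 *		pd = of_genpd_remove_last(pdev->dev.of_node);
 *	} while (!IS_ERR(pd));
 *
 * The loop stops once of_genpd_remove_last() returns ERR_PTR(-ENOENT),
 * i.e. when no domain registered by this provider remains (removal can
 * also fail with -EBUSY while devices or subdomains are still attached).
 */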

static void genpd_release_dev(struct device *dev)
{
	of_node_put(dev->of_node);
	kfree(dev);
}

static struct bus_type genpd_bus_type = {
	.name		= "genpd",
};

/**
 * genpd_dev_pm_detach() - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If such is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	/* Drop the default performance state */
	if (dev_gpd_data(dev)->default_pstate) {
		dev_pm_genpd_set_performance_state(dev, 0);
		dev_gpd_data(dev)->default_pstate = 0;
	}

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d\n",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);

	/* Unregister the device if it was created by genpd. */
	if (dev->bus == &genpd_bus_type)
		device_unregister(dev);
}

static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}

static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
				 unsigned int index, bool power_on)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	int pstate;
	int ret;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
				"#power-domain-cells", index, &pd_args);
	if (ret < 0)
		return ret;

	mutex_lock(&gpd_list_lock);
	pd = genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		mutex_unlock(&gpd_list_lock);
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return driver_deferred_probe_check_state(base_dev);
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	ret = genpd_add_device(pd, dev, base_dev);
	mutex_unlock(&gpd_list_lock);

	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;

	if (power_on) {
		genpd_lock(pd);
		ret = genpd_power_on(pd, 0);
		genpd_unlock(pd);
	}

	if (ret) {
		genpd_remove_device(pd, dev);
		return -EPROBE_DEFER;
	}

	/* Set the default performance state */
	pstate = of_get_required_opp_performance_state(dev->of_node, index);
	if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
		ret = pstate;
		goto err;
	} else if (pstate > 0) {
		ret = dev_pm_genpd_set_performance_state(dev, pstate);
		if (ret)
			goto err;
		dev_gpd_data(dev)->default_pstate = pstate;
	}
	return 1;

err:
	dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
		pd->name, ret);
	genpd_remove_device(pd, dev);
	return ret;
}

/**
 * genpd_dev_pm_attach() - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse the device's OF node to find a PM domain specifier. If such is found,
 * attach the device to the retrieved pm_domain ops.
 *
 * Returns 1 when a PM domain has successfully been attached, 0 when the
 * device doesn't need a PM domain or when multiple power-domains exist for
 * it, else a negative error code. Note that if a power-domain exists for
 * the device, but it cannot be found or turned on, -EPROBE_DEFER is returned
 * to ensure that the device is not probed and to re-try again later.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	if (!dev->of_node)
		return 0;

	/*
	 * Devices with multiple PM domains must be attached separately, as we
	 * can only attach one PM domain per device.
	 */
	if (of_count_phandle_with_args(dev->of_node, "power-domains",
				       "#power-domain-cells") != 1)
		return 0;

	return __genpd_dev_pm_attach(dev, dev, 0, true);
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
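
/*
 * Note: drivers normally do not call genpd_dev_pm_attach() directly; the bus
 * code invokes it on their behalf via dev_pm_domain_attach() before probing.
 * A simplified sketch of such a call site (assuming a single power domain in
 * DT):
 *
 *	ret = dev_pm_domain_attach(dev, true);
 *	if (ret)
 *		return ret;
 *
 * where ret may be -EPROBE_DEFER when the domain is not yet available.
 */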

/**
 * genpd_dev_pm_attach_by_id() - Associate a device with one of its PM domains.
 * @dev: The device used to lookup the PM domain.
 * @index: The index of the PM domain.
 *
 * Parse the device's OF node to find a PM domain specifier at the provided
 * index. If such is found, create a virtual device and attach it to the
 * retrieved pm_domain ops. To deal with detaching of the virtual device, the
 * ->detach() callback in the struct dev_pm_domain is assigned to
 * genpd_dev_pm_detach().
 *
 * Returns the created virtual device if successfully attached to a PM domain,
 * NULL when the device doesn't need a PM domain, else an ERR_PTR() in case of
 * failures. If a power-domain exists for the device, but cannot be found or
 * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the
 * device is not probed and to re-try again later.
 */
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
					 unsigned int index)
{
	struct device *virt_dev;
	int num_domains;
	int ret;

	if (!dev->of_node)
		return NULL;

	/* Verify that the index is within a valid range. */
	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
						 "#power-domain-cells");
	if (num_domains < 0 || index >= num_domains)
		return NULL;

	/* Allocate and register device on the genpd bus. */
	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
	if (!virt_dev)
		return ERR_PTR(-ENOMEM);

	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
	virt_dev->bus = &genpd_bus_type;
	virt_dev->release = genpd_release_dev;
	virt_dev->of_node = of_node_get(dev->of_node);

	ret = device_register(virt_dev);
	if (ret) {
		put_device(virt_dev);
		return ERR_PTR(ret);
	}

	/* Try to attach the device to the PM domain at the specified index. */
	ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
	if (ret < 1) {
		device_unregister(virt_dev);
		return ret ? ERR_PTR(ret) : NULL;
	}

	pm_runtime_enable(virt_dev);
	genpd_queue_power_off_work(dev_to_genpd(virt_dev));

	return virt_dev;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
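
/*
 * Example (a minimal consumer sketch with error handling trimmed): a driver
 * for a device in multiple PM domains attaches each domain through the
 * dev_pm_domain_attach_by_id() wrapper and typically ties the returned
 * virtual device to the consumer with a device link, so runtime PM of the
 * consumer controls the domain:
 *
 *	pd_dev = dev_pm_domain_attach_by_id(dev, 1);
 *	if (IS_ERR_OR_NULL(pd_dev))
 *		return pd_dev ? PTR_ERR(pd_dev) : -ENODEV;
 *
 *	link = device_link_add(dev, pd_dev,
 *			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
 *
 * On teardown the link is dropped with device_link_del() and the domain
 * detached with dev_pm_domain_detach().
 */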

/**
 * genpd_dev_pm_attach_by_name() - Associate a device with one of its PM domains.
 * @dev: The device used to lookup the PM domain.
 * @name: The name of the PM domain.
 *
 * Parse the device's OF node to find a PM domain specifier using the
 * power-domain-names DT property. For further description see
 * genpd_dev_pm_attach_by_id().
 */
struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
{
	int index;

	if (!dev->of_node)
		return NULL;

	index = of_property_match_string(dev->of_node, "power-domain-names",
					 name);
	if (index < 0)
		return NULL;

	return genpd_dev_pm_attach_by_id(dev, index);
}

static const struct of_device_id idle_state_match[] = {
	{ .compatible = "domain-idle-state", },
	{ }
};

static int genpd_parse_state(struct genpd_power_state *genpd_state,
			     struct device_node *state_node)
{
	int err;
	u32 residency;
	u32 entry_latency, exit_latency;

	err = of_property_read_u32(state_node, "entry-latency-us",
				   &entry_latency);
	if (err) {
		pr_debug(" * %pOF missing entry-latency-us property\n",
			 state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "exit-latency-us",
				   &exit_latency);
	if (err) {
		pr_debug(" * %pOF missing exit-latency-us property\n",
			 state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "min-residency-us", &residency);
	if (!err)
		genpd_state->residency_ns = 1000LL * residency;

	genpd_state->power_on_latency_ns = 1000LL * exit_latency;
	genpd_state->power_off_latency_ns = 1000LL * entry_latency;
	genpd_state->fwnode = &state_node->fwnode;

	return 0;
}
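
/*
 * Example (an illustrative devicetree fragment; the node name, label and
 * values are made up): a node that matches idle_state_match[] and carries
 * the properties genpd_parse_state() consumes:
 *
 *	domain_ret: domain-retention {
 *		compatible = "domain-idle-state";
 *		entry-latency-us = <20>;
 *		exit-latency-us = <40>;
 *		min-residency-us = <80>;
 *	};
 */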

static int genpd_iterate_idle_states(struct device_node *dn,
				     struct genpd_power_state *states)
{
	int ret;
	struct of_phandle_iterator it;
	struct device_node *np;
	int i = 0;

	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
	if (ret <= 0)
		return ret == -ENOENT ? 0 : ret;

	/* Loop over the phandles until all the requested entries are found */
	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
		np = it.node;
		if (!of_match_node(idle_state_match, np))
			continue;
		if (states) {
			ret = genpd_parse_state(&states[i], np);
			if (ret) {
				pr_err("Parsing idle state node %pOF failed with err %d\n",
				       np, ret);
				of_node_put(np);
				return ret;
			}
		}
		i++;
	}

	return i;
}

/**
 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
 *
 * @dn: The genpd device node
 * @states: The pointer to which the state array will be saved.
 * @n: The count of elements in the array returned from this function.
 *
 * Returns the device states parsed from the OF node. The memory for the states
 * is allocated by this function and it is the caller's responsibility to free
 * it after use. If no compatible domain idle states are found, it returns 0
 * with *states set to NULL and *n set to 0; in case of errors, a negative
 * error code is returned.
 */
int of_genpd_parse_idle_states(struct device_node *dn,
			       struct genpd_power_state **states, int *n)
{
	struct genpd_power_state *st;
	int ret;

	ret = genpd_iterate_idle_states(dn, NULL);
	if (ret < 0)
		return ret;

	if (!ret) {
		*states = NULL;
		*n = 0;
		return 0;
	}

	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	ret = genpd_iterate_idle_states(dn, st);
	if (ret <= 0) {
		kfree(st);
		return ret < 0 ? ret : -EINVAL;
	}

	*states = st;
	*n = ret;

	return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
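
/*
 * Example (a minimal provider sketch; the "foo" names are hypothetical):
 * parse the states once at probe time and hand them to the domain before
 * pm_genpd_init(). As noted in the kernel-doc above, the caller owns the
 * array and must free it after use:
 *
 *	struct genpd_power_state *states;
 *	int nr_states, ret;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (ret)
 *		return ret;
 *
 *	foo_genpd.states = states;
 *	foo_genpd.state_count = nr_states;
 *	ret = pm_genpd_init(&foo_genpd, NULL, false);
 */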

/**
 * pm_genpd_opp_to_performance_state() - Get the performance state of a genpd from its OPP node.
 * @genpd_dev: Genpd's device for which the performance state needs to be found.
 * @opp: struct dev_pm_opp of the OPP for which the performance state needs to
 *	 be found.
 *
 * Invokes the platform specific genpd->opp_to_performance_state() callback to
 * translate a power domain OPP to a performance state.
 *
 * Returns the performance state on success and 0 on failure.
 */
unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
					       struct dev_pm_opp *opp)
{
	struct generic_pm_domain *genpd;
	int state;

	genpd = container_of(genpd_dev, struct generic_pm_domain, dev);

	if (unlikely(!genpd->opp_to_performance_state))
		return 0;

	genpd_lock(genpd);
	state = genpd->opp_to_performance_state(genpd, opp);
	genpd_unlock(genpd);

	return state;
}
EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
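
/*
 * Example (an illustrative provider sketch; the "foo" names are
 * hypothetical): a platform whose performance states are encoded via the
 * OPP "opp-level" values can implement the callback by simply returning the
 * level, and assign it before registering the domain:
 *
 *	static unsigned int foo_opp_to_state(struct generic_pm_domain *pd,
 *					     struct dev_pm_opp *opp)
 *	{
 *		return dev_pm_opp_get_level(opp);
 *	}
 *
 *	foo_genpd.opp_to_performance_state = foo_opp_to_state;
 *
 * Real providers may instead translate the level into a platform specific
 * state value.
 */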

static int __init genpd_bus_init(void)
{
	return bus_register(&genpd_bus_type);
}
core_initcall(genpd_bus_init);

#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */

#ifdef CONFIG_DEBUG_FS
/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	static const char * const status_lookup[] = {
		[RPM_ACTIVE] = "active",
		[RPM_RESUMING] = "resuming",
		[RPM_SUSPENDED] = "suspended",
		[RPM_SUSPENDING] = "suspending"
	};
	const char *p = "";

	if (dev->power.runtime_error)
		p = "error";
	else if (dev->power.disable_depth)
		p = "unsupported";
	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
		p = status_lookup[dev->power.runtime_status];
	else
		WARN_ON(1);

	seq_printf(s, "%-25s  ", p);
}

static void perf_status_str(struct seq_file *s, struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	seq_put_decimal_ull(s, "", gpd_data->performance_state);
}

static int genpd_summary_one(struct seq_file *s,
			     struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GENPD_STATE_ON] = "on",
		[GENPD_STATE_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	char state[16];
	int ret;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	if (!genpd_status_on(genpd))
		snprintf(state, sizeof(state), "%s-%u",
			 status_lookup[genpd->status], genpd->state_idx);
	else
		snprintf(state, sizeof(state), "%s",
			 status_lookup[genpd->status]);
	seq_printf(s, "%-30s  %-50s %u", genpd->name, state, genpd->performance_state);

	/*
	 * Modifications on the list require holding locks on both
	 * parent and child, so we are safe.
	 * Also genpd->name is immutable.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (list_is_first(&link->parent_node, &genpd->parent_links))
			seq_printf(s, "\n%48s", " ");
		seq_printf(s, "%s", link->child->name);
		if (!list_is_last(&link->parent_node, &genpd->parent_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s  ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		perf_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	genpd_unlock(genpd);

	return 0;
}

static int summary_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	seq_puts(s, "domain                          status          children                           performance\n");
	seq_puts(s, "    /device                                             runtime status\n");
	seq_puts(s, "----------------------------------------------------------------------------------------------\n");

	ret = mutex_lock_interruptible(&gpd_list_lock);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		ret = genpd_summary_one(s, genpd);
		if (ret)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}

static int status_show(struct seq_file *s, void *data)
{
	static const char * const status_lookup[] = {
		[GENPD_STATE_ON] = "on",
		[GENPD_STATE_OFF] = "off"
	};

	struct generic_pm_domain *genpd = s->private;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;

	if (genpd->status == GENPD_STATE_OFF)
		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
			   genpd->state_idx);
	else
		seq_printf(s, "%s\n", status_lookup[genpd->status]);
exit:
	genpd_unlock(genpd);
	return ret;
}

static int sub_domains_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct gpd_link *link;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(link, &genpd->parent_links, parent_node)
		seq_printf(s, "%s\n", link->child->name);

	genpd_unlock(genpd);
	return ret;
}

static int idle_states_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	u64 now, delta, idle_time;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	seq_puts(s, "State          Time Spent(ms) Usage          Rejected\n");

	for (i = 0; i < genpd->state_count; i++) {
		idle_time = genpd->states[i].idle_time;

		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
			now = ktime_get_mono_fast_ns();
			if (now > genpd->accounting_time) {
				delta = now - genpd->accounting_time;
				idle_time += delta;
			}
		}

		do_div(idle_time, NSEC_PER_MSEC);
		seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
			   genpd->states[i].usage, genpd->states[i].rejected);
	}

	genpd_unlock(genpd);
	return ret;
}

static int active_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	u64 now, on_time, delta = 0;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (genpd->status == GENPD_STATE_ON) {
		now = ktime_get_mono_fast_ns();
		if (now > genpd->accounting_time)
			delta = now - genpd->accounting_time;
	}

	on_time = genpd->on_time + delta;
	do_div(on_time, NSEC_PER_MSEC);
	seq_printf(s, "%llu ms\n", on_time);

	genpd_unlock(genpd);
	return ret;
}

static int total_idle_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	u64 now, delta, total = 0;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	for (i = 0; i < genpd->state_count; i++) {
		total += genpd->states[i].idle_time;

		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
			now = ktime_get_mono_fast_ns();
			if (now > genpd->accounting_time) {
				delta = now - genpd->accounting_time;
				total += delta;
			}
		}
	}

	do_div(total, NSEC_PER_MSEC);
	seq_printf(s, "%llu ms\n", total);

	genpd_unlock(genpd);
	return ret;
}

static int devices_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "%s\n", kobj_path);
		kfree(kobj_path);
	}

	genpd_unlock(genpd);
	return ret;
}

static int perf_state_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;

	if (genpd_lock_interruptible(genpd))
		return -ERESTARTSYS;

	seq_printf(s, "%u\n", genpd->performance_state);

	genpd_unlock(genpd);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(summary);
DEFINE_SHOW_ATTRIBUTE(status);
DEFINE_SHOW_ATTRIBUTE(sub_domains);
DEFINE_SHOW_ATTRIBUTE(idle_states);
DEFINE_SHOW_ATTRIBUTE(active_time);
DEFINE_SHOW_ATTRIBUTE(total_idle_time);
DEFINE_SHOW_ATTRIBUTE(devices);
DEFINE_SHOW_ATTRIBUTE(perf_state);

static void genpd_debug_add(struct generic_pm_domain *genpd)
{
	struct dentry *d;

	if (!genpd_debugfs_dir)
		return;

	d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);

	debugfs_create_file("current_state", 0444,
			    d, genpd, &status_fops);
	debugfs_create_file("sub_domains", 0444,
			    d, genpd, &sub_domains_fops);
	debugfs_create_file("idle_states", 0444,
			    d, genpd, &idle_states_fops);
	debugfs_create_file("active_time", 0444,
			    d, genpd, &active_time_fops);
	debugfs_create_file("total_idle_time", 0444,
			    d, genpd, &total_idle_time_fops);
	debugfs_create_file("devices", 0444,
			    d, genpd, &devices_fops);
	if (genpd->set_performance_state)
		debugfs_create_file("perf_state", 0444,
				    d, genpd, &perf_state_fops);
}

static int __init genpd_debug_init(void)
{
	struct generic_pm_domain *genpd;

	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	debugfs_create_file("pm_genpd_summary", 0444, genpd_debugfs_dir,
			    NULL, &summary_fops);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_debug_add(genpd);

	return 0;
}
late_initcall(genpd_debug_init);

static void __exit genpd_debug_exit(void)
{
	debugfs_remove_recursive(genpd_debugfs_dir);
}
__exitcall(genpd_debug_exit);
#endif /* CONFIG_DEBUG_FS */