/*
 * Generic clock manipulation PM callbacks: manage a per-device list of
 * clocks that are prepared/enabled on runtime resume and disabled on
 * runtime suspend, plus a bus-notifier fallback when CONFIG_PM_CLK is off.
 */

0008 #include <linux/kernel.h>
0009 #include <linux/device.h>
0010 #include <linux/io.h>
0011 #include <linux/pm.h>
0012 #include <linux/pm_clock.h>
0013 #include <linux/clk.h>
0014 #include <linux/clkdev.h>
0015 #include <linux/of_clk.h>
0016 #include <linux/slab.h>
0017 #include <linux/err.h>
0018 #include <linux/pm_domain.h>
0019 #include <linux/pm_runtime.h>
0020
0021 #ifdef CONFIG_PM_CLK
0022
/*
 * Lifecycle states of a PM clock entry. pm_clk_acquire() moves an entry
 * to ACQUIRED, PREPARED or ERROR; __pm_clk_enable() moves it to ENABLED.
 */
enum pce_status {
	PCE_STATUS_NONE = 0,	/* entry allocated, clock not acquired yet */
	PCE_STATUS_ACQUIRED,	/* clk_get() done; prepare deferred (clock is
				 * enabled-when-prepared) */
	PCE_STATUS_PREPARED,	/* clk_prepare() done, clock not enabled */
	PCE_STATUS_ENABLED,	/* clock enabled */
	PCE_STATUS_ERROR,	/* clk_get() or clk_prepare() failed */
};
0030
/*
 * One clock managed for a device; linked into the device's
 * pm_subsys_data clock_list.
 */
struct pm_clock_entry {
	struct list_head node;		/* link in psd->clock_list */
	char *con_id;			/* connection ID passed to clk_get() */
	struct clk *clk;		/* clock handle (may be ERR_PTR on error) */
	enum pce_status status;		/* current state, see enum pce_status */
	bool enabled_when_prepared;	/* clk_prepare() also enables this clock,
					 * so enabling it may sleep */
};
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
/**
 * pm_clk_list_lock - ensure exclusive access for modifying the PM clock
 *		      entry list.
 * @psd: pm_subsys_data instance corresponding to the PM clock entry list
 *	 and clock mutex of the device that is being processed.
 *
 * Takes the mutex first so sleeping clock operations are serialized, then
 * the spinlock so atomic-context readers are excluded as well.
 */
static void pm_clk_list_lock(struct pm_subsys_data *psd)
	__acquires(&psd->lock)
{
	mutex_lock(&psd->clock_mutex);
	spin_lock_irq(&psd->lock);
}
0058
0059
0060
0061
0062
0063
/**
 * pm_clk_list_unlock - counterpart to pm_clk_list_lock().
 * @psd: the same pm_subsys_data instance previously passed to
 *	 pm_clk_list_lock().
 */
static void pm_clk_list_unlock(struct pm_subsys_data *psd)
	__releases(&psd->lock)
{
	spin_unlock_irq(&psd->lock);
	mutex_unlock(&psd->clock_mutex);
}
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086 static int pm_clk_op_lock(struct pm_subsys_data *psd, unsigned long *flags,
0087 const char *fn)
0088
0089 {
0090 bool atomic_context = in_atomic() || irqs_disabled();
0091
0092 try_again:
0093 spin_lock_irqsave(&psd->lock, *flags);
0094 if (!psd->clock_op_might_sleep) {
0095
0096 __release(&psd->lock);
0097 return 0;
0098 }
0099
0100
0101 if (atomic_context) {
0102 pr_err("%s: atomic context with clock_ops_might_sleep = %d",
0103 fn, psd->clock_op_might_sleep);
0104 spin_unlock_irqrestore(&psd->lock, *flags);
0105 might_sleep();
0106 return -EPERM;
0107 }
0108
0109
0110 spin_unlock_irqrestore(&psd->lock, *flags);
0111 mutex_lock(&psd->clock_mutex);
0112
0113
0114
0115
0116
0117 if (likely(psd->clock_op_might_sleep))
0118 return 0;
0119
0120 mutex_unlock(&psd->clock_mutex);
0121 goto try_again;
0122 }
0123
0124
0125
0126
0127
0128
0129
/**
 * pm_clk_op_unlock - counterpart to pm_clk_op_lock().
 * @psd: the same pm_subsys_data instance previously passed to
 *	 pm_clk_op_lock().
 * @flags: irq flags provided by the matching pm_clk_op_lock() call.
 */
static void pm_clk_op_unlock(struct pm_subsys_data *psd, unsigned long *flags)
	/* sparse annotations don't work here as the entry state isn't static */
{
	if (psd->clock_op_might_sleep) {
		mutex_unlock(&psd->clock_mutex);
	} else {
		/* the __acquire is there to work around sparse limitations */
		__acquire(&psd->lock);
		spin_unlock_irqrestore(&psd->lock, *flags);
	}
}
0141
0142
0143
0144
0145
0146
/**
 * __pm_clk_enable - Enable a clock, reporting any errors.
 * @dev: The device for the given clock, used only for error reporting.
 * @ce: PM clock entry corresponding to the clock.
 *
 * ACQUIRED entries still need preparing, so they get clk_prepare_enable();
 * PREPARED entries only need clk_enable().  Entries in any other state
 * (including ENABLED and ERROR) are left untouched.
 */
static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
{
	int ret;

	switch (ce->status) {
	case PCE_STATUS_ACQUIRED:
		ret = clk_prepare_enable(ce->clk);
		break;
	case PCE_STATUS_PREPARED:
		ret = clk_enable(ce->clk);
		break;
	default:
		return;
	}
	if (!ret)
		ce->status = PCE_STATUS_ENABLED;
	else
		dev_err(dev, "%s: failed to enable clk %p, error %d\n",
			__func__, ce->clk, ret);
}
0167
0168
0169
0170
0171
0172
/**
 * pm_clk_acquire - Acquire a device clock.
 * @dev: Device whose clock is to be acquired.
 * @ce: PM clock entry corresponding to the clock.
 *
 * Looks the clock up by @ce->con_id unless a handle was supplied already,
 * then moves the entry to ACQUIRED, PREPARED or ERROR (see enum pce_status).
 */
static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
{
	if (!ce->clk)
		ce->clk = clk_get(dev, ce->con_id);
	if (IS_ERR(ce->clk)) {
		ce->status = PCE_STATUS_ERROR;
		return;
	} else if (clk_is_enabled_when_prepared(ce->clk)) {
		/* we defer preparing the clock in that case */
		ce->status = PCE_STATUS_ACQUIRED;
		ce->enabled_when_prepared = true;
	} else if (clk_prepare(ce->clk)) {
		ce->status = PCE_STATUS_ERROR;
		dev_err(dev, "clk_prepare() failed\n");
		return;
	} else {
		ce->status = PCE_STATUS_PREPARED;
	}
	dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
		ce->clk, ce->con_id);
}
0194
/*
 * __pm_clk_add - common worker for pm_clk_add() and pm_clk_add_clk().
 * @dev: Device whose clock is going to be used for power management.
 * @con_id: Connection ID of the clock, or NULL to use @clk directly.
 * @clk: Clock handle, only consulted when @con_id is NULL.
 *
 * Allocates a pm_clock_entry, acquires the clock and appends the entry to
 * the device's clock list.  When @con_id is NULL the entry takes over the
 * caller's @clk reference (__pm_clk_remove() drops it via clk_put()).
 * Returns 0 on success or a negative errno.
 */
static int __pm_clk_add(struct device *dev, const char *con_id,
			struct clk *clk)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce;

	if (!psd)
		return -EINVAL;

	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
	if (!ce)
		return -ENOMEM;

	if (con_id) {
		ce->con_id = kstrdup(con_id, GFP_KERNEL);
		if (!ce->con_id) {
			kfree(ce);
			return -ENOMEM;
		}
	} else {
		if (IS_ERR(clk)) {
			kfree(ce);
			return -ENOENT;
		}
		ce->clk = clk;
	}

	pm_clk_acquire(dev, ce);

	pm_clk_list_lock(psd);
	list_add_tail(&ce->node, &psd->clock_list);
	/* entries that sleep when enabled force the mutex path in op_lock */
	if (ce->enabled_when_prepared)
		psd->clock_op_might_sleep++;
	pm_clk_list_unlock(psd);
	return 0;
}
0231
0232
0233
0234
0235
0236
0237
0238
0239
/**
 * pm_clk_add - Start using a device clock for power management.
 * @dev: Device whose clock is going to be used for power management.
 * @con_id: Connection ID of the clock (may be NULL for the default clock).
 *
 * Add the clock represented by @con_id to the list of clocks used for
 * the power management of @dev.  Returns 0 on success or a negative errno.
 */
int pm_clk_add(struct device *dev, const char *con_id)
{
	return __pm_clk_add(dev, con_id, NULL);
}
EXPORT_SYMBOL_GPL(pm_clk_add);
0245
0246
0247
0248
0249
0250
0251
0252
0253
0254
0255
/**
 * pm_clk_add_clk - Start using a device clock for power management.
 * @dev: Device whose clock is going to be used for power management.
 * @clk: Clock pointer to add to @dev's PM clock list.
 *
 * On success the list entry takes ownership of the @clk reference, which
 * is released when the entry is removed.  Returns 0 or a negative errno;
 * on failure the caller still owns (and must put) @clk.
 */
int pm_clk_add_clk(struct device *dev, struct clk *clk)
{
	return __pm_clk_add(dev, NULL, clk);
}
EXPORT_SYMBOL_GPL(pm_clk_add_clk);
0261
0262
0263
0264
0265
0266
0267
0268
0269
0270
0271
0272
0273 int of_pm_clk_add_clk(struct device *dev, const char *name)
0274 {
0275 struct clk *clk;
0276 int ret;
0277
0278 if (!dev || !dev->of_node || !name)
0279 return -EINVAL;
0280
0281 clk = of_clk_get_by_name(dev->of_node, name);
0282 if (IS_ERR(clk))
0283 return PTR_ERR(clk);
0284
0285 ret = pm_clk_add_clk(dev, clk);
0286 if (ret) {
0287 clk_put(clk);
0288 return ret;
0289 }
0290
0291 return 0;
0292 }
0293 EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
0294
0295
0296
0297
0298
0299
0300
0301
0302
0303
0304
/**
 * of_pm_clk_add_clks - Start using device clock(s) for power management.
 * @dev: Device whose clocks are going to be used for power management.
 *
 * Add every clock listed in @dev's device-tree node to its PM clock list.
 * Returns the number of clocks added on success, or a negative errno if
 * the node has no clocks or if adding any clock fails (in which case all
 * clocks added so far are removed again).
 */
int of_pm_clk_add_clks(struct device *dev)
{
	struct clk **clks;
	int i, count;
	int ret;

	if (!dev || !dev->of_node)
		return -EINVAL;

	count = of_clk_get_parent_count(dev->of_node);
	if (count <= 0)
		return -ENODEV;

	clks = kcalloc(count, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		clks[i] = of_clk_get(dev->of_node, i);
		if (IS_ERR(clks[i])) {
			ret = PTR_ERR(clks[i]);
			goto error;
		}

		/* on failure we still own clks[i]; drop it before unwinding */
		ret = pm_clk_add_clk(dev, clks[i]);
		if (ret) {
			clk_put(clks[i]);
			goto error;
		}
	}

	kfree(clks);

	return i;

error:
	/* remove the entries added so far; each removal puts its clock */
	while (i--)
		pm_clk_remove_clk(dev, clks[i]);

	kfree(clks);

	return ret;
}
EXPORT_SYMBOL_GPL(of_pm_clk_add_clks);
0349
0350
0351
0352
0353
/**
 * __pm_clk_remove - Destroy a PM clock entry.
 * @ce: PM clock entry to destroy, already unlinked from its list.
 *
 * Unwinds whatever state the entry reached: disable, unprepare and put the
 * clock as appropriate, then free the entry.  The fallthroughs mirror the
 * acquire sequence in reverse.
 */
static void __pm_clk_remove(struct pm_clock_entry *ce)
{
	if (!ce)
		return;

	switch (ce->status) {
	case PCE_STATUS_ENABLED:
		clk_disable(ce->clk);
		fallthrough;
	case PCE_STATUS_PREPARED:
		clk_unprepare(ce->clk);
		fallthrough;
	case PCE_STATUS_ACQUIRED:
	case PCE_STATUS_ERROR:
		/* an ERROR entry may hold an ERR_PTR from clk_get() */
		if (!IS_ERR(ce->clk))
			clk_put(ce->clk);
		break;
	default:
		break;
	}

	kfree(ce->con_id);
	kfree(ce);
}
0378
0379
0380
0381
0382
0383
0384
0385
0386
/**
 * pm_clk_remove - Stop using a device clock for power management.
 * @dev: Device whose clock should not be used for PM any more.
 * @con_id: Connection ID of the clock (NULL matches the entry added with
 *	    a NULL con_id).
 *
 * Remove the first matching entry from @dev's PM clock list and release
 * its clock.  The entry is unlinked under the list lock, but the clock is
 * torn down only after the lock is dropped (clk operations may sleep).
 */
void pm_clk_remove(struct device *dev, const char *con_id)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce;

	if (!psd)
		return;

	pm_clk_list_lock(psd);

	list_for_each_entry(ce, &psd->clock_list, node) {
		if (!con_id && !ce->con_id)
			goto remove;
		else if (!con_id || !ce->con_id)
			continue;
		else if (!strcmp(con_id, ce->con_id))
			goto remove;
	}

	pm_clk_list_unlock(psd);
	return;

 remove:
	list_del(&ce->node);
	if (ce->enabled_when_prepared)
		psd->clock_op_might_sleep--;
	pm_clk_list_unlock(psd);

	__pm_clk_remove(ce);
}
EXPORT_SYMBOL_GPL(pm_clk_remove);
0418
0419
0420
0421
0422
0423
0424
0425
0426
0427 void pm_clk_remove_clk(struct device *dev, struct clk *clk)
0428 {
0429 struct pm_subsys_data *psd = dev_to_psd(dev);
0430 struct pm_clock_entry *ce;
0431
0432 if (!psd || !clk)
0433 return;
0434
0435 pm_clk_list_lock(psd);
0436
0437 list_for_each_entry(ce, &psd->clock_list, node) {
0438 if (clk == ce->clk)
0439 goto remove;
0440 }
0441
0442 pm_clk_list_unlock(psd);
0443 return;
0444
0445 remove:
0446 list_del(&ce->node);
0447 if (ce->enabled_when_prepared)
0448 psd->clock_op_might_sleep--;
0449 pm_clk_list_unlock(psd);
0450
0451 __pm_clk_remove(ce);
0452 }
0453 EXPORT_SYMBOL_GPL(pm_clk_remove_clk);
0454
0455
0456
0457
0458
0459
0460
0461
0462 void pm_clk_init(struct device *dev)
0463 {
0464 struct pm_subsys_data *psd = dev_to_psd(dev);
0465 if (psd) {
0466 INIT_LIST_HEAD(&psd->clock_list);
0467 mutex_init(&psd->clock_mutex);
0468 psd->clock_op_might_sleep = 0;
0469 }
0470 }
0471 EXPORT_SYMBOL_GPL(pm_clk_init);
0472
0473
0474
0475
0476
0477
0478
0479
/**
 * pm_clk_create - Create and initialize a device's list of PM clocks.
 * @dev: Device to create and initialize the list of PM clocks for.
 *
 * Allocate a struct pm_subsys_data object (which initializes the clock
 * list) and attach it to @dev.  Returns 0 or a negative errno.
 */
int pm_clk_create(struct device *dev)
{
	return dev_pm_get_subsys_data(dev);
}
EXPORT_SYMBOL_GPL(pm_clk_create);
0485
0486
0487
0488
0489
0490
0491
0492
0493
/**
 * pm_clk_destroy - Destroy a device's list of power management clocks.
 * @dev: Device to destroy the list for.
 *
 * Empty @dev's PM clock list, release the subsystem data reference taken
 * by pm_clk_create(), and put all of the clocks.
 */
void pm_clk_destroy(struct device *dev)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce, *c;
	struct list_head list;

	if (!psd)
		return;

	INIT_LIST_HEAD(&list);

	pm_clk_list_lock(psd);

	/* move entries to a private list so they can be freed without locks */
	list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
		list_move(&ce->node, &list);
	psd->clock_op_might_sleep = 0;

	pm_clk_list_unlock(psd);

	dev_pm_put_subsys_data(dev);

	/* clock teardown may sleep, hence done after dropping the locks */
	list_for_each_entry_safe_reverse(ce, c, &list, node) {
		list_del(&ce->node);
		__pm_clk_remove(ce);
	}
}
EXPORT_SYMBOL_GPL(pm_clk_destroy);
0521
/* devres action: undo pm_clk_create() when the device is released. */
static void pm_clk_destroy_action(void *data)
{
	pm_clk_destroy(data);
}
0526
0527 int devm_pm_clk_create(struct device *dev)
0528 {
0529 int ret;
0530
0531 ret = pm_clk_create(dev);
0532 if (ret)
0533 return ret;
0534
0535 return devm_add_action_or_reset(dev, pm_clk_destroy_action, dev);
0536 }
0537 EXPORT_SYMBOL_GPL(devm_pm_clk_create);
0538
0539
0540
0541
0542
/**
 * pm_clk_suspend - Disable clocks in a device's PM clock list.
 * @dev: Device to disable the clocks for.
 *
 * Walks the list in reverse and disables every ENABLED clock, moving it
 * back to ACQUIRED (if it must be unprepared to stop) or PREPARED.
 * Returns 0, or -EPERM from pm_clk_op_lock() if a sleeping operation is
 * needed from atomic context.
 */
int pm_clk_suspend(struct device *dev)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce;
	unsigned long flags;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (!psd)
		return 0;

	ret = pm_clk_op_lock(psd, &flags, __func__);
	if (ret)
		return ret;

	list_for_each_entry_reverse(ce, &psd->clock_list, node) {
		if (ce->status == PCE_STATUS_ENABLED) {
			if (ce->enabled_when_prepared) {
				/* clock must be unprepared to be stopped */
				clk_disable_unprepare(ce->clk);
				ce->status = PCE_STATUS_ACQUIRED;
			} else {
				clk_disable(ce->clk);
				ce->status = PCE_STATUS_PREPARED;
			}
		}
	}

	pm_clk_op_unlock(psd, &flags);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_clk_suspend);
0576
0577
0578
0579
0580
/**
 * pm_clk_resume - Enable clocks in a device's PM clock list.
 * @dev: Device to enable the clocks for.
 *
 * Enables every ACQUIRED or PREPARED clock in list order.  Returns 0, or
 * -EPERM from pm_clk_op_lock() if a sleeping operation is needed from
 * atomic context.
 */
int pm_clk_resume(struct device *dev)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce;
	unsigned long flags;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (!psd)
		return 0;

	ret = pm_clk_op_lock(psd, &flags, __func__);
	if (ret)
		return ret;

	list_for_each_entry(ce, &psd->clock_list, node)
		__pm_clk_enable(dev, ce);

	pm_clk_op_unlock(psd, &flags);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_clk_resume);
0605
0606
0607
0608
0609
0610
0611
0612
0613
0614
0615
0616
0617
0618
0619
0620
0621
/**
 * pm_clk_notify - Notify routine for device addition and removal.
 * @nb: Notifier block object this function is a member of.
 * @action: Operation being carried out by the caller.
 * @data: Device the routine is being run for.
 *
 * For BUS_NOTIFY_ADD_DEVICE: if the device has no PM domain yet, create
 * its PM clock list, install the notifier's PM domain, and add the clocks
 * matching the notifier's con_ids (or the default clock if the con_ids
 * table starts with NULL).
 *
 * For BUS_NOTIFY_DEL_DEVICE: if the device's PM domain is the one set by
 * this notifier, clear it and destroy the device's PM clock list.
 */
static int pm_clk_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pm_clk_notifier_block *clknb;
	struct device *dev = data;
	char **con_id;
	int error;

	dev_dbg(dev, "%s() %ld\n", __func__, action);

	clknb = container_of(nb, struct pm_clk_notifier_block, nb);

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		/* don't touch devices that already belong to a PM domain */
		if (dev->pm_domain)
			break;

		error = pm_clk_create(dev);
		if (error)
			break;

		dev_pm_domain_set(dev, clknb->pm_domain);
		if (clknb->con_ids[0]) {
			for (con_id = clknb->con_ids; *con_id; con_id++)
				pm_clk_add(dev, *con_id);
		} else {
			pm_clk_add(dev, NULL);
		}

		break;
	case BUS_NOTIFY_DEL_DEVICE:
		if (dev->pm_domain != clknb->pm_domain)
			break;

		dev_pm_domain_set(dev, NULL);
		pm_clk_destroy(dev);
		break;
	}

	return 0;
}
0663
/*
 * pm_clk_runtime_suspend - runtime-suspend the device, then its clocks.
 * @dev: Device to suspend.
 *
 * If disabling the clocks fails, the device suspend is rolled back so the
 * device is left in a consistent (resumed) state.  Returns 0 or the first
 * error encountered.
 */
int pm_clk_runtime_suspend(struct device *dev)
{
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	ret = pm_generic_runtime_suspend(dev);
	if (ret) {
		dev_err(dev, "failed to suspend device\n");
		return ret;
	}

	ret = pm_clk_suspend(dev);
	if (ret) {
		dev_err(dev, "failed to suspend clock\n");
		/* undo the device suspend so state stays consistent */
		pm_generic_runtime_resume(dev);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pm_clk_runtime_suspend);
0686
/*
 * pm_clk_runtime_resume - re-enable the device's clocks, then resume it.
 * @dev: Device to resume.
 *
 * Mirror image of pm_clk_runtime_suspend(): clocks first, device second.
 * Returns 0 or the first error encountered.
 */
int pm_clk_runtime_resume(struct device *dev)
{
	int err;

	dev_dbg(dev, "%s\n", __func__);

	err = pm_clk_resume(dev);
	if (!err)
		return pm_generic_runtime_resume(dev);

	dev_err(dev, "failed to resume clock\n");
	return err;
}
EXPORT_SYMBOL_GPL(pm_clk_runtime_resume);
0702
0703 #else
0704
0705
0706
0707
0708
0709
/**
 * enable_clock - Enable a device clock.
 * @dev: Device whose clock is to be enabled.
 * @con_id: Connection ID of the clock.
 *
 * Best effort: silently does nothing if the clock cannot be looked up.
 */
static void enable_clock(struct device *dev, const char *con_id)
{
	struct clk *clk = clk_get(dev, con_id);

	if (IS_ERR(clk))
		return;

	clk_prepare_enable(clk);
	clk_put(clk);
	dev_info(dev, "Runtime PM disabled, clock forced on.\n");
}
0721
0722
0723
0724
0725
0726
/**
 * disable_clock - Disable a device clock.
 * @dev: Device whose clock is to be disabled.
 * @con_id: Connection ID of the clock.
 *
 * Best effort: silently does nothing if the clock cannot be looked up.
 */
static void disable_clock(struct device *dev, const char *con_id)
{
	struct clk *clk = clk_get(dev, con_id);

	if (IS_ERR(clk))
		return;

	clk_disable_unprepare(clk);
	clk_put(clk);
	dev_info(dev, "Runtime PM disabled, clock forced off.\n");
}
0738
0739
0740
0741
0742
0743
0744
0745
0746
0747
0748
0749
/**
 * pm_clk_notify - Notify routine for device addition and removal.
 * @nb: Notifier block object this function is a member of.
 * @action: Operation being carried out by the caller.
 * @data: Device the routine is being run for.
 *
 * Fallback used when runtime PM clock management is not available: force
 * the clocks matching the notifier's con_ids on when a driver is about to
 * bind, and force them off when it unbinds or fails to bind.  An empty
 * con_ids table means the device's default (NULL con_id) clock.
 */
static int pm_clk_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pm_clk_notifier_block *clknb;
	struct device *dev = data;
	char **con_id;

	dev_dbg(dev, "%s() %ld\n", __func__, action);

	clknb = container_of(nb, struct pm_clk_notifier_block, nb);

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		if (clknb->con_ids[0]) {
			for (con_id = clknb->con_ids; *con_id; con_id++)
				enable_clock(dev, *con_id);
		} else {
			enable_clock(dev, NULL);
		}
		break;
	case BUS_NOTIFY_DRIVER_NOT_BOUND:
	case BUS_NOTIFY_UNBOUND_DRIVER:
		if (clknb->con_ids[0]) {
			for (con_id = clknb->con_ids; *con_id; con_id++)
				disable_clock(dev, *con_id);
		} else {
			disable_clock(dev, NULL);
		}
		break;
	}

	return 0;
}
0783
0784 #endif
0785
0786
0787
0788
0789
0790
0791
0792
0793
0794
0795
/**
 * pm_clk_add_notifier - Add bus type notifier for power management clocks.
 * @bus: Bus type to add the notifier to.
 * @clknb: Notifier to be added to the given bus type.
 *
 * The nb member of @clknb is not expected to be set up by the caller; its
 * notifier_call is overwritten with pm_clk_notify() here.
 */
void pm_clk_add_notifier(struct bus_type *bus,
				 struct pm_clk_notifier_block *clknb)
{
	if (!bus || !clknb)
		return;

	clknb->nb.notifier_call = pm_clk_notify;
	bus_register_notifier(bus, &clknb->nb);
}
EXPORT_SYMBOL_GPL(pm_clk_add_notifier);