// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * Standard functionality for the common clock API.  See
 * Documentation/driver-api/clk.rst
 */
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

static const struct hlist_head *all_lists[] = {
	&clk_root_list,
	&clk_orphan_list,
	NULL,
};

/***    private data structures    ***/
struct clk_parent_map {
	const struct clk_hw	*hw;
	struct clk_core		*core;
	const char		*fw_name;
	const char		*name;
	int			index;
};

struct clk_core {
	const char		*name;
	const struct clk_ops	*ops;
	struct clk_hw		*hw;
	struct module		*owner;
	struct device		*dev;
	struct device_node	*of_node;
	struct clk_core		*parent;
	struct clk_parent_map	*parents;
	u8			num_parents;
	u8			new_parent_index;
	unsigned long		rate;
	unsigned long		req_rate;
	unsigned long		new_rate;
	struct clk_core		*new_parent;
	struct clk_core		*new_child;
	unsigned long		flags;
	bool			orphan;
	bool			rpm_enabled;
	unsigned int		enable_count;
	unsigned int		prepare_count;
	unsigned int		protect_count;
	unsigned long		min_rate;
	unsigned long		max_rate;
	unsigned long		accuracy;
	int			phase;
	struct clk_duty		duty;
	struct hlist_head	children;
	struct hlist_node	child_node;
	struct hlist_head	clks;
	unsigned int		notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry		*dentry;
	struct hlist_node	debug_node;
#endif
	struct kref		ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
	struct clk_core	*core;
	struct device *dev;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned int exclusive_count;
	struct hlist_node clks_node;
};
/***           runtime pm          ***/

static int clk_pm_runtime_get(struct clk_core *core)
{
	if (!core->rpm_enabled)
		return 0;

	return pm_runtime_resume_and_get(core->dev);
}

static void clk_pm_runtime_put(struct clk_core *core)
{
	if (!core->rpm_enabled)
		return;

	pm_runtime_put_sync(core->dev);
}

/***           locking             ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
	__acquires(enable_lock)
{
	unsigned long flags;

	/*
	 * On UP systems, spin_trylock_irqsave() always returns true, even if
	 * we already hold the lock. As a result we would run into the first
	 * branch even when we are the owner of the lock, so skip the trylock
	 * entirely on UP and go straight to the reentrancy check.
	 */
	if (!IS_ENABLED(CONFIG_SMP) ||
	    !spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			__acquire(enable_lock);
			if (!IS_ENABLED(CONFIG_SMP))
				local_save_flags(flags);
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
	__releases(enable_lock)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt) {
		__release(enable_lock);
		return;
	}
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}

static bool clk_core_rate_is_protected(struct clk_core *core)
{
	return core->protect_count;
}

static bool clk_core_is_prepared(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_prepared is optional for clocks that can prepare.
	 * Fall back to the software usage counter if it is missing.
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;

	if (!clk_pm_runtime_get(core)) {
		ret = core->ops->is_prepared(core->hw);
		clk_pm_runtime_put(core);
	}

	return ret;
}

static bool clk_core_is_enabled(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_enabled is only mandatory for clocks that gate.
	 * Fall back to the software usage counter if it is missing.
	 */
	if (!core->ops->is_enabled)
		return core->enable_count;

	/*
	 * Check if the clock controller's device is runtime active before
	 * calling the .is_enabled callback. If not, assume that the clock is
	 * disabled, because we might be called from atomic context, from
	 * which pm_runtime_get() is not allowed. Callers such as
	 * clk_disable_unused_subtree() activate runtime pm before taking the
	 * enable spinlock, but the check below covers any other caller.
	 */
	if (core->rpm_enabled) {
		pm_runtime_get_noresume(core->dev);
		if (!pm_runtime_active(core->dev)) {
			ret = false;
			goto done;
		}
	}

	ret = core->ops->is_enabled(core->hw);
done:
	if (core->rpm_enabled)
		pm_runtime_put(core->dev);

	return ret;
}

/***    helper functions   ***/
const char *__clk_get_name(const struct clk *clk)
{
	return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

const char *clk_hw_get_name(const struct clk_hw *hw)
{
	return hw->core->name;
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
{
	return hw->core->num_parents;
}
EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);

struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
{
	return hw->core->parent ? hw->core->parent->hw : NULL;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);

static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *core)
{
	struct clk_core *child;
	struct clk_core *ret;

	if (!strcmp(core->name, name))
		return core;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_lookup(const char *name)
{
	struct clk_core *root_clk;
	struct clk_core *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

#ifdef CONFIG_OF
static int of_parse_clkspec(const struct device_node *np, int index,
			    const char *name, struct of_phandle_args *out_args);
static struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
#else
static inline int of_parse_clkspec(const struct device_node *np, int index,
				   const char *name,
				   struct of_phandle_args *out_args)
{
	return -ENOENT;
}
static inline struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
	return ERR_PTR(-ENOENT);
}
#endif

/**
 * clk_core_get - Find the clk_core parent of a clk
 * @core: clk to find parent of
 * @p_index: parent index to search for
 *
 * This is the preferred method for clk providers to find the parent of a
 * clk when that parent is external to the clk controller. The parent_names
 * array is indexed and treated as a local name matching a string in the device
 * node's 'clock-names' property or as the 'con_id' matching the device's
 * dev_name() in a clk_lookup. This allows clk providers to use their own
 * namespace instead of looking for a globally unique parent string.
 *
 * Returns: -ENOENT when the provider can't be found or the clk doesn't
 * exist in the provider or the name can't be found in the DT node or
 * in a clkdev lookup. NULL when the provider knows about the clk but it
 * isn't provided on this system.
 * A valid clk_core pointer when the clk can be found in the provider.
 */
static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
{
	const char *name = core->parents[p_index].fw_name;
	int index = core->parents[p_index].index;
	struct clk_hw *hw = ERR_PTR(-ENOENT);
	struct device *dev = core->dev;
	const char *dev_id = dev ? dev_name(dev) : NULL;
	struct device_node *np = core->of_node;
	struct of_phandle_args clkspec;

	if (np && (name || index >= 0) &&
	    !of_parse_clkspec(np, index, name, &clkspec)) {
		hw = of_clk_get_hw_from_clkspec(&clkspec);
		of_node_put(clkspec.np);
	} else if (name) {
		/*
		 * If the DT search above couldn't find the provider, fall
		 * back to looking up via clkdev based clk_lookups.
		 */
		hw = clk_find_hw(dev_id, name);
	}

	if (IS_ERR(hw))
		return ERR_CAST(hw);

	return hw->core;
}

static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
{
	struct clk_parent_map *entry = &core->parents[index];
	struct clk_core *parent;

	if (entry->hw) {
		parent = entry->hw->core;
	} else {
		parent = clk_core_get(core, index);
		if (PTR_ERR(parent) == -ENOENT && entry->name)
			parent = clk_core_lookup(entry->name);
	}

	/*
	 * We have a direct reference but it isn't registered yet?
	 * Orphan it and let clk_reparent() update the orphan status
	 * when the parent is registered.
	 */
	if (!parent)
		parent = ERR_PTR(-EPROBE_DEFER);

	/* Only cache it if it's not an error */
	if (!IS_ERR(parent))
		entry->core = parent;
}

static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
						     u8 index)
{
	if (!core || index >= core->num_parents || !core->parents)
		return NULL;

	if (!core->parents[index].core)
		clk_core_fill_parent_index(core, index);

	return core->parents[index].core;
}

struct clk_hw *
clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
{
	struct clk_core *parent;

	parent = clk_core_get_parent_by_index(hw->core, index);

	return !parent ? NULL : parent->hw;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);

unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->core->enable_count;
}

static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
	if (!core)
		return 0;

	if (!core->num_parents || core->parent)
		return core->rate;

	/*
	 * Clk must have a parent because num_parents > 0 but the parent isn't
	 * known yet. Best to return 0 as the rate of this clk until we can
	 * properly recalc the rate based on the parent's rate.
	 */
	return 0;
}

unsigned long clk_hw_get_rate(const struct clk_hw *hw)
{
	return clk_core_get_rate_nolock(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate);

static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core)
{
	if (!core)
		return 0;

	return core->accuracy;
}

unsigned long clk_hw_get_flags(const struct clk_hw *hw)
{
	return hw->core->flags;
}
EXPORT_SYMBOL_GPL(clk_hw_get_flags);

bool clk_hw_is_prepared(const struct clk_hw *hw)
{
	return clk_core_is_prepared(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_is_prepared);

bool clk_hw_rate_is_protected(const struct clk_hw *hw)
{
	return clk_core_rate_is_protected(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);

bool clk_hw_is_enabled(const struct clk_hw *hw)
{
	return clk_core_is_enabled(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_is_enabled);

bool __clk_is_enabled(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);

static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			       unsigned long best, unsigned long flags)
{
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);

	return now <= rate && now > best;
}
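
/*
 * Worked example (editorial note, not from the original source): for a
 * target rate of 100 MHz and candidate parent rates of 96 MHz and 104 MHz,
 * the default policy above picks 96 MHz, the best rate that does not
 * overshoot the target. A mux carrying CLK_MUX_ROUND_CLOSEST instead
 * minimizes the absolute error, so 104 MHz becomes an acceptable candidate
 * as well; here both miss by 4 MHz and the first one examined is kept,
 * because the comparison is strict.
 */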

int clk_mux_determine_rate_flags(struct clk_hw *hw,
				 struct clk_rate_request *req,
				 unsigned long flags)
{
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
	int i, num_parents, ret;
	unsigned long best = 0;
	struct clk_rate_request parent_req = *req;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT) {
			ret = __clk_determine_rate(parent ? parent->hw : NULL,
						   &parent_req);
			if (ret)
				return ret;

			best = parent_req.rate;
		} else if (parent) {
			best = clk_core_get_rate_nolock(parent);
		} else {
			best = clk_core_get_rate_nolock(core);
		}

		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;

		if (core->flags & CLK_SET_RATE_PARENT) {
			parent_req = *req;
			ret = __clk_determine_rate(parent->hw, &parent_req);
			if (ret)
				continue;
		} else {
			parent_req.rate = clk_core_get_rate_nolock(parent);
		}

		if (mux_is_better_rate(req->rate, parent_req.rate,
				       best, flags)) {
			best_parent = parent;
			best = parent_req.rate;
		}
	}

	if (!best_parent)
		return -EINVAL;

out:
	if (best_parent)
		req->best_parent_hw = best_parent->hw;
	req->best_parent_rate = best;
	req->rate = best;

	return 0;
}
EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);

struct clk *__clk_lookup(const char *name)
{
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;
}

static void clk_core_get_boundaries(struct clk_core *core,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
{
	struct clk *clk_user;

	lockdep_assert_held(&prepare_lock);

	*min_rate = core->min_rate;
	*max_rate = core->max_rate;

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);
}

static bool clk_core_check_boundaries(struct clk_core *core,
				      unsigned long min_rate,
				      unsigned long max_rate)
{
	struct clk *user;

	lockdep_assert_held(&prepare_lock);

	if (min_rate > core->max_rate || max_rate < core->min_rate)
		return false;

	hlist_for_each_entry(user, &core->clks, clks_node)
		if (min_rate > user->max_rate || max_rate < user->min_rate)
			return false;

	return true;
}

void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
			   unsigned long max_rate)
{
	hw->core->min_rate = min_rate;
	hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);

/**
 * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
 * @hw: mux type clk to determine rate on
 * @req: rate request, also used to return preferred parent and frequencies
 *
 * Helper for finding best parent to provide a given frequency. This can be used
 * directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 *
 * Returns: 0 on success, -EERROR value on error
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);

/***        clk api        ***/
static void clk_core_rate_unprotect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->protect_count == 0,
		 "%s already unprotected\n", core->name))
		return;

	if (--core->protect_count > 0)
		return;

	clk_core_rate_unprotect(core->parent);
}

static int clk_core_rate_nuke_protect(struct clk_core *core)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return -EINVAL;

	if (core->protect_count == 0)
		return 0;

	ret = core->protect_count;
	core->protect_count = 1;
	clk_core_rate_unprotect(core);

	return ret;
}

/**
 * clk_rate_exclusive_put - release exclusivity over clock rate control
 * @clk: the clk over which the exclusivity is released
 *
 * clk_rate_exclusive_put() completes a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain are also placed under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_put() must be balanced with calls to
 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not
 * return error status.
 */
void clk_rate_exclusive_put(struct clk *clk)
{
	if (!clk)
		return;

	clk_prepare_lock();

	/*
	 * If there is something wrong with this consumer's protect count,
	 * stop here before messing with the provider.
	 */
	if (WARN_ON(clk->exclusive_count <= 0))
		goto out;

	clk_core_rate_unprotect(clk->core);
	clk->exclusive_count--;
out:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);

static void clk_core_rate_protect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (core->protect_count == 0)
		clk_core_rate_protect(core->parent);

	core->protect_count++;
}

static void clk_core_rate_restore_protect(struct clk_core *core, int count)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (count == 0)
		return;

	clk_core_rate_protect(core);
	core->protect_count = count;
}

/**
 * clk_rate_exclusive_get - get exclusivity over the clk rate control
 * @clk: the clk over which the exclusivity is claimed
 *
 * clk_rate_exclusive_get() begins a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain are also placed under exclusive control of the calling consumer.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put(). Calls to this function may sleep.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_rate_exclusive_get(struct clk *clk)
{
	if (!clk)
		return 0;

	clk_prepare_lock();
	clk_core_rate_protect(clk->core);
	clk->exclusive_count++;
	clk_prepare_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
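
/*
 * Usage sketch (editorial; the handle and the 490 MHz figure are
 * hypothetical): a consumer that cannot tolerate rate glitches brackets
 * the sensitive window with the get/put pair:
 *
 *	ret = clk_rate_exclusive_get(clk);
 *	if (ret)
 *		return ret;
 *	ret = clk_set_rate(clk, 490000000);
 *	... glitch-sensitive work ...
 *	clk_rate_exclusive_put(clk);
 *
 * While exclusivity is held, rate changes requested by other consumers on
 * this clock or anywhere up its parent chain fail with -EBUSY instead of
 * disturbing the rate.
 */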

static void clk_core_unprepare(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->prepare_count == 0,
		 "%s already unprepared\n", core->name))
		return;

	if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
		 "Unpreparing critical %s\n", core->name))
		return;

	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_unprotect(core);

	if (--core->prepare_count > 0)
		return;

	WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
	clk_pm_runtime_put(core);
}

static void clk_core_unprepare_lock(struct clk_core *core)
{
	clk_prepare_lock();
	clk_core_unprepare(core);
	clk_prepare_unlock();
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2c.  In the complex case a clk gate operation may require a fast and a slow
 * part.  It is this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_unprepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

static int clk_core_prepare(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->prepare_count == 0) {
		ret = clk_pm_runtime_get(core);
		if (ret)
			return ret;

		ret = clk_core_prepare(core->parent);
		if (ret)
			goto runtime_put;

		trace_clk_prepare(core);

		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);

		trace_clk_prepare_complete(core);

		if (ret)
			goto unprepare;
	}

	core->prepare_count++;

	/*
	 * CLK_SET_RATE_GATE is a special case of clock protection.
	 * Instead of a consumer claiming exclusive rate control, it is
	 * actually the provider which prevents any consumer from making any
	 * operation which could result in a rate change or rate glitch while
	 * the clock is prepared.
	 */
	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_protect(core);

	return 0;
unprepare:
	clk_core_unprepare(core->parent);
runtime_put:
	clk_pm_runtime_put(core);
	return ret;
}

static int clk_core_prepare_lock(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = clk_core_prepare(core);
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep.  One example is a clk which is accessed over I2c.  In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is this reason that clk_prepare and clk_enable are not mutually
 * exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_prepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_prepare);
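
/*
 * Usage sketch (editorial; my_clk is a hypothetical handle from clk_get()):
 * the sleepable and atomic halves of ungating are split between
 * clk_prepare() and clk_enable(), and must be paired in this order:
 *
 *	ret = clk_prepare(my_clk);
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_enable(my_clk);
 *	if (ret) {
 *		clk_unprepare(my_clk);
 *		return ret;
 *	}
 *
 *	... use the device ...
 *
 *	clk_disable(my_clk);
 *	clk_unprepare(my_clk);
 */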

static void clk_core_disable(struct clk_core *core)
{
	lockdep_assert_held(&enable_lock);

	if (!core)
		return;

	if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
		return;

	if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
		 "Disabling critical %s\n", core->name))
		return;

	if (--core->enable_count > 0)
		return;

	trace_clk_disable_rcuidle(core);

	if (core->ops->disable)
		core->ops->disable(core->hw);

	trace_clk_disable_complete_rcuidle(core);

	clk_core_disable(core->parent);
}

static void clk_core_disable_lock(struct clk_core *core)
{
	unsigned long flags;

	flags = clk_enable_lock();
	clk_core_disable(core);
	clk_enable_unlock(flags);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It is
 * this reason that clk_unprepare and clk_disable are not mutually exclusive.
 * In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_disable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int clk_core_enable(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&enable_lock);

	if (!core)
		return 0;

	if (WARN(core->prepare_count == 0,
		 "Enabling unprepared %s\n", core->name))
		return -ESHUTDOWN;

	if (core->enable_count == 0) {
		ret = clk_core_enable(core->parent);

		if (ret)
			return ret;

		trace_clk_enable_rcuidle(core);

		if (core->ops->enable)
			ret = core->ops->enable(core->hw);

		trace_clk_enable_complete_rcuidle(core);

		if (ret) {
			clk_core_disable(core->parent);
			return ret;
		}
	}

	core->enable_count++;
	return 0;
}

static int clk_core_enable_lock(struct clk_core *core)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = clk_core_enable(core);
	clk_enable_unlock(flags);

	return ret;
}

/**
 * clk_gate_restore_context - restore context for poweroff
 * @hw: the clk_hw pointer of clock whose state is to be restored
 *
 * The clock gate restore context function enables or disables
 * the gate clocks based on the enable_count. This is done in cases
 * where the clock context is lost and based on the enable_count
 * the clock either needs to be enabled/disabled. This
 * helps restore the state of gate clocks.
 */
void clk_gate_restore_context(struct clk_hw *hw)
{
	struct clk_core *core = hw->core;

	if (core->enable_count)
		core->ops->enable(hw);
	else
		core->ops->disable(hw);
}
EXPORT_SYMBOL_GPL(clk_gate_restore_context);

static int clk_core_save_context(struct clk_core *core)
{
	struct clk_core *child;
	int ret = 0;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = clk_core_save_context(child);
		if (ret < 0)
			return ret;
	}

	if (core->ops && core->ops->save_context)
		ret = core->ops->save_context(core->hw);

	return ret;
}

static void clk_core_restore_context(struct clk_core *core)
{
	struct clk_core *child;

	if (core->ops && core->ops->restore_context)
		core->ops->restore_context(core->hw);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_restore_context(child);
}

/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock register for powerstates in which the
 * contents of the registers will be lost. Occurs deep within the suspend
 * code.  Returns 0 on success.
 */
int clk_save_context(void)
{
	struct clk_core *clk;
	int ret;

	hlist_for_each_entry(clk, &clk_root_list, child_node) {
		ret = clk_core_save_context(clk);
		if (ret < 0)
			return ret;
	}

	hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
		ret = clk_core_save_context(clk);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(clk_save_context);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * Restore the saved clock context upon resume.
 */
void clk_restore_context(void)
{
	struct clk_core *core;

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_core_restore_context(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_core_restore_context(core);
}
EXPORT_SYMBOL_GPL(clk_restore_context);
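
/*
 * Usage sketch (editorial; the foo_* pm hooks are hypothetical): platforms
 * that lose clock-register contents in deep suspend can wire the pair into
 * their noirq pm ops:
 *
 *	static int foo_suspend_noirq(struct device *dev)
 *	{
 *		return clk_save_context();
 *	}
 *
 *	static int foo_resume_noirq(struct device *dev)
 *	{
 *		clk_restore_context();
 *		return 0;
 *	}
 */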

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, -EERROR
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_enable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_enable);
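
/*
 * Note (editorial): because clk_enable() only takes the enable spinlock, it
 * may be called from atomic context, provided the clock was already prepared
 * from process context. A hypothetical interrupt handler:
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo *foo = data;
 *
 *		clk_enable(foo->clk);	(foo->clk was prepared at probe time)
 *		... service the hardware ...
 *		clk_disable(foo->clk);
 *		return IRQ_HANDLED;
 *	}
 */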

/**
 * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it
 * @clk: clock source
 *
 * Returns true if clk_prepare() implicitly enables the clock, effectively
 * making clk_enable()/clk_disable() no-ops, false otherwise.
 *
 * This is of interest mainly to power management code where actually
 * disabling the clock also requires unpreparing it to have any material
 * effect.
 *
 * Regardless of the value returned here, the caller must always invoke
 * clk_enable() or clk_prepare_enable() and counterparts for usage counts
 * to be right.
 */
bool clk_is_enabled_when_prepared(struct clk *clk)
{
	return clk && !(clk->core->ops->enable && clk->core->ops->disable);
}
EXPORT_SYMBOL_GPL(clk_is_enabled_when_prepared);

static int clk_core_prepare_enable(struct clk_core *core)
{
	int ret;

	ret = clk_core_prepare_lock(core);
	if (ret)
		return ret;

	ret = clk_core_enable_lock(core);
	if (ret)
		clk_core_unprepare_lock(core);

	return ret;
}

static void clk_core_disable_unprepare(struct clk_core *core)
{
	clk_core_disable_lock(core);
	clk_core_unprepare_lock(core);
}

static void __init clk_unprepare_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (core->prepare_count)
		return;

	if (core->flags & CLK_IGNORE_UNUSED)
		return;

	if (clk_pm_runtime_get(core))
		return;

	if (clk_core_is_prepared(core)) {
		trace_clk_unprepare(core);
		if (core->ops->unprepare_unused)
			core->ops->unprepare_unused(core->hw);
		else if (core->ops->unprepare)
			core->ops->unprepare(core->hw);
		trace_clk_unprepare_complete(core);
	}

	clk_pm_runtime_put(core);
}

static void __init clk_disable_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_disable_unused_subtree(child);

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(core->parent);

	if (clk_pm_runtime_get(core))
		goto unprepare_out;

	flags = clk_enable_lock();

	if (core->enable_count)
		goto unlock_out;

	if (core->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence.  call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (clk_core_is_enabled(core)) {
		trace_clk_disable(core);
		if (core->ops->disable_unused)
			core->ops->disable_unused(core->hw);
		else if (core->ops->disable)
			core->ops->disable(core->hw);
		trace_clk_disable_complete(core);
	}

unlock_out:
	clk_enable_unlock(flags);
	clk_pm_runtime_put(core);
unprepare_out:
	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(core->parent);
}

static bool clk_ignore_unused __initdata;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);

static int __init clk_disable_unused(void)
{
	struct clk_core *core;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(core);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);

static int clk_core_determine_round_nolock(struct clk_core *core,
					   struct clk_rate_request *req)
{
	long rate;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	req->rate = clamp(req->rate, req->min_rate, req->max_rate);

	/*
	 * At this point, core protection will be disabled
	 * - if the provider is not protected at all
	 * - if the calling consumer is the only one which has exclusivity
	 *   over the provider
	 */
	if (clk_core_rate_is_protected(core)) {
		req->rate = core->rate;
	} else if (core->ops->determine_rate) {
		return core->ops->determine_rate(core->hw, req);
	} else if (core->ops->round_rate) {
		rate = core->ops->round_rate(core->hw, req->rate,
					     &req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
	} else {
		return -EINVAL;
	}

	return 0;
}

static void clk_core_init_rate_req(struct clk_core * const core,
				   struct clk_rate_request *req)
{
	struct clk_core *parent;

	if (WARN_ON(!core || !req))
		return;

	parent = core->parent;
	if (parent) {
		req->best_parent_hw = parent->hw;
		req->best_parent_rate = parent->rate;
	} else {
		req->best_parent_hw = NULL;
		req->best_parent_rate = 0;
	}
}

static bool clk_core_can_round(struct clk_core * const core)
{
	return core->ops->determine_rate || core->ops->round_rate;
}

static int clk_core_round_rate_nolock(struct clk_core *core,
				      struct clk_rate_request *req)
{
	lockdep_assert_held(&prepare_lock);

	if (!core) {
		req->rate = 0;
		return 0;
	}

	clk_core_init_rate_req(core, req);

	if (clk_core_can_round(core))
		return clk_core_determine_round_nolock(core, req);
	else if (core->flags & CLK_SET_RATE_PARENT)
		return clk_core_round_rate_nolock(core->parent, req);

	req->rate = core->rate;
	return 0;
}

/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @req: target rate request
 *
 * Useful for clk_ops such as .set_rate and .determine_rate.
 */
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	if (!hw) {
		req->rate = 0;
		return 0;
	}

	return clk_core_round_rate_nolock(hw->core, req);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);

/**
 * clk_hw_round_rate() - round the given rate for a hw clk
 * @hw: the hw clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use.
 *
 * Context: prepare_lock must be held.
 *          For clk providers to call from within clk_ops such as .round_rate,
 *          .determine_rate.
 *
 * Return: returns rounded rate of hw clk if clk supports round_rate operation
 *         else returns the parent rate.
 */
unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
{
	int ret;
	struct clk_rate_request req;

	clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(hw->core, &req);
	if (ret)
		return 0;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use which is then returned.  If clk doesn't support round_rate operation
 * then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	struct clk_rate_request req;
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(clk->core, &req);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	if (ret)
		return ret;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
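
/*
 * Usage sketch (editorial; target_hz is hypothetical): consumers that want
 * to know what rate they will actually get can round first and only then
 * commit:
 *
 *	long rounded = clk_round_rate(clk, target_hz);
 *
 *	if (rounded > 0 && clk_set_rate(clk, rounded) == 0)
 *		dev_dbg(dev, "running at %ld Hz\n", rounded);
 */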

/**
 * __clk_notify - call clk notifier chain
 * @core: clk that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *core, unsigned long msg,
			unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk->core == core) {
			cnd.clk = cn->clk;
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
						       &cnd);
			if (ret & NOTIFY_STOP_MASK)
				return ret;
		}
	}

	return ret;
}

/**
 * __clk_recalc_accuracies
 * @core: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes.  Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
 */
static void __clk_recalc_accuracies(struct clk_core *core)
{
	unsigned long parent_accuracy = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	if (core->parent)
		parent_accuracy = core->parent->accuracy;

	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
							    parent_accuracy);
	else
		core->accuracy = parent_accuracy;

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_accuracies(child);
}

static long clk_core_get_accuracy_recalc(struct clk_core *core)
{
	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(core);

	return clk_core_get_accuracy_no_lock(core);
}

/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be
 * issued.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
	long accuracy;

	if (!clk)
		return 0;

	clk_prepare_lock();
	accuracy = clk_core_get_accuracy_recalc(clk->core);
	clk_prepare_unlock();

	return accuracy;
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);

static unsigned long clk_recalc(struct clk_core *core,
				unsigned long parent_rate)
{
	unsigned long rate = parent_rate;

	if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
		rate = core->ops->recalc_rate(core->hw, parent_rate);
		clk_pm_runtime_put(core);
	}
	return rate;
}

/**
 * __clk_recalc_rates
 * @core: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 */
static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	old_rate = core->rate;

	if (core->parent)
		parent_rate = core->parent->rate;

	core->rate = clk_recalc(core, parent_rate);

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for
	 * POST_RATE_CHANGE & ABORT_RATE_CHANGE notifiers
	 */
	if (core->notifier_count && msg)
		__clk_notify(core, msg, old_rate, core->rate);

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_rates(child, msg);
}

static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
{
	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(core, 0);

	return clk_core_get_rate_nolock(core);
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
 * is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	if (!clk)
		return 0;

	clk_prepare_lock();
	rate = clk_core_get_rate_recalc(clk->core);
	clk_prepare_unlock();

	return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

static int clk_fetch_parent_index(struct clk_core *core,
				  struct clk_core *parent)
{
	int i;

	if (!parent)
		return -EINVAL;

	for (i = 0; i < core->num_parents; i++) {
		/* Found it first try! */
		if (core->parents[i].core == parent)
			return i;

		/* Something else is here, so keep looking */
		if (core->parents[i].core)
			continue;

		/* Maybe core hasn't been cached but the hw is all we know? */
		if (core->parents[i].hw) {
			if (core->parents[i].hw == parent->hw)
				break;

			/* Didn't match, but we're expecting a clk_hw */
			continue;
		}

		/* Maybe it hasn't been cached (clk_set_parent() path) */
		if (parent == clk_core_get(core, i))
			break;

		/* Fallback to comparing globally unique names */
		if (core->parents[i].name &&
		    !strcmp(parent->name, core->parents[i].name))
			break;
	}

	if (i == core->num_parents)
		return -EINVAL;

	core->parents[i].core = parent;
	return i;
}

/**
 * clk_hw_get_parent_index - return the index of the parent clock
 * @hw: clk_hw associated with the clk being consumed
 *
 * Fetches and returns the index of parent clock. Returns -EINVAL if the given
 * clock does not have a current parent.
 */
int clk_hw_get_parent_index(struct clk_hw *hw)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);

	if (WARN_ON(parent == NULL))
		return -EINVAL;

	return clk_fetch_parent_index(hw->core, parent->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_index);

/*
 * Update the orphan status of @core and all its children.
 */
static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
{
	struct clk_core *child;

	core->orphan = is_orphan;

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_update_orphan_status(child, is_orphan);
}

static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
{
	bool was_orphan = core->orphan;

	hlist_del(&core->child_node);

	if (new_parent) {
		bool becomes_orphan = new_parent->orphan;

		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == core)
			new_parent->new_child = NULL;

		hlist_add_head(&core->child_node, &new_parent->children);

		if (was_orphan != becomes_orphan)
			clk_core_update_orphan_status(core, becomes_orphan);
	} else {
		hlist_add_head(&core->child_node, &clk_orphan_list);
		if (!was_orphan)
			clk_core_update_orphan_status(core, true);
	}

	core->parent = new_parent;
}

static struct clk_core *__clk_set_parent_before(struct clk_core *core,
						struct clk_core *parent)
{
	unsigned long flags;
	struct clk_core *old_parent = core->parent;

	/*
	 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
	 *
	 * 2. Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on.  This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */

	/* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_prepare_enable(old_parent);
		clk_core_prepare_enable(parent);
	}

	/* migrate prepare count if > 0 */
	if (core->prepare_count) {
		clk_core_prepare_enable(parent);
		clk_core_enable_lock(core);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(core, parent);
	clk_enable_unlock(flags);

	return old_parent;
}

static void __clk_set_parent_after(struct clk_core *core,
				   struct clk_core *parent,
				   struct clk_core *old_parent)
{
	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (core->prepare_count) {
		clk_core_disable_lock(core);
		clk_core_disable_unprepare(old_parent);
	}

	/* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_disable_unprepare(parent);
		clk_core_disable_unprepare(old_parent);
	}
}

static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
			    u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk_core *old_parent;

	old_parent = __clk_set_parent_before(core, parent);

	trace_clk_set_parent(core, parent);

	/* change clock input source */
	if (parent && core->ops->set_parent)
		ret = core->ops->set_parent(core->hw, p_index);

	trace_clk_set_parent_complete(core, parent);

	if (ret) {
		flags = clk_enable_lock();
		clk_reparent(core, old_parent);
		clk_enable_unlock(flags);
		__clk_set_parent_after(core, old_parent, parent);

		return ret;
	}

	__clk_set_parent_after(core, parent, old_parent);

	return 0;
}

/**
 * __clk_speculate_rates
 * @core: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 */
static int __clk_speculate_rates(struct clk_core *core,
				 unsigned long parent_rate)
{
	struct clk_core *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	lockdep_assert_held(&prepare_lock);

	new_rate = clk_recalc(core, parent_rate);

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (core->notifier_count)
		ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK) {
		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
			 __func__, core->name, ret);
		goto out;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
			     struct clk_core *new_parent, u8 p_index)
{
	struct clk_core *child;

	core->new_rate = new_rate;
	core->new_parent = new_parent;
	core->new_parent_index = p_index;

	core->new_child = NULL;
	if (new_parent && new_parent != core->parent)
		new_parent->new_child = core;

	hlist_for_each_entry(child, &core->children, child_node) {
		child->new_rate = clk_recalc(child, new_rate);
		clk_calc_subtree(child, child->new_rate, NULL, 0);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk_core *clk_calc_new_rates(struct clk_core *core,
					   unsigned long rate)
{
	struct clk_core *top = core;
	struct clk_core *old_parent, *parent;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	unsigned long min_rate;
	unsigned long max_rate;
	int p_index = 0;
	long ret;

	/* sanity */
	if (IS_ERR_OR_NULL(core))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = core->parent;
	if (parent)
		best_parent_rate = parent->rate;

	clk_core_get_boundaries(core, &min_rate, &max_rate);

	/* find the closest rate and parent clk/rate */
	if (clk_core_can_round(core)) {
		struct clk_rate_request req;

		req.rate = rate;
		req.min_rate = min_rate;
		req.max_rate = max_rate;

		clk_core_init_rate_req(core, &req);

		ret = clk_core_determine_round_nolock(core, &req);
		if (ret < 0)
			return NULL;

		best_parent_rate = req.best_parent_rate;
		new_rate = req.rate;
		parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;

		if (new_rate < min_rate || new_rate > max_rate)
			return NULL;
	} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		core->new_rate = core->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, core->name);
		return NULL;
	}

	/* try finding the new parent index */
	if (parent && core->num_parents > 1) {
		p_index = clk_fetch_parent_index(core, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, core->name);
			return NULL;
		}
	}

	if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

out:
	clk_calc_subtree(core, new_rate, parent, p_index);

	return top;
}

/*
 * Notify about rate changes in a subtree. Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
						  unsigned long event)
{
	struct clk_core *child, *tmp_clk, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (core->rate == core->new_rate)
		return NULL;

	if (core->notifier_count) {
		ret = __clk_notify(core, event, core->rate, core->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = core;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child) {
		tmp_clk = clk_propagate_rate_change(core->new_child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk_core *core)
{
	struct clk_core *child;
	struct hlist_node *tmp;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	bool skip_set_rate = false;
	struct clk_core *old_parent;
	struct clk_core *parent = NULL;

	old_rate = core->rate;

	if (core->new_parent) {
		parent = core->new_parent;
		best_parent_rate = core->new_parent->rate;
	} else if (core->parent) {
		parent = core->parent;
		best_parent_rate = core->parent->rate;
	}

	if (clk_pm_runtime_get(core))
		return;

	if (core->flags & CLK_SET_RATE_UNGATE) {
		clk_core_prepare(core);
		clk_core_enable_lock(core);
	}

	if (core->new_parent && core->new_parent != core->parent) {
		old_parent = __clk_set_parent_before(core, core->new_parent);
		trace_clk_set_parent(core, core->new_parent);

		if (core->ops->set_rate_and_parent) {
			skip_set_rate = true;
			core->ops->set_rate_and_parent(core->hw, core->new_rate,
						       best_parent_rate,
						       core->new_parent_index);
		} else if (core->ops->set_parent) {
			core->ops->set_parent(core->hw, core->new_parent_index);
		}

		trace_clk_set_parent_complete(core, core->new_parent);
		__clk_set_parent_after(core, core->new_parent, old_parent);
	}

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(parent);

	trace_clk_set_rate(core, core->new_rate);

	if (!skip_set_rate && core->ops->set_rate)
		core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);

	trace_clk_set_rate_complete(core, core->new_rate);

	core->rate = clk_recalc(core, best_parent_rate);

	if (core->flags & CLK_SET_RATE_UNGATE) {
		clk_core_disable_lock(core);
		clk_core_unprepare(core);
	}

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(parent);

	if (core->notifier_count && old_rate != core->rate)
		__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);

	if (core->flags & CLK_RECALC_NEW_RATES)
		(void)clk_calc_new_rates(core, core->new_rate);

	/*
	 * Use safe iteration, as change_rate can actually swap parents
	 * for certain clock types.
	 */
	hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		clk_change_rate(child);
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child)
		clk_change_rate(core->new_child);

	clk_pm_runtime_put(core);
}

static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
						    unsigned long req_rate)
{
	int ret, cnt;
	struct clk_rate_request req;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	/* simulate what the rate would be if it could be freely set */
	cnt = clk_core_rate_nuke_protect(core);
	if (cnt < 0)
		return cnt;

	clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
	req.rate = req_rate;

	ret = clk_core_round_rate_nolock(core, &req);

	/* restore the protection */
	clk_core_rate_restore_protect(core, cnt);

	return ret ? 0 : req.rate;
}

static int clk_core_set_rate_nolock(struct clk_core *core,
				    unsigned long req_rate)
{
	struct clk_core *top, *fail_clk;
	unsigned long rate;
	int ret = 0;

	if (!core)
		return 0;

	rate = clk_core_req_round_rate_nolock(core, req_rate);

	/* bail early if nothing to do */
	if (rate == clk_core_get_rate_nolock(core))
		return 0;

	/* fail on a direct rate change request if the rate is protected */
	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(core, req_rate);
	if (!top)
		return -EINVAL;

	ret = clk_pm_runtime_get(core);
	if (ret)
		return ret;

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_debug("%s: failed to set %s rate\n", __func__,
			 fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto err;
	}

	/* change the rates */
	clk_change_rate(top);

	core->req_rate = req_rate;
err:
	clk_pm_runtime_put(core);

	return ret;
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_rate_nolock(clk->core, rate);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
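
/*
 * Usage sketch (editorial; the 48 MHz request is hypothetical): since the
 * provider may round the request, reading the rate back after a successful
 * clk_set_rate() shows what the hardware actually settled on:
 *
 *	ret = clk_set_rate(clk, 48000000);
 *	if (!ret)
 *		pr_debug("got %lu Hz\n", clk_get_rate(clk));
 */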

/**
 * clk_set_rate_exclusive - specify a new rate and get exclusive control
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * This is a combination of clk_set_rate() and clk_rate_exclusive_get()
 * within a critical section.
 *
 * This can be used initially to ensure that at least 1 consumer is
 * satisfied when several consumers are competing for exclusivity over the
 * same clock provider.
 *
 * The exclusivity is not applied if setting the rate failed.
 *
 * Calls to clk_set_rate_exclusive() should be balanced with calls to
 * clk_rate_exclusive_put().
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
	int ret;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	/*
	 * The temporary protection removal is not here, on purpose.
	 * This function is meant to be used instead of clk_rate_protect,
	 * so before the consumer code path protects the clock provider.
	 */
	ret = clk_core_set_rate_nolock(clk->core, rate);
	if (!ret) {
		clk_core_rate_protect(clk->core);
		clk->exclusive_count++;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);

/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
{
	int ret = 0;
	unsigned long old_min, old_max, rate;

	if (!clk)
		return 0;

	trace_clk_set_rate_range(clk->core, min, max);

	if (min > max) {
		pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
		       __func__, clk->core->name, clk->dev_id, clk->con_id,
		       min, max);
		return -EINVAL;
	}

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	/* Save the current values in case we need to rollback the change */
	old_min = clk->min_rate;
	old_max = clk->max_rate;
	clk->min_rate = min;
	clk->max_rate = max;

	if (!clk_core_check_boundaries(clk->core, min, max)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Since the boundaries have been changed, let's give the
	 * opportunity to the provider to adjust the clock rate based on
	 * the new boundaries.
	 *
	 * We also need to handle the case where the clock is currently
	 * outside of the boundaries. Clamping the last requested rate
	 * to the current minimum and maximum will also handle this.
	 *
	 * FIXME:
	 * There is a catch. It may fail for the usual reason (clock
	 * broken, clock protected, etc) but also because:
	 * - round_rate() was not favorable and fell on the wrong
	 *   side of the boundary
	 * - the determine_rate() callback does not really check for
	 *   this corner case when determining the rate
	 */
	rate = clamp(clk->core->req_rate, min, max);
	ret = clk_core_set_rate_nolock(clk->core, rate);
	if (ret) {
		/* rollback the changes */
		clk->min_rate = old_min;
		clk->max_rate = old_max;
	}

out:
	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_range);
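
/*
 * Usage sketch (editorial; the bounds are hypothetical): a consumer that
 * only constrains the rate, leaving the exact choice to other consumers and
 * the provider:
 *
 *	ret = clk_set_rate_range(clk, 100000000, 200000000);
 *
 * The framework aggregates every consumer's [min, max] window (see
 * clk_core_get_boundaries() above) and re-applies the clamped request rate.
 */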

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	trace_clk_set_min_rate(clk->core, rate);

	return clk_set_rate_range(clk, rate, clk->max_rate);
}
EXPORT_SYMBOL_GPL(clk_set_min_rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	trace_clk_set_max_rate(clk->core, rate);

	return clk_set_rate_range(clk, clk->min_rate, rate);
}
EXPORT_SYMBOL_GPL(clk_set_max_rate);

/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	if (!clk)
		return NULL;

	clk_prepare_lock();
	parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
	clk_prepare_unlock();

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

static struct clk_core *__clk_init_parent(struct clk_core *core)
{
	u8 index = 0;

	if (core->num_parents > 1 && core->ops->get_parent)
		index = core->ops->get_parent(core->hw);

	return clk_core_get_parent_by_index(core, index);
}

static void clk_core_reparent(struct clk_core *core,
			      struct clk_core *new_parent)
{
	clk_reparent(core, new_parent);
	__clk_recalc_accuracies(core);
	__clk_recalc_rates(core, POST_RATE_CHANGE);
}

void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
{
	if (!hw)
		return;

	clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
}

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(struct clk *clk, struct clk *parent)
{
	struct clk_core *core, *parent_core;
	int i;

	/* NULL clocks should be nops, so return success if either is NULL. */
	if (!clk || !parent)
		return true;

	core = clk->core;
	parent_core = parent->core;

	/* Optimize for the case where the parent is already the parent. */
	if (core->parent == parent_core)
		return true;

	for (i = 0; i < core->num_parents; i++)
		if (!strcmp(core->parents[i].name, parent_core->name))
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(clk_has_parent);

static int clk_core_set_parent_nolock(struct clk_core *core,
				      struct clk_core *parent)
{
	int ret = 0;
	int p_index = 0;
	unsigned long p_rate = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->parent == parent)
		return 0;

	/* verify ops for multi-parent clks */
	if (core->num_parents > 1 && !core->ops->set_parent)
		return -EPERM;

	/* check that we are allowed to re-parent if the clock is in use */
	if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
		return -EBUSY;

	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(core, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, core->name);
			return p_index;
		}
		p_rate = parent->rate;
	}

	ret = clk_pm_runtime_get(core);
	if (ret)
		return ret;

	/* propagate PRE_RATE_CHANGE notifications */
	ret = __clk_speculate_rates(core, p_rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
		goto runtime_put;

	/* do the re-parent */
	ret = __clk_set_parent(core, parent, p_index);

	/* propagate rate and accuracy recalculation accordingly */
	if (ret) {
		__clk_recalc_rates(core, ABORT_RATE_CHANGE);
	} else {
		__clk_recalc_rates(core, POST_RATE_CHANGE);
		__clk_recalc_accuracies(core);
	}

runtime_put:
	clk_pm_runtime_put(core);

	return ret;
}

int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent)
{
	return clk_core_set_parent_nolock(hw->core, parent->core);
}
EXPORT_SYMBOL_GPL(clk_hw_set_parent);

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk is in
 * prepared state, the clk will get enabled for the duration of this call.
 * If that's not acceptable for a specific clk (e.g. the consumer can't
 * handle that, or the clk may only be disabled while unused), make sure the
 * clock is gated or rate-protected before re-parenting.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_parent_nolock(clk->core,
					 parent ? parent->core : NULL);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
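
/*
 * Usage sketch (editorial; the "mux" and "pll" handles are hypothetical):
 * checking clk_has_parent() first avoids a guaranteed -EINVAL from the
 * re-parent path:
 *
 *	struct clk *mux = devm_clk_get(dev, "mux");
 *	struct clk *pll = devm_clk_get(dev, "pll");
 *
 *	if (clk_has_parent(mux, pll))
 *		ret = clk_set_parent(mux, pll);
 */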

static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
{
	int ret = -EINVAL;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	trace_clk_set_phase(core, degrees);

	if (core->ops->set_phase) {
		ret = core->ops->set_phase(core->hw, degrees);
		if (!ret)
			core->phase = degrees;
	}

	trace_clk_set_phase_complete(core, degrees);

	return ret;
}

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified
 * degrees. Returns 0 on success, -EERROR otherwise.
 *
 * This function makes no distinction about the input or reference
 * signal that we adjust the clock signal phase against. For example
 * phase locked-loop clock signal generators we may shift phase with
 * respect to feedback clock signal input, but for other cases the
 * clock phase may be shifted with respect to some other, unspecified
 * signal.
 *
 * Additionally the concept of phase shift does not propagate through
 * the clock tree hierarchy, which sets it apart from clock rates and
 * clock accuracy. A parent clock phase attribute does not have an
 * impact on the phase attribute of a child clock.
 */
int clk_set_phase(struct clk *clk, int degrees)
{
	int ret;

	if (!clk)
		return 0;

	/* sanity check degrees */
	degrees %= 360;
	if (degrees < 0)
		degrees += 360;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_phase_nolock(clk->core, degrees);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_phase);
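
/*
 * Usage sketch (editorial; sample_clk is hypothetical): degrees outside
 * [0, 359] are folded into range by the normalization above, so a caller
 * may request a -90 degree shift and it is applied as 270 degrees:
 *
 *	ret = clk_set_phase(sample_clk, -90);
 */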

static int clk_core_get_phase(struct clk_core *core)
{
	int ret;

	lockdep_assert_held(&prepare_lock);
	if (!core->ops->get_phase)
		return 0;

	/* Always try to update cached phase if possible */
	ret = core->ops->get_phase(core->hw);
	if (ret >= 0)
		core->phase = ret;

	return ret;
}

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * -EERROR.
 */
int clk_get_phase(struct clk *clk)
{
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();
	ret = clk_core_get_phase(clk->core);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_get_phase);

static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
{
	/* Assume a default value of 50% */
	core->duty.num = 1;
	core->duty.den = 2;
}

static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);

static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
{
	struct clk_duty *duty = &core->duty;
	int ret = 0;

	if (!core->ops->get_duty_cycle)
		return clk_core_update_duty_cycle_parent_nolock(core);

	ret = core->ops->get_duty_cycle(core->hw, duty);
	if (ret)
		goto reset;

	/* Don't trust the clock provider too much */
	if (duty->den == 0 || duty->num > duty->den) {
		ret = -EINVAL;
		goto reset;
	}

	return 0;

reset:
	clk_core_reset_duty_cycle_nolock(core);
	return ret;
}

static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
{
	int ret = 0;

	if (core->parent &&
	    core->flags & CLK_DUTY_CYCLE_PARENT) {
		ret = clk_core_update_duty_cycle_nolock(core->parent);
		memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
	} else {
		clk_core_reset_duty_cycle_nolock(core);
	}

	return ret;
}

static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
						 struct clk_duty *duty);

static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
					  struct clk_duty *duty)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	trace_clk_set_duty_cycle(core, duty);

	if (!core->ops->set_duty_cycle)
		return clk_core_set_duty_cycle_parent_nolock(core, duty);

	ret = core->ops->set_duty_cycle(core->hw, duty);
	if (!ret)
		memcpy(&core->duty, duty, sizeof(*duty));

	trace_clk_set_duty_cycle_complete(core, duty);

	return ret;
}

static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
						 struct clk_duty *duty)
{
	int ret = 0;

	if (core->parent &&
	    core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
		ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
		memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
	}

	return ret;
}
2833
2834
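2835 /**
2836  * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
2837  * @clk: clock signal source
2838  * @num: numerator of the duty cycle ratio to be applied
2839  * @den: denominator of the duty cycle ratio to be applied
2840  *
2841  * Apply the duty cycle ratio if the ratio is valid and the clock can
2842  * perform this operation.
2843  * Returns (0) on success, a negative errno otherwise.
2844  */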
2845 int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
2846 {
2847 int ret;
2848 struct clk_duty duty;
2849
2850 if (!clk)
2851 return 0;
2852
2853
2854 if (den == 0 || num > den)
2855 return -EINVAL;
2856
2857 duty.num = num;
2858 duty.den = den;
2859
2860 clk_prepare_lock();
2861
2862 if (clk->exclusive_count)
2863 clk_core_rate_unprotect(clk->core);
2864
2865 ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);
2866
2867 if (clk->exclusive_count)
2868 clk_core_rate_protect(clk->core);
2869
2870 clk_prepare_unlock();
2871
2872 return ret;
2873 }
2874 EXPORT_SYMBOL_GPL(clk_set_duty_cycle);
2875
2876 static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
2877 unsigned int scale)
2878 {
2879 struct clk_duty *duty = &core->duty;
2880 int ret;
2881
2882 clk_prepare_lock();
2883
2884 ret = clk_core_update_duty_cycle_nolock(core);
2885 if (!ret)
2886 ret = mult_frac(scale, duty->num, duty->den);
2887
2888 clk_prepare_unlock();
2889
2890 return ret;
2891 }
2892
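2893 /**
2894  * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
2895  * @clk: clock signal source
2896  * @scale: scaling factor to be applied to represent the ratio as an integer
2897  *
2898  * Returns the duty cycle ratio of a clock node multiplied by the provided
2899  * scaling factor, or a negative errno on error.
2900  */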
2901 int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
2902 {
2903 if (!clk)
2904 return 0;
2905
2906 return clk_core_get_scaled_duty_cycle(clk->core, scale);
2907 }
2908 EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);
2909
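2910 /**
2911  * clk_is_match - check if two clk's point to the same hardware clock
2912  * @p: clk compared against q
2913  * @q: clk compared against p
2914  *
2915  * Returns true if the two struct clk pointers both point to the same hardware
2916  * clock node. Put differently, returns true if @p and @q share the same
2917  * struct clk_core object.
2918  *
2919  * Returns false otherwise. Note that two NULL clks are treated as matching.
2920  */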
2921 bool clk_is_match(const struct clk *p, const struct clk *q)
2922 {
2923
2924 if (p == q)
2925 return true;
2926
2927
2928 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
2929 if (p->core == q->core)
2930 return true;
2931
2932 return false;
2933 }
2934 EXPORT_SYMBOL_GPL(clk_is_match);
2935
2936
2937
2938 #ifdef CONFIG_DEBUG_FS
2939 #include <linux/debugfs.h>
2940
2941 static struct dentry *rootdir;
2942 static int inited = 0;
2943 static DEFINE_MUTEX(clk_debug_lock);
2944 static HLIST_HEAD(clk_debug_list);
2945
2946 static struct hlist_head *orphan_list[] = {
2947 &clk_orphan_list,
2948 NULL,
2949 };
2950
2951 static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
2952 int level)
2953 {
2954 int phase;
2955
2956 seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
2957 level * 3 + 1, "",
2958 30 - level * 3, c->name,
2959 c->enable_count, c->prepare_count, c->protect_count,
2960 clk_core_get_rate_recalc(c),
2961 clk_core_get_accuracy_recalc(c));
2962
2963 phase = clk_core_get_phase(c);
2964 if (phase >= 0)
2965 seq_printf(s, "%5d", phase);
2966 else
2967 seq_puts(s, "-----");
2968
2969 seq_printf(s, " %6d", clk_core_get_scaled_duty_cycle(c, 100000));
2970
2971 if (c->ops->is_enabled)
2972 seq_printf(s, " %9c\n", clk_core_is_enabled(c) ? 'Y' : 'N');
2973 else if (!c->ops->enable)
2974 seq_printf(s, " %9c\n", 'Y');
2975 else
2976 seq_printf(s, " %9c\n", '?');
2977 }
2978
2979 static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
2980 int level)
2981 {
2982 struct clk_core *child;
2983
2984 clk_pm_runtime_get(c);
2985 clk_summary_show_one(s, c, level);
2986 clk_pm_runtime_put(c);
2987
2988 hlist_for_each_entry(child, &c->children, child_node)
2989 clk_summary_show_subtree(s, child, level + 1);
2990 }
2991
2992 static int clk_summary_show(struct seq_file *s, void *data)
2993 {
2994 struct clk_core *c;
2995 struct hlist_head **lists = (struct hlist_head **)s->private;
2996
2997 seq_puts(s, " enable prepare protect duty hardware\n");
2998 seq_puts(s, " clock count count count rate accuracy phase cycle enable\n");
2999 seq_puts(s, "-------------------------------------------------------------------------------------------------------\n");
3000
3001 clk_prepare_lock();
3002
3003 for (; *lists; lists++)
3004 hlist_for_each_entry(c, *lists, child_node)
3005 clk_summary_show_subtree(s, c, 0);
3006
3007 clk_prepare_unlock();
3008
3009 return 0;
3010 }
3011 DEFINE_SHOW_ATTRIBUTE(clk_summary);
3012
3013 static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
3014 {
3015 int phase;
3016 unsigned long min_rate, max_rate;
3017
3018 clk_core_get_boundaries(c, &min_rate, &max_rate);
3019
3020
3021 seq_printf(s, "\"%s\": { ", c->name);
3022 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
3023 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
3024 seq_printf(s, "\"protect_count\": %d,", c->protect_count);
3025 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate_recalc(c));
3026 seq_printf(s, "\"min_rate\": %lu,", min_rate);
3027 seq_printf(s, "\"max_rate\": %lu,", max_rate);
3028 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy_recalc(c));
3029 phase = clk_core_get_phase(c);
3030 if (phase >= 0)
3031 seq_printf(s, "\"phase\": %d,", phase);
3032 seq_printf(s, "\"duty_cycle\": %u",
3033 clk_core_get_scaled_duty_cycle(c, 100000));
3034 }
3035
3036 static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
3037 {
3038 struct clk_core *child;
3039
3040 clk_dump_one(s, c, level);
3041
3042 hlist_for_each_entry(child, &c->children, child_node) {
3043 seq_putc(s, ',');
3044 clk_dump_subtree(s, child, level + 1);
3045 }
3046
3047 seq_putc(s, '}');
3048 }
3049
3050 static int clk_dump_show(struct seq_file *s, void *data)
3051 {
3052 struct clk_core *c;
3053 bool first_node = true;
3054 struct hlist_head **lists = (struct hlist_head **)s->private;
3055
3056 seq_putc(s, '{');
3057 clk_prepare_lock();
3058
3059 for (; *lists; lists++) {
3060 hlist_for_each_entry(c, *lists, child_node) {
3061 if (!first_node)
3062 seq_putc(s, ',');
3063 first_node = false;
3064 clk_dump_subtree(s, c, 0);
3065 }
3066 }
3067
3068 clk_prepare_unlock();
3069
3070 seq_puts(s, "}\n");
3071 return 0;
3072 }
3073 DEFINE_SHOW_ATTRIBUTE(clk_dump);
3074
3075 #undef CLOCK_ALLOW_WRITE_DEBUGFS
3076 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3077
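3078 /*
3079  * Writable debugfs support is dangerous, so no compile-time option is
3080  * provided; anyone who wants it must modify the source directly.
3081  */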
3082 static int clk_rate_set(void *data, u64 val)
3083 {
3084 struct clk_core *core = data;
3085 int ret;
3086
3087 clk_prepare_lock();
3088 ret = clk_core_set_rate_nolock(core, val);
3089 clk_prepare_unlock();
3090
3091 return ret;
3092 }
3093
3094 #define clk_rate_mode 0644
3095
3096 static int clk_prepare_enable_set(void *data, u64 val)
3097 {
3098 struct clk_core *core = data;
3099 int ret = 0;
3100
3101 if (val)
3102 ret = clk_prepare_enable(core->hw->clk);
3103 else
3104 clk_disable_unprepare(core->hw->clk);
3105
3106 return ret;
3107 }
3108
3109 static int clk_prepare_enable_get(void *data, u64 *val)
3110 {
3111 struct clk_core *core = data;
3112
3113 *val = core->enable_count && core->prepare_count;
3114 return 0;
3115 }
3116
3117 DEFINE_DEBUGFS_ATTRIBUTE(clk_prepare_enable_fops, clk_prepare_enable_get,
3118 clk_prepare_enable_set, "%llu\n");
3119
3120 #else
3121 #define clk_rate_set NULL
3122 #define clk_rate_mode 0444
3123 #endif
3124
3125 static int clk_rate_get(void *data, u64 *val)
3126 {
3127 struct clk_core *core = data;
3128
3129 clk_prepare_lock();
3130 *val = clk_core_get_rate_recalc(core);
3131 clk_prepare_unlock();
3132
3133 return 0;
3134 }
3135
3136 DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n");
3137
3138 static const struct {
3139 unsigned long flag;
3140 const char *name;
3141 } clk_flags[] = {
3142 #define ENTRY(f) { f, #f }
3143 ENTRY(CLK_SET_RATE_GATE),
3144 ENTRY(CLK_SET_PARENT_GATE),
3145 ENTRY(CLK_SET_RATE_PARENT),
3146 ENTRY(CLK_IGNORE_UNUSED),
3147 ENTRY(CLK_GET_RATE_NOCACHE),
3148 ENTRY(CLK_SET_RATE_NO_REPARENT),
3149 ENTRY(CLK_GET_ACCURACY_NOCACHE),
3150 ENTRY(CLK_RECALC_NEW_RATES),
3151 ENTRY(CLK_SET_RATE_UNGATE),
3152 ENTRY(CLK_IS_CRITICAL),
3153 ENTRY(CLK_OPS_PARENT_ENABLE),
3154 ENTRY(CLK_DUTY_CYCLE_PARENT),
3155 #undef ENTRY
3156 };
3157
3158 static int clk_flags_show(struct seq_file *s, void *data)
3159 {
3160 struct clk_core *core = s->private;
3161 unsigned long flags = core->flags;
3162 unsigned int i;
3163
3164 for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
3165 if (flags & clk_flags[i].flag) {
3166 seq_printf(s, "%s\n", clk_flags[i].name);
3167 flags &= ~clk_flags[i].flag;
3168 }
3169 }
3170 if (flags) {
3171
3172 seq_printf(s, "0x%lx\n", flags);
3173 }
3174
3175 return 0;
3176 }
3177 DEFINE_SHOW_ATTRIBUTE(clk_flags);
3178
3179 static void possible_parent_show(struct seq_file *s, struct clk_core *core,
3180 unsigned int i, char terminator)
3181 {
3182 struct clk_core *parent;
3183
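3184 /*
3185  * Go through the following options to fetch a parent's name.
3186  *
3187  * 1. Fetch the registered parent clock and use its name
3188  * 2. Use the global (fallback) name if specified
3189  * 3. Use the local fw_name if provided
3190  * 4. Fetch parent clock's clock-output-name if DT index was set
3191  *
3192  * This may still fail in some cases, such as when the DT index was set
3193  * but the parent wasn't registered, in which case "(missing)" is printed.
3194  */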
3196 parent = clk_core_get_parent_by_index(core, i);
3197 if (parent)
3198 seq_puts(s, parent->name);
3199 else if (core->parents[i].name)
3200 seq_puts(s, core->parents[i].name);
3201 else if (core->parents[i].fw_name)
3202 seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
3203 else if (core->parents[i].index >= 0)
3204 seq_puts(s,
3205 of_clk_get_parent_name(core->of_node,
3206 core->parents[i].index));
3207 else
3208 seq_puts(s, "(missing)");
3209
3210 seq_putc(s, terminator);
3211 }
3212
3213 static int possible_parents_show(struct seq_file *s, void *data)
3214 {
3215 struct clk_core *core = s->private;
3216 int i;
3217
3218 for (i = 0; i < core->num_parents - 1; i++)
3219 possible_parent_show(s, core, i, ' ');
3220
3221 possible_parent_show(s, core, i, '\n');
3222
3223 return 0;
3224 }
3225 DEFINE_SHOW_ATTRIBUTE(possible_parents);
3226
3227 static int current_parent_show(struct seq_file *s, void *data)
3228 {
3229 struct clk_core *core = s->private;
3230
3231 if (core->parent)
3232 seq_printf(s, "%s\n", core->parent->name);
3233
3234 return 0;
3235 }
3236 DEFINE_SHOW_ATTRIBUTE(current_parent);
3237
3238 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3239 static ssize_t current_parent_write(struct file *file, const char __user *ubuf,
3240 size_t count, loff_t *ppos)
3241 {
3242 struct seq_file *s = file->private_data;
3243 struct clk_core *core = s->private;
3244 struct clk_core *parent;
3245 u8 idx;
3246 int err;
3247
3248 err = kstrtou8_from_user(ubuf, count, 0, &idx);
3249 if (err < 0)
3250 return err;
3251
3252 parent = clk_core_get_parent_by_index(core, idx);
3253 if (!parent)
3254 return -ENOENT;
3255
3256 clk_prepare_lock();
3257 err = clk_core_set_parent_nolock(core, parent);
3258 clk_prepare_unlock();
3259 if (err)
3260 return err;
3261
3262 return count;
3263 }
3264
3265 static const struct file_operations current_parent_rw_fops = {
3266 .open = current_parent_open,
3267 .write = current_parent_write,
3268 .read = seq_read,
3269 .llseek = seq_lseek,
3270 .release = single_release,
3271 };
3272 #endif
3273
3274 static int clk_duty_cycle_show(struct seq_file *s, void *data)
3275 {
3276 struct clk_core *core = s->private;
3277 struct clk_duty *duty = &core->duty;
3278
3279 seq_printf(s, "%u/%u\n", duty->num, duty->den);
3280
3281 return 0;
3282 }
3283 DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
3284
3285 static int clk_min_rate_show(struct seq_file *s, void *data)
3286 {
3287 struct clk_core *core = s->private;
3288 unsigned long min_rate, max_rate;
3289
3290 clk_prepare_lock();
3291 clk_core_get_boundaries(core, &min_rate, &max_rate);
3292 clk_prepare_unlock();
3293 seq_printf(s, "%lu\n", min_rate);
3294
3295 return 0;
3296 }
3297 DEFINE_SHOW_ATTRIBUTE(clk_min_rate);
3298
3299 static int clk_max_rate_show(struct seq_file *s, void *data)
3300 {
3301 struct clk_core *core = s->private;
3302 unsigned long min_rate, max_rate;
3303
3304 clk_prepare_lock();
3305 clk_core_get_boundaries(core, &min_rate, &max_rate);
3306 clk_prepare_unlock();
3307 seq_printf(s, "%lu\n", max_rate);
3308
3309 return 0;
3310 }
3311 DEFINE_SHOW_ATTRIBUTE(clk_max_rate);
3312
3313 static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
3314 {
3315 struct dentry *root;
3316
3317 if (!core || !pdentry)
3318 return;
3319
3320 root = debugfs_create_dir(core->name, pdentry);
3321 core->dentry = root;
3322
3323 debugfs_create_file("clk_rate", clk_rate_mode, root, core,
3324 &clk_rate_fops);
3325 debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
3326 debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
3327 debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
3328 debugfs_create_u32("clk_phase", 0444, root, &core->phase);
3329 debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
3330 debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
3331 debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
3332 debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
3333 debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
3334 debugfs_create_file("clk_duty_cycle", 0444, root, core,
3335 &clk_duty_cycle_fops);
3336 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3337 debugfs_create_file("clk_prepare_enable", 0644, root, core,
3338 &clk_prepare_enable_fops);
3339
3340 if (core->num_parents > 1)
3341 debugfs_create_file("clk_parent", 0644, root, core,
3342 &current_parent_rw_fops);
3343 else
3344 #endif
3345 if (core->num_parents > 0)
3346 debugfs_create_file("clk_parent", 0444, root, core,
3347 &current_parent_fops);
3348
3349 if (core->num_parents > 1)
3350 debugfs_create_file("clk_possible_parents", 0444, root, core,
3351 &possible_parents_fops);
3352
3353 if (core->ops->debug_init)
3354 core->ops->debug_init(core->hw, core->dentry);
3355 }
3356
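3357 /**
3358  * clk_debug_register - add a clk node to the debugfs clk directory
3359  * @core: the clk being added to the debugfs clk directory
3360  *
3361  * Dynamically adds a clk to the debugfs clk directory if debugfs has been
3362  * initialized; otherwise the node is only queued, to be created later by
3363  * clk_debug_init() as part of a late_initcall.
3364  */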
3365 static void clk_debug_register(struct clk_core *core)
3366 {
3367 mutex_lock(&clk_debug_lock);
3368 hlist_add_head(&core->debug_node, &clk_debug_list);
3369 if (inited)
3370 clk_debug_create_one(core, rootdir);
3371 mutex_unlock(&clk_debug_lock);
3372 }
3373
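3374 /**
3375  * clk_debug_unregister - remove a clk node from the debugfs clk directory
3376  * @core: the clk being removed from the debugfs clk directory
3377  *
3378  * Dynamically removes a clk, and any of its children's debugfs nodes,
3379  * from the debugfs clk directory that was created by
3380  * clk_debug_register().
3381  */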
3382 static void clk_debug_unregister(struct clk_core *core)
3383 {
3384 mutex_lock(&clk_debug_lock);
3385 hlist_del_init(&core->debug_node);
3386 debugfs_remove_recursive(core->dentry);
3387 core->dentry = NULL;
3388 mutex_unlock(&clk_debug_lock);
3389 }
3390
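3391 /**
3392  * clk_debug_init - lazily populate the debugfs clk directory
3393  *
3394  * clks are often initialized very early during boot, before memory can
3395  * be dynamically allocated and well before debugfs is set up, so this
3396  * function populates the debugfs clk directory once at boot time. All
3397  * clks registered afterwards are added dynamically via
3398  * clk_debug_register().
3399  */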
3400 static int __init clk_debug_init(void)
3401 {
3402 struct clk_core *core;
3403
3404 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3405 pr_warn("\n");
3406 pr_warn("********************************************************************\n");
3407 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3408 pr_warn("** **\n");
3409 pr_warn("** WRITEABLE clk DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n");
3410 pr_warn("** **\n");
3411 pr_warn("** This means that this kernel is built to expose clk operations **\n");
3412 pr_warn("** such as parent or rate setting, enabling, disabling, etc. **\n");
3413 pr_warn("** to userspace, which may compromise security on your system. **\n");
3414 pr_warn("** **\n");
3415 pr_warn("** If you see this message and you are not debugging the **\n");
3416 pr_warn("** kernel, report this immediately to your vendor! **\n");
3417 pr_warn("** **\n");
3418 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3419 pr_warn("********************************************************************\n");
3420 #endif
3421
3422 rootdir = debugfs_create_dir("clk", NULL);
3423
3424 debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
3425 &clk_summary_fops);
3426 debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
3427 &clk_dump_fops);
3428 debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list,
3429 &clk_summary_fops);
3430 debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list,
3431 &clk_dump_fops);
3432
3433 mutex_lock(&clk_debug_lock);
3434 hlist_for_each_entry(core, &clk_debug_list, debug_node)
3435 clk_debug_create_one(core, rootdir);
3436
3437 inited = 1;
3438 mutex_unlock(&clk_debug_lock);
3439
3440 return 0;
3441 }
3442 late_initcall(clk_debug_init);
3443 #else
3444 static inline void clk_debug_register(struct clk_core *core) { }
3445 static inline void clk_debug_unregister(struct clk_core *core)
3446 {
3447 }
3448 #endif
3449
3450 static void clk_core_reparent_orphans_nolock(void)
3451 {
3452 struct clk_core *orphan;
3453 struct hlist_node *tmp2;
3454
3455
3456
3457
3458
3459 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
3460 struct clk_core *parent = __clk_init_parent(orphan);
3461
3462
3463
3464
3465
3466
3467
3468 if (parent) {
3469
3470 __clk_set_parent_before(orphan, parent);
3471 __clk_set_parent_after(orphan, parent, NULL);
3472 __clk_recalc_accuracies(orphan);
3473 __clk_recalc_rates(orphan, 0);
3474
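3475 /*
3476  * __clk_init_parent() will set the initial req_rate to 0, so
3477  * we need to re-init it here to match the current rate.
3478  */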
3486 orphan->req_rate = orphan->rate;
3487 }
3488 }
3489 }
3490
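3491 /**
3492  * __clk_core_init - initialize the data structures in a struct clk_core
3493  * @core: clk_core being initialized
3494  *
3495  * Initializes the lists in struct clk_core, queries the hardware for the
3496  * clock's parent and rate, and caches them both.
3497  */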
3498 static int __clk_core_init(struct clk_core *core)
3499 {
3500 int ret;
3501 struct clk_core *parent;
3502 unsigned long rate;
3503 int phase;
3504
3505 clk_prepare_lock();
3506
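3507 /*
3508  * Set hw->core after grabbing the prepare_lock to synchronize with
3509  * callers trying to look up this clk as a possible parent, where a
3510  * NULL hw->core means the clk is not registered yet. This is crucial
3511  * so that clks aren't parented until their parent is fully registered.
3512  */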
3513 core->hw->core = core;
3514
3515 ret = clk_pm_runtime_get(core);
3516 if (ret)
3517 goto unlock;
3518
3519
3520 if (clk_core_lookup(core->name)) {
3521 pr_debug("%s: clk %s already initialized\n",
3522 __func__, core->name);
3523 ret = -EEXIST;
3524 goto out;
3525 }
3526
3527
3528 if (core->ops->set_rate &&
3529 !((core->ops->round_rate || core->ops->determine_rate) &&
3530 core->ops->recalc_rate)) {
3531 pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
3532 __func__, core->name);
3533 ret = -EINVAL;
3534 goto out;
3535 }
3536
3537 if (core->ops->set_parent && !core->ops->get_parent) {
3538 pr_err("%s: %s must implement .get_parent & .set_parent\n",
3539 __func__, core->name);
3540 ret = -EINVAL;
3541 goto out;
3542 }
3543
3544 if (core->num_parents > 1 && !core->ops->get_parent) {
3545 pr_err("%s: %s must implement .get_parent as it has multi parents\n",
3546 __func__, core->name);
3547 ret = -EINVAL;
3548 goto out;
3549 }
3550
3551 if (core->ops->set_rate_and_parent &&
3552 !(core->ops->set_parent && core->ops->set_rate)) {
3553 pr_err("%s: %s must implement .set_parent & .set_rate\n",
3554 __func__, core->name);
3555 ret = -EINVAL;
3556 goto out;
3557 }
3558
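3559 /*
3560  * optional platform-specific magic
3561  *
3562  * The .init callback is not used by any of the basic clock types, but
3563  * exists for hardware that must perform initialization work so that
3564  * CCF gets an accurate view of the clock for any other callback.
3565  * Any allocation done in .init must be freed in the .terminate callback.
3566  */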
3573 if (core->ops->init) {
3574 ret = core->ops->init(core->hw);
3575 if (ret)
3576 goto out;
3577 }
3578
3579 parent = core->parent = __clk_init_parent(core);
3580
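3581 /*
3582  * Populate core->parent if the parent has already been registered. If
3583  * the parent has not yet been registered, place the clk in the orphan
3584  * list; if the clk has no parents at all, place it in the root list.
3585  *
3586  * Every time a new clk is registered, we walk the list of orphan clocks
3587  * and re-parent any that are children of the clk being registered.
3588  */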
3591 if (parent) {
3592 hlist_add_head(&core->child_node, &parent->children);
3593 core->orphan = parent->orphan;
3594 } else if (!core->num_parents) {
3595 hlist_add_head(&core->child_node, &clk_root_list);
3596 core->orphan = false;
3597 } else {
3598 hlist_add_head(&core->child_node, &clk_orphan_list);
3599 core->orphan = true;
3600 }
3601
3602
3603
3604
3605
3606
3607
3608
3609 if (core->ops->recalc_accuracy)
3610 core->accuracy = core->ops->recalc_accuracy(core->hw,
3611 clk_core_get_accuracy_no_lock(parent));
3612 else if (parent)
3613 core->accuracy = parent->accuracy;
3614 else
3615 core->accuracy = 0;
3616
3617
3618
3619
3620
3621
3622 phase = clk_core_get_phase(core);
3623 if (phase < 0) {
3624 ret = phase;
3625 pr_warn("%s: Failed to get phase for clk '%s'\n", __func__,
3626 core->name);
3627 goto out;
3628 }
3629
3630
3631
3632
3633 clk_core_update_duty_cycle_nolock(core);
3634
3635
3636
3637
3638
3639
3640
3641 if (core->ops->recalc_rate)
3642 rate = core->ops->recalc_rate(core->hw,
3643 clk_core_get_rate_nolock(parent));
3644 else if (parent)
3645 rate = parent->rate;
3646 else
3647 rate = 0;
3648 core->rate = core->req_rate = rate;
3649
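3650 /*
3651  * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
3652  * don't get accidentally disabled when walking the orphan tree
3653  * and reparenting clocks.
3654  */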
3655 if (core->flags & CLK_IS_CRITICAL) {
3656 ret = clk_core_prepare(core);
3657 if (ret) {
3658 pr_warn("%s: critical clk '%s' failed to prepare\n",
3659 __func__, core->name);
3660 goto out;
3661 }
3662
3663 ret = clk_core_enable_lock(core);
3664 if (ret) {
3665 pr_warn("%s: critical clk '%s' failed to enable\n",
3666 __func__, core->name);
3667 clk_core_unprepare(core);
3668 goto out;
3669 }
3670 }
3671
3672 clk_core_reparent_orphans_nolock();
3673
3674
3675 kref_init(&core->ref);
3676 out:
3677 clk_pm_runtime_put(core);
3678 unlock:
3679 if (ret) {
3680 hlist_del_init(&core->child_node);
3681 core->hw->core = NULL;
3682 }
3683
3684 clk_prepare_unlock();
3685
3686 if (!ret)
3687 clk_debug_register(core);
3688
3689 return ret;
3690 }
3691
3692
3693
3694
3695
3696
3697 static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
3698 {
3699 clk_prepare_lock();
3700 hlist_add_head(&clk->clks_node, &core->clks);
3701 clk_prepare_unlock();
3702 }
3703
3704
3705
3706
3707
3708 static void clk_core_unlink_consumer(struct clk *clk)
3709 {
3710 lockdep_assert_held(&prepare_lock);
3711 hlist_del(&clk->clks_node);
3712 }
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722 static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
3723 const char *con_id)
3724 {
3725 struct clk *clk;
3726
3727 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
3728 if (!clk)
3729 return ERR_PTR(-ENOMEM);
3730
3731 clk->core = core;
3732 clk->dev_id = dev_id;
3733 clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
3734 clk->max_rate = ULONG_MAX;
3735
3736 return clk;
3737 }
3738
3739
3740
3741
3742
3743
3744
3745
3746 static void free_clk(struct clk *clk)
3747 {
3748 kfree_const(clk->con_id);
3749 kfree(clk);
3750 }
3751
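3752 /**
3753  * clk_hw_create_clk - Allocate and link a clk consumer to a clk_core given
3754  * a clk_hw
3755  * @dev: clk consumer device
3756  * @hw: clk_hw associated with the clk being consumed
3757  * @dev_id: string describing device name
3758  * @con_id: connection ID string on device
3759  *
3760  * This is the main function used to create a clk pointer for use by clk
3761  * consumers. It connects a consumer to the clk_core and clk_hw structures
3762  * used by the framework and clk provider respectively.
3763  */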
3764 struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
3765 const char *dev_id, const char *con_id)
3766 {
3767 struct clk *clk;
3768 struct clk_core *core;
3769
3770
3771 if (IS_ERR_OR_NULL(hw))
3772 return ERR_CAST(hw);
3773
3774 core = hw->core;
3775 clk = alloc_clk(core, dev_id, con_id);
3776 if (IS_ERR(clk))
3777 return clk;
3778 clk->dev = dev;
3779
3780 if (!try_module_get(core->owner)) {
3781 free_clk(clk);
3782 return ERR_PTR(-ENOENT);
3783 }
3784
3785 kref_get(&core->ref);
3786 clk_core_link_consumer(core, clk);
3787
3788 return clk;
3789 }
3790
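3791 /**
3792  * clk_hw_get_clk - get clk consumer given an clk_hw
3793  * @hw: clk_hw associated with the clk being consumed
3794  * @con_id: connection ID string on device
3795  *
3796  * Returns: new clk consumer
3797  * This is the function to be used by providers which need
3798  * to get a consumer clk and act on the clock element.
3799  * Calls to this function must be balanced with calls to clk_put().
3800  */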
3801 struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id)
3802 {
3803 struct device *dev = hw->core->dev;
3804 const char *name = dev ? dev_name(dev) : NULL;
3805
3806 return clk_hw_create_clk(dev, hw, name, con_id);
3807 }
3808 EXPORT_SYMBOL(clk_hw_get_clk);
3809
3810 static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
3811 {
3812 const char *dst;
3813
3814 if (!src) {
3815 if (must_exist)
3816 return -EINVAL;
3817 return 0;
3818 }
3819
3820 *dst_p = dst = kstrdup_const(src, GFP_KERNEL);
3821 if (!dst)
3822 return -ENOMEM;
3823
3824 return 0;
3825 }
3826
3827 static int clk_core_populate_parent_map(struct clk_core *core,
3828 const struct clk_init_data *init)
3829 {
3830 u8 num_parents = init->num_parents;
3831 const char * const *parent_names = init->parent_names;
3832 const struct clk_hw **parent_hws = init->parent_hws;
3833 const struct clk_parent_data *parent_data = init->parent_data;
3834 int i, ret = 0;
3835 struct clk_parent_map *parents, *parent;
3836
3837 if (!num_parents)
3838 return 0;
3839
3840
3841
3842
3843
3844 parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
3845 core->parents = parents;
3846 if (!parents)
3847 return -ENOMEM;
3848
3849
3850 for (i = 0, parent = parents; i < num_parents; i++, parent++) {
3851 parent->index = -1;
3852 if (parent_names) {
3853
3854 WARN(!parent_names[i],
3855 "%s: invalid NULL in %s's .parent_names\n",
3856 __func__, core->name);
3857 ret = clk_cpy_name(&parent->name, parent_names[i],
3858 true);
3859 } else if (parent_data) {
3860 parent->hw = parent_data[i].hw;
3861 parent->index = parent_data[i].index;
3862 ret = clk_cpy_name(&parent->fw_name,
3863 parent_data[i].fw_name, false);
3864 if (!ret)
3865 ret = clk_cpy_name(&parent->name,
3866 parent_data[i].name,
3867 false);
3868 } else if (parent_hws) {
3869 parent->hw = parent_hws[i];
3870 } else {
3871 ret = -EINVAL;
3872 WARN(1, "Must specify parents if num_parents > 0\n");
3873 }
3874
3875 if (ret) {
3876 do {
3877 kfree_const(parents[i].name);
3878 kfree_const(parents[i].fw_name);
3879 } while (--i >= 0);
3880 kfree(parents);
3881
3882 return ret;
3883 }
3884 }
3885
3886 return 0;
3887 }
3888
3889 static void clk_core_free_parent_map(struct clk_core *core)
3890 {
3891 int i = core->num_parents;
3892
3893 if (!core->num_parents)
3894 return;
3895
3896 while (--i >= 0) {
3897 kfree_const(core->parents[i].name);
3898 kfree_const(core->parents[i].fw_name);
3899 }
3900
3901 kfree(core->parents);
3902 }
3903
3904 static struct clk *
3905 __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
3906 {
3907 int ret;
3908 struct clk_core *core;
3909 const struct clk_init_data *init = hw->init;
3910
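3911 /*
3912  * The init data is not supposed to be used outside of the registration
3913  * path, so set it to NULL to catch any use of hw->init early on in the
3914  * core.
3915  */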
3916 hw->init = NULL;
3917
3918 core = kzalloc(sizeof(*core), GFP_KERNEL);
3919 if (!core) {
3920 ret = -ENOMEM;
3921 goto fail_out;
3922 }
3923
3924 core->name = kstrdup_const(init->name, GFP_KERNEL);
3925 if (!core->name) {
3926 ret = -ENOMEM;
3927 goto fail_name;
3928 }
3929
3930 if (WARN_ON(!init->ops)) {
3931 ret = -EINVAL;
3932 goto fail_ops;
3933 }
3934 core->ops = init->ops;
3935
3936 if (dev && pm_runtime_enabled(dev))
3937 core->rpm_enabled = true;
3938 core->dev = dev;
3939 core->of_node = np;
3940 if (dev && dev->driver)
3941 core->owner = dev->driver->owner;
3942 core->hw = hw;
3943 core->flags = init->flags;
3944 core->num_parents = init->num_parents;
3945 core->min_rate = 0;
3946 core->max_rate = ULONG_MAX;
3947
3948 ret = clk_core_populate_parent_map(core, init);
3949 if (ret)
3950 goto fail_parents;
3951
3952 INIT_HLIST_HEAD(&core->clks);
3953
3954
3955
3956
3957
3958 hw->clk = alloc_clk(core, NULL, NULL);
3959 if (IS_ERR(hw->clk)) {
3960 ret = PTR_ERR(hw->clk);
3961 goto fail_create_clk;
3962 }
3963
3964 clk_core_link_consumer(core, hw->clk);
3965
3966 ret = __clk_core_init(core);
3967 if (!ret)
3968 return hw->clk;
3969
3970 clk_prepare_lock();
3971 clk_core_unlink_consumer(hw->clk);
3972 clk_prepare_unlock();
3973
3974 free_clk(hw->clk);
3975 hw->clk = NULL;
3976
3977 fail_create_clk:
3978 clk_core_free_parent_map(core);
3979 fail_parents:
3980 fail_ops:
3981 kfree_const(core->name);
3982 fail_name:
3983 kfree(core);
3984 fail_out:
3985 return ERR_PTR(ret);
3986 }
3987
3988
3989
3990
3991
3992
3993
3994
3995
3996 static struct device_node *dev_or_parent_of_node(struct device *dev)
3997 {
3998 struct device_node *np;
3999
4000 if (!dev)
4001 return NULL;
4002
4003 np = dev_of_node(dev);
4004 if (!np)
4005 np = dev_of_node(dev->parent);
4006
4007 return np;
4008 }
4009
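4010 /**
4011  * clk_register - allocate a new clock, register it and return an opaque cookie
4012  * @dev: device that is registering this clock
4013  * @hw: link to hardware-specific clock data
4014  *
4015  * clk_register is the *deprecated* interface for populating the clock tree
4016  * with new clock nodes; use clk_hw_register() instead.
4017  *
4018  * Returns: a pointer to the newly allocated struct clk which cannot be
4019  * dereferenced by driver code but may be used in conjunction with the
4020  * rest of the clock API. In the event of an error clk_register will
4021  * return an error pointer; drivers must test for this after calling it.
4022  */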
4023 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
4024 {
4025 return __clk_register(dev, dev_or_parent_of_node(dev), hw);
4026 }
4027 EXPORT_SYMBOL_GPL(clk_register);
4028
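4029 /**
4030  * clk_hw_register - register a clk_hw and return an error code
4031  * @dev: device that is registering this clock
4032  * @hw: link to hardware-specific clock data
4033  *
4034  * clk_hw_register is the primary interface for populating the clock tree
4035  * with new clock nodes. It returns an integer equal to zero indicating
4036  * success or less than zero indicating failure. Drivers must test for an
4037  * error code after calling clk_hw_register().
4038  */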
4039 int clk_hw_register(struct device *dev, struct clk_hw *hw)
4040 {
4041 return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
4042 hw));
4043 }
4044 EXPORT_SYMBOL_GPL(clk_hw_register);
4045
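4046 /*
4047  * of_clk_hw_register - register a clk_hw and return an error code
4048  * @node: device_node of device that is registering this clock
4049  * @hw: link to hardware-specific clock data
4050  *
4051  * of_clk_hw_register() is the primary interface for populating the clock
4052  * tree with new clock nodes when a struct device is not available, but a
4053  * struct device_node is. It returns an integer equal to zero indicating
4054  * success or less than zero indicating failure. Drivers must test for an
4055  * error code after calling of_clk_hw_register().
4056  */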
4057 int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
4058 {
4059 return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw));
4060 }
4061 EXPORT_SYMBOL_GPL(of_clk_hw_register);
4062
4063
4064 static void __clk_release(struct kref *ref)
4065 {
4066 struct clk_core *core = container_of(ref, struct clk_core, ref);
4067
4068 lockdep_assert_held(&prepare_lock);
4069
4070 clk_core_free_parent_map(core);
4071 kfree_const(core->name);
4072 kfree(core);
4073 }
4074
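4075 /*
4076  * Empty clk_ops for unregistered clocks. These are used temporarily
4077  * after clk_unregister() is called on a clock and until the last
4078  * consumer calls clk_put() and the struct clk object is freed.
4079  */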
4080 static int clk_nodrv_prepare_enable(struct clk_hw *hw)
4081 {
4082 return -ENXIO;
4083 }
4084
4085 static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
4086 {
4087 WARN_ON_ONCE(1);
4088 }
4089
4090 static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
4091 unsigned long parent_rate)
4092 {
4093 return -ENXIO;
4094 }
4095
4096 static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
4097 {
4098 return -ENXIO;
4099 }
4100
4101 static const struct clk_ops clk_nodrv_ops = {
4102 .enable = clk_nodrv_prepare_enable,
4103 .disable = clk_nodrv_disable_unprepare,
4104 .prepare = clk_nodrv_prepare_enable,
4105 .unprepare = clk_nodrv_disable_unprepare,
4106 .set_rate = clk_nodrv_set_rate,
4107 .set_parent = clk_nodrv_set_parent,
4108 };
4109
4110 static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
4111 const struct clk_core *target)
4112 {
4113 int i;
4114 struct clk_core *child;
4115
4116 for (i = 0; i < root->num_parents; i++)
4117 if (root->parents[i].core == target)
4118 root->parents[i].core = NULL;
4119
4120 hlist_for_each_entry(child, &root->children, child_node)
4121 clk_core_evict_parent_cache_subtree(child, target);
4122 }
4123
4124
4125 static void clk_core_evict_parent_cache(struct clk_core *core)
4126 {
4127 const struct hlist_head **lists;
4128 struct clk_core *root;
4129
4130 lockdep_assert_held(&prepare_lock);
4131
4132 for (lists = all_lists; *lists; lists++)
4133 hlist_for_each_entry(root, *lists, child_node)
4134 clk_core_evict_parent_cache_subtree(root, core);
4135
4136 }
4137
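4138 /**
4139  * clk_unregister - unregister a currently registered clock
4140  * @clk: clock to unregister
4141  */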
4142 void clk_unregister(struct clk *clk)
4143 {
4144 unsigned long flags;
4145 const struct clk_ops *ops;
4146
4147 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
4148 return;
4149
4150 clk_debug_unregister(clk->core);
4151
4152 clk_prepare_lock();
4153
4154 ops = clk->core->ops;
4155 if (ops == &clk_nodrv_ops) {
4156 pr_err("%s: unregistered clock: %s\n", __func__,
4157 clk->core->name);
4158 goto unlock;
4159 }
4160
4161
4162
4163
4164 flags = clk_enable_lock();
4165 clk->core->ops = &clk_nodrv_ops;
4166 clk_enable_unlock(flags);
4167
4168 if (ops->terminate)
4169 ops->terminate(clk->core->hw);
4170
4171 if (!hlist_empty(&clk->core->children)) {
4172 struct clk_core *child;
4173 struct hlist_node *t;
4174
4175
4176 hlist_for_each_entry_safe(child, t, &clk->core->children,
4177 child_node)
4178 clk_core_set_parent_nolock(child, NULL);
4179 }
4180
4181 clk_core_evict_parent_cache(clk->core);
4182
4183 hlist_del_init(&clk->core->child_node);
4184
4185 if (clk->core->prepare_count)
4186 pr_warn("%s: unregistering prepared clock: %s\n",
4187 __func__, clk->core->name);
4188
4189 if (clk->core->protect_count)
4190 pr_warn("%s: unregistering protected clock: %s\n",
4191 __func__, clk->core->name);
4192
4193 kref_put(&clk->core->ref, __clk_release);
4194 free_clk(clk);
4195 unlock:
4196 clk_prepare_unlock();
4197 }
4198 EXPORT_SYMBOL_GPL(clk_unregister);
4199
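4200 /**
4201  * clk_hw_unregister - unregister a currently registered clk_hw
4202  * @hw: hardware-specific clock data to unregister
4203  */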
4204 void clk_hw_unregister(struct clk_hw *hw)
4205 {
4206 clk_unregister(hw->clk);
4207 }
4208 EXPORT_SYMBOL_GPL(clk_hw_unregister);
4209
4210 static void devm_clk_unregister_cb(struct device *dev, void *res)
4211 {
4212 clk_unregister(*(struct clk **)res);
4213 }
4214
4215 static void devm_clk_hw_unregister_cb(struct device *dev, void *res)
4216 {
4217 clk_hw_unregister(*(struct clk_hw **)res);
4218 }
4219
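4220 /**
4221  * devm_clk_register - resource managed clk_register()
4222  * @dev: device that is registering this clock
4223  * @hw: link to hardware-specific clock data
4224  *
4225  * Managed clk_register(). This function is *deprecated*; use
4226  * devm_clk_hw_register() instead.
4227  * Clocks returned from this function are automatically clk_unregister()ed
4228  * on driver detach. See clk_register() for more information.
4229  */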
4230 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
4231 {
4232 struct clk *clk;
4233 struct clk **clkp;
4234
4235 clkp = devres_alloc(devm_clk_unregister_cb, sizeof(*clkp), GFP_KERNEL);
4236 if (!clkp)
4237 return ERR_PTR(-ENOMEM);
4238
4239 clk = clk_register(dev, hw);
4240 if (!IS_ERR(clk)) {
4241 *clkp = clk;
4242 devres_add(dev, clkp);
4243 } else {
4244 devres_free(clkp);
4245 }
4246
4247 return clk;
4248 }
4249 EXPORT_SYMBOL_GPL(devm_clk_register);
4250
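4251 /**
4252  * devm_clk_hw_register - resource managed clk_hw_register()
4253  * @dev: device that is registering this clock
4254  * @hw: link to hardware-specific clock data
4255  *
4256  * Managed clk_hw_register(). Clocks registered by this function are
4257  * automatically clk_hw_unregister()ed on driver detach. See
4258  * clk_hw_register() for more information.
4259  */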
4260 int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
4261 {
4262 struct clk_hw **hwp;
4263 int ret;
4264
4265 hwp = devres_alloc(devm_clk_hw_unregister_cb, sizeof(*hwp), GFP_KERNEL);
4266 if (!hwp)
4267 return -ENOMEM;
4268
4269 ret = clk_hw_register(dev, hw);
4270 if (!ret) {
4271 *hwp = hw;
4272 devres_add(dev, hwp);
4273 } else {
4274 devres_free(hwp);
4275 }
4276
4277 return ret;
4278 }
4279 EXPORT_SYMBOL_GPL(devm_clk_hw_register);
4280
4281 static void devm_clk_release(struct device *dev, void *res)
4282 {
4283 clk_put(*(struct clk **)res);
4284 }
4285
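4286 /**
4287  * devm_clk_hw_get_clk - resource managed clk_hw_get_clk()
4288  * @dev: device that is registering this clock
4289  * @hw: clk_hw associated with the clk being consumed
4290  * @con_id: connection ID string on device
4291  *
4292  * Managed clk_hw_get_clk(). Clocks got with this function are
4293  * automatically clk_put() on driver detach. See clk_put() for more
4294  * information.
4295  */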
4296 struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw,
4297 const char *con_id)
4298 {
4299 struct clk *clk;
4300 struct clk **clkp;
4301
4302
4303
4304
4305
4306 WARN_ON_ONCE(dev != hw->core->dev);
4307
4308 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
4309 if (!clkp)
4310 return ERR_PTR(-ENOMEM);
4311
4312 clk = clk_hw_get_clk(hw, con_id);
4313 if (!IS_ERR(clk)) {
4314 *clkp = clk;
4315 devres_add(dev, clkp);
4316 } else {
4317 devres_free(clkp);
4318 }
4319
4320 return clk;
4321 }
4322 EXPORT_SYMBOL_GPL(devm_clk_hw_get_clk);
4323
4324
4325
4326
4327
4328 void __clk_put(struct clk *clk)
4329 {
4330 struct module *owner;
4331
4332 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
4333 return;
4334
4335 clk_prepare_lock();
4336
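4337 /*
4338  * Before calling clk_put, all calls to clk_rate_exclusive_get() from a
4339  * given user must be balanced with calls to clk_rate_exclusive_put()
4340  * by that same consumer.
4341  */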
4342 if (WARN_ON(clk->exclusive_count)) {
4343
4344 clk->core->protect_count -= (clk->exclusive_count - 1);
4345 clk_core_rate_unprotect(clk->core);
4346 clk->exclusive_count = 0;
4347 }
4348
4349 hlist_del(&clk->clks_node);
4350 if (clk->min_rate > clk->core->req_rate ||
4351 clk->max_rate < clk->core->req_rate)
4352 clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
4353
4354 owner = clk->core->owner;
4355 kref_put(&clk->core->ref, __clk_release);
4356
4357 clk_prepare_unlock();
4358
4359 module_put(owner);
4360
4361 free_clk(clk);
4362 }
4363
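4364 /**
4365  * clk_notifier_register - add a clk rate change notifier
4366  * @clk: struct clk * to watch
4367  * @nb: struct notifier_block * with callback info
4368  *
4369  * Request notification when clk's rate changes. This uses an SRCU
4370  * notifier because we want it to block and notifier unregistrations are
4371  * uncommon. The callbacks associated with the notifier must not
4372  * re-enter into the clk framework by calling any top-level clk APIs;
4373  * this will cause a nested prepare_lock mutex.
4374  *
4375  * In all notification cases (pre, post and abort rate change) the original
4376  * clock rate is passed to the callback via struct clk_notifier_data.old_rate
4377  * and the new frequency is passed via struct clk_notifier_data.new_rate.
4378  *
4379  * clk_notifier_register() must be called from non-atomic context.
4380  * Returns -EINVAL if called with null arguments, -ENOMEM upon
4381  * allocation failure; otherwise, passes along the return value of
4382  * srcu_notifier_chain_register().
4383  */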
4386 int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
4387 {
4388 struct clk_notifier *cn;
4389 int ret = -ENOMEM;
4390
4391 if (!clk || !nb)
4392 return -EINVAL;
4393
4394 clk_prepare_lock();
4395
4396
4397 list_for_each_entry(cn, &clk_notifier_list, node)
4398 if (cn->clk == clk)
4399 goto found;
4400
4401
4402 cn = kzalloc(sizeof(*cn), GFP_KERNEL);
4403 if (!cn)
4404 goto out;
4405
4406 cn->clk = clk;
4407 srcu_init_notifier_head(&cn->notifier_head);
4408
4409 list_add(&cn->node, &clk_notifier_list);
4410
4411 found:
4412 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
4413
4414 clk->core->notifier_count++;
4415
4416 out:
4417 clk_prepare_unlock();
4418
4419 return ret;
4420 }
4421 EXPORT_SYMBOL_GPL(clk_notifier_register);
4422
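4423 /**
4424  * clk_notifier_unregister - remove a clk rate change notifier
4425  * @clk: struct clk *
4426  * @nb: struct notifier_block * with callback info
4427  *
4428  * Request no further notification for changes to 'clk' and frees memory
4429  * allocated in clk_notifier_register.
4430  *
4431  * Returns -EINVAL if called with null arguments; otherwise, passes
4432  * along the return value of srcu_notifier_chain_unregister().
4433  */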
4434 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
4435 {
4436 struct clk_notifier *cn;
4437 int ret = -ENOENT;
4438
4439 if (!clk || !nb)
4440 return -EINVAL;
4441
4442 clk_prepare_lock();
4443
4444 list_for_each_entry(cn, &clk_notifier_list, node) {
4445 if (cn->clk == clk) {
4446 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
4447
4448 clk->core->notifier_count--;
4449
4450
4451 if (!cn->notifier_head.head) {
4452 srcu_cleanup_notifier_head(&cn->notifier_head);
4453 list_del(&cn->node);
4454 kfree(cn);
4455 }
4456 break;
4457 }
4458 }
4459
4460 clk_prepare_unlock();
4461
4462 return ret;
4463 }
4464 EXPORT_SYMBOL_GPL(clk_notifier_unregister);
4465
4466 struct clk_notifier_devres {
4467 struct clk *clk;
4468 struct notifier_block *nb;
4469 };
4470
4471 static void devm_clk_notifier_release(struct device *dev, void *res)
4472 {
4473 struct clk_notifier_devres *devres = res;
4474
4475 clk_notifier_unregister(devres->clk, devres->nb);
4476 }
4477
4478 int devm_clk_notifier_register(struct device *dev, struct clk *clk,
4479 struct notifier_block *nb)
4480 {
4481 struct clk_notifier_devres *devres;
4482 int ret;
4483
4484 devres = devres_alloc(devm_clk_notifier_release,
4485 sizeof(*devres), GFP_KERNEL);
4486
4487 if (!devres)
4488 return -ENOMEM;
4489
4490 ret = clk_notifier_register(clk, nb);
4491 if (!ret) {
4492 devres->clk = clk;
4493 devres->nb = nb;
4494 } else {
4495 devres_free(devres);
4496 }
4497
4498 return ret;
4499 }
4500 EXPORT_SYMBOL_GPL(devm_clk_notifier_register);
4501
4502 #ifdef CONFIG_OF
4503 static void clk_core_reparent_orphans(void)
4504 {
4505 clk_prepare_lock();
4506 clk_core_reparent_orphans_nolock();
4507 clk_prepare_unlock();
4508 }
4509
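4510 /**
4511  * struct of_clk_provider - Clock provider registration structure
4512  * @link: Entry in global list of clock providers
4513  * @node: Pointer to device tree node of clock provider
4514  * @get: Get clock callback. Returns NULL or a struct clk for the
4515  *       given clock specifier
4516  * @get_hw: Get clk_hw callback. Returns NULL, ERR_PTR or a
4517  *          struct clk_hw for the given clock specifier
4518  * @data: context pointer to be passed into @get callback
4519  */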
4520 struct of_clk_provider {
4521 struct list_head link;
4522
4523 struct device_node *node;
4524 struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
4525 struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
4526 void *data;
4527 };
4528
4529 extern struct of_device_id __clk_of_table;
4530 static const struct of_device_id __clk_of_table_sentinel
4531 __used __section("__clk_of_table_end");
4532
4533 static LIST_HEAD(of_clk_providers);
4534 static DEFINE_MUTEX(of_clk_mutex);
4535
4536 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
4537 void *data)
4538 {
4539 return data;
4540 }
4541 EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
4542
4543 struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
4544 {
4545 return data;
4546 }
4547 EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);
4548
4549 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
4550 {
4551 struct clk_onecell_data *clk_data = data;
4552 unsigned int idx = clkspec->args[0];
4553
4554 if (idx >= clk_data->clk_num) {
4555 pr_err("%s: invalid clock index %u\n", __func__, idx);
4556 return ERR_PTR(-EINVAL);
4557 }
4558
4559 return clk_data->clks[idx];
4560 }
4561 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
4562
4563 struct clk_hw *
4564 of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
4565 {
4566 struct clk_hw_onecell_data *hw_data = data;
4567 unsigned int idx = clkspec->args[0];
4568
4569 if (idx >= hw_data->num) {
4570 pr_err("%s: invalid index %u\n", __func__, idx);
4571 return ERR_PTR(-EINVAL);
4572 }
4573
4574 return hw_data->hws[idx];
4575 }
4576 EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);
4577
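4578 /**
4579  * of_clk_add_provider() - Register a clock provider for a node
4580  * @np: Device node pointer associated with clock provider
4581  * @clk_src_get: callback for decoding clock
4582  * @data: context pointer for @clk_src_get callback
4583  *
4584  * This function is *deprecated*. Use of_clk_add_hw_provider() instead.
4585  */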
4586 int of_clk_add_provider(struct device_node *np,
4587 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
4588 void *data),
4589 void *data)
4590 {
4591 struct of_clk_provider *cp;
4592 int ret;
4593
4594 if (!np)
4595 return 0;
4596
4597 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
4598 if (!cp)
4599 return -ENOMEM;
4600
4601 cp->node = of_node_get(np);
4602 cp->data = data;
4603 cp->get = clk_src_get;
4604
4605 mutex_lock(&of_clk_mutex);
4606 list_add(&cp->link, &of_clk_providers);
4607 mutex_unlock(&of_clk_mutex);
4608 pr_debug("Added clock from %pOF\n", np);
4609
4610 clk_core_reparent_orphans();
4611
4612 ret = of_clk_set_defaults(np, true);
4613 if (ret < 0)
4614 of_clk_del_provider(np);
4615
4616 fwnode_dev_initialized(&np->fwnode, true);
4617
4618 return ret;
4619 }
4620 EXPORT_SYMBOL_GPL(of_clk_add_provider);
4621
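4622 /**
4623  * of_clk_add_hw_provider() - Register a clock provider for a node
4624  * @np: Device node pointer associated with clock provider
4625  * @get: callback for decoding clk_hw
4626  * @data: context pointer for @get callback
4627  */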
4628 int of_clk_add_hw_provider(struct device_node *np,
4629 struct clk_hw *(*get)(struct of_phandle_args *clkspec,
4630 void *data),
4631 void *data)
4632 {
4633 struct of_clk_provider *cp;
4634 int ret;
4635
4636 if (!np)
4637 return 0;
4638
4639 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
4640 if (!cp)
4641 return -ENOMEM;
4642
4643 cp->node = of_node_get(np);
4644 cp->data = data;
4645 cp->get_hw = get;
4646
4647 mutex_lock(&of_clk_mutex);
4648 list_add(&cp->link, &of_clk_providers);
4649 mutex_unlock(&of_clk_mutex);
4650 pr_debug("Added clk_hw provider from %pOF\n", np);
4651
4652 clk_core_reparent_orphans();
4653
4654 ret = of_clk_set_defaults(np, true);
4655 if (ret < 0)
4656 of_clk_del_provider(np);
4657
4658 fwnode_dev_initialized(&np->fwnode, true);
4659
4660 return ret;
4661 }
4662 EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
4663
4664 static void devm_of_clk_release_provider(struct device *dev, void *res)
4665 {
4666 of_clk_del_provider(*(struct device_node **)res);
4667 }
4668
4669
4670
4671
4672
4673
4674 static struct device_node *get_clk_provider_node(struct device *dev)
4675 {
4676 struct device_node *np, *parent_np;
4677
4678 np = dev->of_node;
4679 parent_np = dev->parent ? dev->parent->of_node : NULL;
4680
4681 if (!of_find_property(np, "#clock-cells", NULL))
4682 if (of_find_property(parent_np, "#clock-cells", NULL))
4683 np = parent_np;
4684
4685 return np;
4686 }
4687
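4688 /**
4689  * devm_of_clk_add_hw_provider() - Managed clk provider node registration
4690  * @dev: Device acting as the clock provider (used for DT node and lifetime)
4691  * @get: callback for decoding clk_hw
4692  * @data: context pointer for @get callback
4693  *
4694  * Registers clock provider for given device's node. If the device has no
4695  * DT node or if the device node lacks clock provider information
4696  * (#clock-cells), then the parent device's node is scanned for this
4697  * information and used if present. The provider is automatically
4698  * released at device exit.
4699  *
4700  * Return: 0 on success or an errno on failure.
4701  */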
4702 int devm_of_clk_add_hw_provider(struct device *dev,
4703 struct clk_hw *(*get)(struct of_phandle_args *clkspec,
4704 void *data),
4705 void *data)
4706 {
4707 struct device_node **ptr, *np;
4708 int ret;
4709
4710 ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
4711 GFP_KERNEL);
4712 if (!ptr)
4713 return -ENOMEM;
4714
4715 np = get_clk_provider_node(dev);
4716 ret = of_clk_add_hw_provider(np, get, data);
4717 if (!ret) {
4718 *ptr = np;
4719 devres_add(dev, ptr);
4720 } else {
4721 devres_free(ptr);
4722 }
4723
4724 return ret;
4725 }
4726 EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);
4727
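4728 /**
4729  * of_clk_del_provider() - Remove a previously registered clock provider
4730  * @np: Device node pointer associated with clock provider
4731  */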
4732 void of_clk_del_provider(struct device_node *np)
4733 {
4734 struct of_clk_provider *cp;
4735
4736 if (!np)
4737 return;
4738
4739 mutex_lock(&of_clk_mutex);
4740 list_for_each_entry(cp, &of_clk_providers, link) {
4741 if (cp->node == np) {
4742 list_del(&cp->link);
4743 fwnode_dev_initialized(&np->fwnode, false);
4744 of_node_put(cp->node);
4745 kfree(cp);
4746 break;
4747 }
4748 }
4749 mutex_unlock(&of_clk_mutex);
4750 }
4751 EXPORT_SYMBOL_GPL(of_clk_del_provider);
4752
4753 static int devm_clk_provider_match(struct device *dev, void *res, void *data)
4754 {
4755 struct device_node **np = res;
4756
4757 if (WARN_ON(!np || !*np))
4758 return 0;
4759
4760 return *np == data;
4761 }
4762
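4763 /**
4764  * devm_of_clk_del_provider() - Remove clock provider registered using devm
4765  * @dev: Device to remove from clock provider list
4766  */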
4767 void devm_of_clk_del_provider(struct device *dev)
4768 {
4769 int ret;
4770 struct device_node *np = get_clk_provider_node(dev);
4771
4772 ret = devres_release(dev, devm_of_clk_release_provider,
4773 devm_clk_provider_match, np);
4774
4775 WARN_ON(ret);
4776 }
4777 EXPORT_SYMBOL(devm_of_clk_del_provider);
4778
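4779 /**
4780  * of_parse_clkspec() - Parse a DT clock specifier for a given device node
4781  * @np: device node to parse clock specifier from
4782  * @index: index of phandle to parse clock out of. If index < 0, @name is used
4783  * @name: clock name to find and parse. If name is NULL, the index is used
4784  * @out_args: Result of parsing the clock specifier
4785  *
4786  * Parses a device node's "clocks" and "clock-names" properties to find the
4787  * phandle and cells for the index or name that is desired. The resulting
4788  * clock specifier is placed into @out_args, or an errno is returned when
4789  * there's a parsing error. The @index argument is ignored if @name is
4790  * non-NULL.
4791  *
4792  * Return: 0 upon successfully parsing the clock specifier; otherwise -ENOENT
4793  * when no matching "clocks" entry is found, or another errno from
4794  * of_parse_phandle_with_args().
4795  */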
4817 static int of_parse_clkspec(const struct device_node *np, int index,
4818 const char *name, struct of_phandle_args *out_args)
4819 {
4820 int ret = -ENOENT;
4821
4822
4823 while (np) {
4824
4825
4826
4827
4828
4829
4830 if (name)
4831 index = of_property_match_string(np, "clock-names", name);
4832 ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
4833 index, out_args);
4834 if (!ret)
4835 break;
4836 if (name && index >= 0)
4837 break;
4838
4839
4840
4841
4842
4843
4844 np = np->parent;
4845 if (np && !of_get_property(np, "clock-ranges", NULL))
4846 break;
4847 index = 0;
4848 }
4849
4850 return ret;
4851 }
4852
4853 static struct clk_hw *
4854 __of_clk_get_hw_from_provider(struct of_clk_provider *provider,
4855 struct of_phandle_args *clkspec)
4856 {
4857 struct clk *clk;
4858
4859 if (provider->get_hw)
4860 return provider->get_hw(clkspec, provider->data);
4861
4862 clk = provider->get(clkspec, provider->data);
4863 if (IS_ERR(clk))
4864 return ERR_CAST(clk);
4865 return __clk_get_hw(clk);
4866 }
4867
4868 static struct clk_hw *
4869 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
4870 {
4871 struct of_clk_provider *provider;
4872 struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
4873
4874 if (!clkspec)
4875 return ERR_PTR(-EINVAL);
4876
4877 mutex_lock(&of_clk_mutex);
4878 list_for_each_entry(provider, &of_clk_providers, link) {
4879 if (provider->node == clkspec->np) {
4880 hw = __of_clk_get_hw_from_provider(provider, clkspec);
4881 if (!IS_ERR(hw))
4882 break;
4883 }
4884 }
4885 mutex_unlock(&of_clk_mutex);
4886
4887 return hw;
4888 }
4889
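4890 /**
4891  * of_clk_get_from_provider() - Lookup a clock from a clock provider
4892  * @clkspec: pointer to a clock specifier data structure
4893  *
4894  * This function looks up a struct clk from the registered list of clock
4895  * providers; an input is a clock specifier data structure as returned
4896  * from the of_parse_phandle_with_args() function call.
4897  */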
4898 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
4899 {
4900 struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec);
4901
4902 return clk_hw_create_clk(NULL, hw, NULL, __func__);
4903 }
4904 EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
4905
4906 struct clk_hw *of_clk_get_hw(struct device_node *np, int index,
4907 const char *con_id)
4908 {
4909 int ret;
4910 struct clk_hw *hw;
4911 struct of_phandle_args clkspec;
4912
4913 ret = of_parse_clkspec(np, index, con_id, &clkspec);
4914 if (ret)
4915 return ERR_PTR(ret);
4916
4917 hw = of_clk_get_hw_from_clkspec(&clkspec);
4918 of_node_put(clkspec.np);
4919
4920 return hw;
4921 }
4922
4923 static struct clk *__of_clk_get(struct device_node *np,
4924 int index, const char *dev_id,
4925 const char *con_id)
4926 {
4927 struct clk_hw *hw = of_clk_get_hw(np, index, con_id);
4928
4929 return clk_hw_create_clk(NULL, hw, dev_id, con_id);
4930 }
4931
4932 struct clk *of_clk_get(struct device_node *np, int index)
4933 {
4934 return __of_clk_get(np, index, np->full_name, NULL);
4935 }
4936 EXPORT_SYMBOL(of_clk_get);
4937
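4938 /**
4939  * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
4940  * @np: pointer to clock consumer node
4941  * @name: name of consumer's clock input, or NULL for the first clock
4942  *        reference
4943  *
4944  * This function parses the clocks and clock-names properties, and uses them
4945  * to look up the struct clk from the registered list of clock providers.
4946  */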
4947 struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
4948 {
4949 if (!np)
4950 return ERR_PTR(-ENOENT);
4951
4952 return __of_clk_get(np, 0, np->full_name, name);
4953 }
4954 EXPORT_SYMBOL(of_clk_get_by_name);
4955
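4956 /**
4957  * of_clk_get_parent_count() - Count the number of clocks a device node has
4958  * @np: device node to count
4959  *
4960  * Returns: The number of clocks that are possible parents of this node
4961  */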
4962 unsigned int of_clk_get_parent_count(const struct device_node *np)
4963 {
4964 int count;
4965
4966 count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
4967 if (count < 0)
4968 return 0;
4969
4970 return count;
4971 }
4972 EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
4973
4974 const char *of_clk_get_parent_name(const struct device_node *np, int index)
4975 {
4976 struct of_phandle_args clkspec;
4977 struct property *prop;
4978 const char *clk_name;
4979 const __be32 *vp;
4980 u32 pv;
4981 int rc;
4982 int count;
4983 struct clk *clk;
4984
4985 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
4986 &clkspec);
4987 if (rc)
4988 return NULL;
4989
4990 index = clkspec.args_count ? clkspec.args[0] : 0;
4991 count = 0;
4992
4993
4994
4995
4996 of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
4997 if (index == pv) {
4998 index = count;
4999 break;
5000 }
5001 count++;
5002 }
5003
5004 if (prop && !vp)
5005 return NULL;
5006
5007 if (of_property_read_string_index(clkspec.np, "clock-output-names",
5008 index,
5009 &clk_name) < 0) {
5010
5011
5012
5013
5014
5015
5016 clk = of_clk_get_from_provider(&clkspec);
5017 if (IS_ERR(clk)) {
5018 if (clkspec.args_count == 0)
5019 clk_name = clkspec.np->name;
5020 else
5021 clk_name = NULL;
5022 } else {
5023 clk_name = __clk_get_name(clk);
5024 clk_put(clk);
5025 }
5026 }
5027
5028
5029 of_node_put(clkspec.np);
5030 return clk_name;
5031 }
5032 EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
5033
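5034 /**
5035  * of_clk_parent_fill() - Fill @parents with names of @np's parents and
5036  *                        return number of parents
5037  * @np: Device node pointer associated with clock provider
5038  * @parents: pointer to char array that holds the parents' names
5039  * @size: size of the @parents array
5040  *
5041  * Return: number of parents for the clock node.
5042  */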
5043 int of_clk_parent_fill(struct device_node *np, const char **parents,
5044 unsigned int size)
5045 {
5046 unsigned int i = 0;
5047
5048 while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
5049 i++;
5050
5051 return i;
5052 }
5053 EXPORT_SYMBOL_GPL(of_clk_parent_fill);
5054
5055 struct clock_provider {
5056 void (*clk_init_cb)(struct device_node *);
5057 struct device_node *np;
5058 struct list_head node;
5059 };
5060
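5061 /*
5062  * This function looks for a parent clock. If there is one, then it
5063  * checks that the provider for this parent clock was initialized, in
5064  * which case the parent clock will be ready.
5065  */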
5066 static int parent_ready(struct device_node *np)
5067 {
5068 int i = 0;
5069
5070 while (true) {
5071 struct clk *clk = of_clk_get(np, i);
5072
5073
5074 if (!IS_ERR(clk)) {
5075 clk_put(clk);
5076 i++;
5077 continue;
5078 }
5079
5080
5081 if (PTR_ERR(clk) == -EPROBE_DEFER)
5082 return 0;
5083
5084
5085
5086
5087
5088
5089
5090
5091
5092 return 1;
5093 }
5094 }
5095
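5096 /**
5097  * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
5098  * @np: Device node pointer associated with clock provider
5099  * @index: clock index
5100  * @flags: pointer to top-level framework flags
5101  *
5102  * Detects if the clock-critical property exists and, if so, sets the
5103  * corresponding CLK_IS_CRITICAL flag.
5104  *
5105  * Do not use this function. It exists only for legacy Device Tree
5106  * bindings, such as the one-clock-per-node style that are outdated.
5107  * Those bindings typically put all clock data into .dts and the Linux
5108  * driver has no clock data, thus making it impossible to set this flag
5109  * correctly from the driver. Only those drivers may call
5110  * of_clk_detect_critical from their setup functions.
5111  *
5112  * Return: error code or zero on success
5113  */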
5114 int of_clk_detect_critical(struct device_node *np, int index,
5115 unsigned long *flags)
5116 {
5117 struct property *prop;
5118 const __be32 *cur;
5119 uint32_t idx;
5120
5121 if (!np || !flags)
5122 return -EINVAL;
5123
5124 of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
5125 if (index == idx)
5126 *flags |= CLK_IS_CRITICAL;
5127
5128 return 0;
5129 }
5130
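5131 /**
5132  * of_clk_init() - Scan and init clock providers from the DT
5133  * @matches: array of compatible values and init functions for providers.
5134  *
5135  * This function scans the device tree for matching clock providers
5136  * and calls their initialization functions, following the dependencies
5137  * between providers where possible.
5138  */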
5139 void __init of_clk_init(const struct of_device_id *matches)
5140 {
5141 const struct of_device_id *match;
5142 struct device_node *np;
5143 struct clock_provider *clk_provider, *next;
5144 bool is_init_done;
5145 bool force = false;
5146 LIST_HEAD(clk_provider_list);
5147
5148 if (!matches)
5149 matches = &__clk_of_table;
5150
5151
5152 for_each_matching_node_and_match(np, matches, &match) {
5153 struct clock_provider *parent;
5154
5155 if (!of_device_is_available(np))
5156 continue;
5157
5158 parent = kzalloc(sizeof(*parent), GFP_KERNEL);
5159 if (!parent) {
5160 list_for_each_entry_safe(clk_provider, next,
5161 &clk_provider_list, node) {
5162 list_del(&clk_provider->node);
5163 of_node_put(clk_provider->np);
5164 kfree(clk_provider);
5165 }
5166 of_node_put(np);
5167 return;
5168 }
5169
5170 parent->clk_init_cb = match->data;
5171 parent->np = of_node_get(np);
5172 list_add_tail(&parent->node, &clk_provider_list);
5173 }
5174
5175 while (!list_empty(&clk_provider_list)) {
5176 is_init_done = false;
5177 list_for_each_entry_safe(clk_provider, next,
5178 &clk_provider_list, node) {
5179 if (force || parent_ready(clk_provider->np)) {
5180
5181
5182 of_node_set_flag(clk_provider->np,
5183 OF_POPULATED);
5184
5185 clk_provider->clk_init_cb(clk_provider->np);
5186 of_clk_set_defaults(clk_provider->np, true);
5187
5188 list_del(&clk_provider->node);
5189 of_node_put(clk_provider->np);
5190 kfree(clk_provider);
5191 is_init_done = true;
5192 }
5193 }
5194
5195
5196
5197
5198
5199
5200
5201 if (!is_init_done)
5202 force = true;
5203 }
5204 }
5205 #endif