/*
 * SuperH clock framework
 *
 *  Copyright (C) 2005 - 2010  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "clock: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/syscore_ops.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

/* clock disable operations are not passed on to hardware during boot */
static int allow_disable;

void clk_rate_table_build(struct clk *clk,
                          struct cpufreq_frequency_table *freq_table,
                          int nr_freqs,
                          struct clk_div_mult_table *src_table,
                          unsigned long *bitmap)
{
        unsigned long mult, div;
        unsigned long freq;
        int i;

        clk->nr_freqs = nr_freqs;

        for (i = 0; i < nr_freqs; i++) {
                div = 1;
                mult = 1;

                if (src_table->divisors && i < src_table->nr_divisors)
                        div = src_table->divisors[i];

                if (src_table->multipliers && i < src_table->nr_multipliers)
                        mult = src_table->multipliers[i];

                if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
                        freq = CPUFREQ_ENTRY_INVALID;
                else
                        freq = clk->parent->rate * mult / div;

                freq_table[i].driver_data = i;
                freq_table[i].frequency = freq;
        }

        /* Termination entry */
        freq_table[i].driver_data = i;
        freq_table[i].frequency = CPUFREQ_TABLE_END;
}
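
/*
 * Usage sketch (illustrative, not from this file): a driver with a
 * four-entry divisor table and no multipliers could build its cpufreq
 * table as below. divisors, div4_table and cpu_clk are hypothetical names.
 *
 *	static unsigned int divisors[] = { 1, 2, 3, 4 };
 *	static struct clk_div_mult_table div4_table = {
 *		.divisors	= divisors,
 *		.nr_divisors	= ARRAY_SIZE(divisors),
 *	};
 *	static struct cpufreq_frequency_table freq_table[5];
 *
 *	clk_rate_table_build(cpu_clk, freq_table, ARRAY_SIZE(divisors),
 *			     &div4_table, NULL);
 *
 * Note that freq_table needs nr_freqs + 1 entries, as the entry at index
 * nr_freqs is overwritten with the CPUFREQ_TABLE_END terminator.
 */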

struct clk_rate_round_data;

struct clk_rate_round_data {
        unsigned long rate;
        unsigned int min, max;
        long (*func)(unsigned int, struct clk_rate_round_data *);
        void *arg;
};

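/*
 * Walk candidate positions [r->min, r->max]; the trailing if/else skips any
 * position for which the iterator callback returns 0 (an invalid entry).
 */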
#define for_each_frequency(pos, r, freq)                        \
        for (pos = r->min, freq = r->func(pos, r);              \
             pos <= r->max; pos++, freq = r->func(pos, r))      \
                if (unlikely(freq == 0))                        \
                        ;                                       \
                else

static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
{
        unsigned long rate_error, rate_error_prev = ~0UL;
        unsigned long highest, lowest, freq;
        long rate_best_fit = -ENOENT;
        int i;

        highest = 0;
        lowest = ~0UL;

        for_each_frequency(i, rounder, freq) {
                if (freq > highest)
                        highest = freq;
                if (freq < lowest)
                        lowest = freq;

                rate_error = abs(freq - rounder->rate);
                if (rate_error < rate_error_prev) {
                        rate_best_fit = freq;
                        rate_error_prev = rate_error;
                }

                if (rate_error == 0)
                        break;
        }

        if (rounder->rate >= highest)
                rate_best_fit = highest;
        if (rounder->rate <= lowest)
                rate_best_fit = lowest;

        return rate_best_fit;
}
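
/*
 * Worked example (hypothetical rates): given candidates { 33, 66, 99 } MHz,
 * a request for 48 MHz returns 33 MHz (smallest error), a request for
 * 120 MHz is clamped to the highest candidate (99 MHz), and a request for
 * 10 MHz is clamped to the lowest (33 MHz).
 */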

static long clk_rate_table_iter(unsigned int pos,
                                struct clk_rate_round_data *rounder)
{
        struct cpufreq_frequency_table *freq_table = rounder->arg;
        unsigned long freq = freq_table[pos].frequency;

        if (freq == CPUFREQ_ENTRY_INVALID)
                freq = 0;

        return freq;
}

long clk_rate_table_round(struct clk *clk,
                          struct cpufreq_frequency_table *freq_table,
                          unsigned long rate)
{
        struct clk_rate_round_data table_round = {
                .min    = 0,
                .max    = clk->nr_freqs - 1,
                .func   = clk_rate_table_iter,
                .arg    = freq_table,
                .rate   = rate,
        };

        if (clk->nr_freqs < 1)
                return -ENOSYS;

        return clk_rate_round_helper(&table_round);
}

static long clk_rate_div_range_iter(unsigned int pos,
                                    struct clk_rate_round_data *rounder)
{
        return clk_get_rate(rounder->arg) / pos;
}

long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
                              unsigned int div_max, unsigned long rate)
{
        struct clk_rate_round_data div_range_round = {
                .min    = div_min,
                .max    = div_max,
                .func   = clk_rate_div_range_iter,
                .arg    = clk_get_parent(clk),
                .rate   = rate,
        };

        return clk_rate_round_helper(&div_range_round);
}
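
/*
 * Sketch of a typical caller (illustrative name, not from this file): a
 * clock whose hardware divides its parent by 1..64 can implement its
 * .round_rate op as a thin wrapper around this helper.
 *
 *	static long example_div_round_rate(struct clk *clk, unsigned long rate)
 *	{
 *		return clk_rate_div_range_round(clk, 1, 64, rate);
 *	}
 */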

static long clk_rate_mult_range_iter(unsigned int pos,
                                     struct clk_rate_round_data *rounder)
{
        return clk_get_rate(rounder->arg) * pos;
}

long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min,
                               unsigned int mult_max, unsigned long rate)
{
        struct clk_rate_round_data mult_range_round = {
                .min    = mult_min,
                .max    = mult_max,
                .func   = clk_rate_mult_range_iter,
                .arg    = clk_get_parent(clk),
                .rate   = rate,
        };

        return clk_rate_round_helper(&mult_range_round);
}

int clk_rate_table_find(struct clk *clk,
                        struct cpufreq_frequency_table *freq_table,
                        unsigned long rate)
{
        struct cpufreq_frequency_table *pos;
        int idx;

        cpufreq_for_each_valid_entry_idx(pos, freq_table, idx)
                if (pos->frequency == rate)
                        return idx;

        return -ENOENT;
}

/* Used for clocks that always have same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
        return clk->parent ? clk->parent->rate : 0;
}

int clk_reparent(struct clk *child, struct clk *parent)
{
        list_del_init(&child->sibling);
        if (parent)
                list_add(&child->sibling, &parent->children);
        child->parent = parent;

        return 0;
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &tclk->children, sibling) {
                if (clkp->ops && clkp->ops->recalc)
                        clkp->rate = clkp->ops->recalc(clkp);

                propagate_rate(clkp);
        }
}

static void __clk_disable(struct clk *clk)
{
        if (WARN(!clk->usecount, "Trying to disable clock %p with 0 usecount\n",
                 clk))
                return;

        if (!(--clk->usecount)) {
                if (likely(allow_disable && clk->ops && clk->ops->disable))
                        clk->ops->disable(clk);
                if (likely(clk->parent))
                        __clk_disable(clk->parent);
        }
}

void clk_disable(struct clk *clk)
{
        unsigned long flags;

        if (!clk)
                return;

        spin_lock_irqsave(&clock_lock, flags);
        __clk_disable(clk);
        spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
        int ret = 0;

        if (clk->usecount++ == 0) {
                if (clk->parent) {
                        ret = __clk_enable(clk->parent);
                        if (unlikely(ret))
                                goto err;
                }

                if (clk->ops && clk->ops->enable) {
                        ret = clk->ops->enable(clk);
                        if (ret) {
                                if (clk->parent)
                                        __clk_disable(clk->parent);
                                goto err;
                        }
                }
        }

        return ret;
err:
        clk->usecount--;
        return ret;
}

int clk_enable(struct clk *clk)
{
        unsigned long flags;
        int ret;

        if (!clk)
                return -EINVAL;

        spin_lock_irqsave(&clock_lock, flags);
        ret = __clk_enable(clk);
        spin_unlock_irqrestore(&clock_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
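
/*
 * Typical consumer pattern (sketch; "example_clk" is a hypothetical clock
 * name, not one defined here):
 *
 *	struct clk *clk = clk_get(dev, "example_clk");
 *	int ret;
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	ret = clk_enable(clk);
 *	if (ret) {
 *		clk_put(clk);
 *		return ret;
 *	}
 *
 * Each successful clk_enable() must be balanced by a clk_disable().
 */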

static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &root_clks, sibling) {
                if (clkp->ops && clkp->ops->recalc)
                        clkp->rate = clkp->ops->recalc(clkp);
                propagate_rate(clkp);
        }
}

static struct clk_mapping dummy_mapping;

static struct clk *lookup_root_clock(struct clk *clk)
{
        while (clk->parent)
                clk = clk->parent;

        return clk;
}

static int clk_establish_mapping(struct clk *clk)
{
        struct clk_mapping *mapping = clk->mapping;

        /*
         * Propagate mappings.
         */
        if (!mapping) {
                struct clk *clkp;

                /*
                 * dummy mapping for root clocks with no specified ranges
                 */
                if (!clk->parent) {
                        clk->mapping = &dummy_mapping;
                        goto out;
                }

                /*
                 * If we're on a child clock and it provides no mapping of
                 * its own, inherit the mapping from its root clock.
                 */
                clkp = lookup_root_clock(clk);
                mapping = clkp->mapping;
                BUG_ON(!mapping);
        }

        /*
         * Establish initial mapping.
         */
        if (!mapping->base && mapping->phys) {
                kref_init(&mapping->ref);

                mapping->base = ioremap(mapping->phys, mapping->len);
                if (unlikely(!mapping->base))
                        return -ENXIO;
        } else if (mapping->base) {
                /*
                 * Bump the refcount for an existing mapping
                 */
                kref_get(&mapping->ref);
        }

        clk->mapping = mapping;
out:
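        /*
         * enable_reg holds a physical address at this point; rebase it
         * into the ioremapped window so register accesses go via mapped_reg.
         */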
        clk->mapped_reg = clk->mapping->base;
        clk->mapped_reg += (phys_addr_t)clk->enable_reg - clk->mapping->phys;
        return 0;
}

static void clk_destroy_mapping(struct kref *kref)
{
        struct clk_mapping *mapping;

        mapping = container_of(kref, struct clk_mapping, ref);

        iounmap(mapping->base);
}

static void clk_teardown_mapping(struct clk *clk)
{
        struct clk_mapping *mapping = clk->mapping;

        /* Nothing to do */
        if (mapping == &dummy_mapping)
                goto out;

        kref_put(&mapping->ref, clk_destroy_mapping);
        clk->mapping = NULL;
out:
        clk->mapped_reg = NULL;
}

int clk_register(struct clk *clk)
{
        int ret;

        if (IS_ERR_OR_NULL(clk))
                return -EINVAL;

        /*
         * trap out already registered clocks
         */
        if (clk->node.next || clk->node.prev)
                return 0;

        mutex_lock(&clock_list_sem);

        INIT_LIST_HEAD(&clk->children);
        clk->usecount = 0;

        ret = clk_establish_mapping(clk);
        if (unlikely(ret))
                goto out_unlock;

        if (clk->parent)
                list_add(&clk->sibling, &clk->parent->children);
        else
                list_add(&clk->sibling, &root_clks);

        list_add(&clk->node, &clock_list);

#ifdef CONFIG_SH_CLK_CPG_LEGACY
        if (clk->ops && clk->ops->init)
                clk->ops->init(clk);
#endif

out_unlock:
        mutex_unlock(&clock_list_sem);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_register);
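
/*
 * Registration sketch (illustrative names; extal_clk and example_clk are
 * hypothetical): a board or CPG driver typically registers statically
 * defined clocks at setup time.
 *
 *	static struct sh_clk_ops example_clk_ops = {
 *		.recalc	= followparent_recalc,
 *	};
 *
 *	static struct clk example_clk = {
 *		.parent	= &extal_clk,
 *		.ops	= &example_clk_ops,
 *	};
 *
 *	clk_register(&example_clk);
 */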

void clk_unregister(struct clk *clk)
{
        mutex_lock(&clock_list_sem);
        list_del(&clk->sibling);
        list_del(&clk->node);
        clk_teardown_mapping(clk);
        mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

void clk_enable_init_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &clock_list, node)
                if (clkp->flags & CLK_ENABLE_ON_INIT)
                        clk_enable(clkp);
}

unsigned long clk_get_rate(struct clk *clk)
{
        if (!clk)
                return 0;

        return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
        int ret = -EOPNOTSUPP;
        unsigned long flags;

        if (!clk)
                return 0;

        spin_lock_irqsave(&clock_lock, flags);

        if (likely(clk->ops && clk->ops->set_rate)) {
                ret = clk->ops->set_rate(clk, rate);
                if (ret != 0)
                        goto out_unlock;
        } else {
                clk->rate = rate;
                ret = 0;
        }

        if (clk->ops && clk->ops->recalc)
                clk->rate = clk->ops->recalc(clk);

        propagate_rate(clk);

out_unlock:
        spin_unlock_irqrestore(&clock_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
        unsigned long flags;
        int ret = -EINVAL;

        if (!parent || !clk)
                return ret;
        if (clk->parent == parent)
                return 0;

        spin_lock_irqsave(&clock_lock, flags);
        if (clk->usecount == 0) {
                if (clk->ops->set_parent)
                        ret = clk->ops->set_parent(clk, parent);
                else
                        ret = clk_reparent(clk, parent);

                if (ret == 0) {
                        if (clk->ops->recalc)
                                clk->rate = clk->ops->recalc(clk);
                        pr_debug("set parent of %p to %p (new rate %ld)\n",
                                 clk, clk->parent, clk->rate);
                        propagate_rate(clk);
                }
        } else
                ret = -EBUSY;
        spin_unlock_irqrestore(&clock_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
        if (!clk)
                return NULL;

        return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
        if (!clk)
                return 0;

        if (likely(clk->ops && clk->ops->round_rate)) {
                unsigned long flags, rounded;

                spin_lock_irqsave(&clock_lock, flags);
                rounded = clk->ops->round_rate(clk, rate);
                spin_unlock_irqrestore(&clock_lock, flags);

                return rounded;
        }

        return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);
#ifdef CONFIG_PM
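/*
 * On resume, reprogram each in-use clock's parent and rate from the state
 * cached in struct clk, since the hardware may have lost its settings.
 */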
static void clks_core_resume(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &clock_list, node) {
                if (likely(clkp->usecount && clkp->ops)) {
                        unsigned long rate = clkp->rate;

                        if (likely(clkp->ops->set_parent))
                                clkp->ops->set_parent(clkp,
                                        clkp->parent);
                        if (likely(clkp->ops->set_rate))
                                clkp->ops->set_rate(clkp, rate);
                        else if (likely(clkp->ops->recalc))
                                clkp->rate = clkp->ops->recalc(clkp);
                }
        }
}

static struct syscore_ops clks_syscore_ops = {
        .resume = clks_core_resume,
};

static int __init clk_syscore_init(void)
{
        register_syscore_ops(&clks_syscore_ops);

        return 0;
}
subsys_initcall(clk_syscore_init);
#endif

static int __init clk_late_init(void)
{
        unsigned long flags;
        struct clk *clk;

        /* disable all clocks with zero use count */
        mutex_lock(&clock_list_sem);
        spin_lock_irqsave(&clock_lock, flags);

        list_for_each_entry(clk, &clock_list, node)
                if (!clk->usecount && clk->ops && clk->ops->disable)
                        clk->ops->disable(clk);

        /* from now on allow clock disable operations */
        allow_disable = 1;

        spin_unlock_irqrestore(&clock_lock, flags);
        mutex_unlock(&clock_list_sem);
        return 0;
}
late_initcall(clk_late_init);