0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018 #include <linux/kernel.h>
0019 #include <linux/device.h>
0020 #include <linux/list.h>
0021 #include <linux/errno.h>
0022 #include <linux/delay.h>
0023 #include <linux/clk.h>
0024 #include <linux/io.h>
0025 #include <linux/bitops.h>
0026 #include <linux/clkdev.h>
0027 #include <linux/clk/ti.h>
0028
0029 #include "clock.h"
0030
0031
0032 #define DPLL_AUTOIDLE_DISABLE 0x0
0033 #define DPLL_AUTOIDLE_LOW_POWER_STOP 0x1
0034
0035 #define MAX_DPLL_WAIT_TRIES 1000000
0036
0037 #define OMAP3XXX_EN_DPLL_LOCKED 0x7
0038
0039
0040 static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk);
0041 static void omap3_dpll_deny_idle(struct clk_hw_omap *clk);
0042 static void omap3_dpll_allow_idle(struct clk_hw_omap *clk);
0043
0044
0045
0046
0047 static void _omap3_dpll_write_clken(struct clk_hw_omap *clk, u8 clken_bits)
0048 {
0049 const struct dpll_data *dd;
0050 u32 v;
0051
0052 dd = clk->dpll_data;
0053
0054 v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
0055 v &= ~dd->enable_mask;
0056 v |= clken_bits << __ffs(dd->enable_mask);
0057 ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
0058 }
0059
0060
/*
 * _omap3_wait_dpll_status - wait for a DPLL to enter a specific state
 * @clk: a DPLL struct clk
 * @state: the state to wait for (0 = bypassed, 1 = locked, per the log text)
 *
 * Polls the DPLL's IDLEST bitfield (with 1 us delays) for up to
 * MAX_DPLL_WAIT_TRIES iterations until it matches @state.
 * Returns 0 on success or -EINVAL on timeout.
 */
static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state)
{
	const struct dpll_data *dd;
	int i = 0;
	int ret = -EINVAL;
	const char *clk_name;

	dd = clk->dpll_data;
	clk_name = clk_hw_get_name(&clk->hw);

	/* Shift the requested state up to the IDLEST bitfield position */
	state <<= __ffs(dd->idlest_mask);

	while (((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask)
		!= state) && i < MAX_DPLL_WAIT_TRIES) {
		i++;
		udelay(1);
	}

	if (i == MAX_DPLL_WAIT_TRIES) {
		pr_err("clock: %s failed transition to '%s'\n",
		       clk_name, (state) ? "locked" : "bypassed");
	} else {
		pr_debug("clock: %s transition to '%s' in %d loops\n",
			 clk_name, (state) ? "locked" : "bypassed", i);

		ret = 0;
	}

	return ret;
}
0091
0092
0093 static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n)
0094 {
0095 unsigned long fint;
0096 u16 f = 0;
0097
0098 fint = clk_hw_get_rate(clk->dpll_data->clk_ref) / n;
0099
0100 pr_debug("clock: fint is %lu\n", fint);
0101
0102 if (fint >= 750000 && fint <= 1000000)
0103 f = 0x3;
0104 else if (fint > 1000000 && fint <= 1250000)
0105 f = 0x4;
0106 else if (fint > 1250000 && fint <= 1500000)
0107 f = 0x5;
0108 else if (fint > 1500000 && fint <= 1750000)
0109 f = 0x6;
0110 else if (fint > 1750000 && fint <= 2100000)
0111 f = 0x7;
0112 else if (fint > 7500000 && fint <= 10000000)
0113 f = 0xB;
0114 else if (fint > 10000000 && fint <= 12500000)
0115 f = 0xC;
0116 else if (fint > 12500000 && fint <= 15000000)
0117 f = 0xD;
0118 else if (fint > 15000000 && fint <= 17500000)
0119 f = 0xE;
0120 else if (fint > 17500000 && fint <= 21000000)
0121 f = 0xF;
0122 else
0123 pr_debug("clock: unknown freqsel setting for %d\n", n);
0124
0125 return f;
0126 }
0127
0128
0129
0130
0131
0132
0133
0134
0135
0136
0137
/*
 * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to lock.  Waits for the DPLL to report
 * readiness before returning; autoidle is temporarily denied around
 * the transition and restored afterwards.  Returns 0 if the DPLL was
 * already locked or locked successfully, or the error from
 * _omap3_wait_dpll_status() (-EINVAL on timeout) otherwise.
 */
static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u8 ai;
	u8 state = 1;
	int r = 0;

	pr_debug("clock: locking DPLL %s\n", clk_hw_get_name(&clk->hw));

	dd = clk->dpll_data;
	state <<= __ffs(dd->idlest_mask);

	/* Nothing to do if the DPLL already reports locked status */
	if ((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask) ==
	    state)
		goto done;

	ai = omap3_dpll_autoidle_read(clk);

	/* Keep the DPLL from autoidling while the transition is in progress */
	if (ai)
		omap3_dpll_deny_idle(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOCKED);

	r = _omap3_wait_dpll_status(clk, 1);

	if (ai)
		omap3_dpll_allow_idle(clk);

done:
	return r;
}
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184 static int _omap3_noncore_dpll_bypass(struct clk_hw_omap *clk)
0185 {
0186 int r;
0187 u8 ai;
0188
0189 if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS)))
0190 return -EINVAL;
0191
0192 pr_debug("clock: configuring DPLL %s for low-power bypass\n",
0193 clk_hw_get_name(&clk->hw));
0194
0195 ai = omap3_dpll_autoidle_read(clk);
0196
0197 _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS);
0198
0199 r = _omap3_wait_dpll_status(clk, 0);
0200
0201 if (ai)
0202 omap3_dpll_allow_idle(clk);
0203
0204 return r;
0205 }
0206
0207
0208
0209
0210
0211
0212
0213
0214
0215
0216 static int _omap3_noncore_dpll_stop(struct clk_hw_omap *clk)
0217 {
0218 u8 ai;
0219
0220 if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP)))
0221 return -EINVAL;
0222
0223 pr_debug("clock: stopping DPLL %s\n", clk_hw_get_name(&clk->hw));
0224
0225 ai = omap3_dpll_autoidle_read(clk);
0226
0227 _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP);
0228
0229 if (ai)
0230 omap3_dpll_allow_idle(clk);
0231
0232 return 0;
0233 }
0234
0235
0236
0237
0238
0239
0240
0241
0242
0243
0244
0245
0246
0247 static void _lookup_dco(struct clk_hw_omap *clk, u8 *dco, u16 m, u8 n)
0248 {
0249 unsigned long fint, clkinp;
0250
0251 clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));
0252 fint = (clkinp / n) * m;
0253
0254 if (fint < 1000000000)
0255 *dco = 2;
0256 else
0257 *dco = 4;
0258 }
0259
0260
0261
0262
0263
0264
0265
0266
0267
0268
0269
0270
0271
0272 static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n)
0273 {
0274 unsigned long clkinp, sd;
0275 int mod1, mod2;
0276
0277 clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));
0278
0279
0280
0281
0282
0283 clkinp /= 100000;
0284 mod1 = (clkinp * m) % (250 * n);
0285 sd = (clkinp * m) / (250 * n);
0286 mod2 = sd % 10;
0287 sd /= 10;
0288
0289 if (mod1 || mod2)
0290 sd++;
0291 *sd_div = sd;
0292 }
0293
0294
0295
0296
0297
0298
0299
0300
/*
 * omap3_noncore_dpll_ssc_program - program spread-spectrum clocking registers
 * @clk: struct clk_hw_omap * of the DPLL to configure
 *
 * Enables the DPLL's spread-spectrum clocking (SSC) feature when both a
 * modulation frequency (ssc_modfreq) and a delta-M amplitude (ssc_deltam)
 * are configured in the dpll_data; otherwise disables it.  Programs the
 * modulation-frequency and delta-M step registers and warns when the
 * requested modulation frequency or the resulting modulated multiplier
 * is out of range.
 */
static void omap3_noncore_dpll_ssc_program(struct clk_hw_omap *clk)
{
	struct dpll_data *dd = clk->dpll_data;
	unsigned long ref_rate;
	u32 v, ctrl, mod_freq_divider, exponent, mantissa;
	u32 deltam_step, deltam_ceil;

	ctrl = ti_clk_ll_ops->clk_readl(&dd->control_reg);

	if (dd->ssc_modfreq && dd->ssc_deltam) {
		ctrl |= dd->ssc_enable_mask;

		if (dd->ssc_downspread)
			ctrl |= dd->ssc_downspread_mask;
		else
			ctrl &= ~dd->ssc_downspread_mask;

		/* Modulation-frequency divider = (refclk / N) / (4 * modfreq) */
		ref_rate = clk_hw_get_rate(dd->clk_ref);
		mod_freq_divider =
		    (ref_rate / dd->last_rounded_n) / (4 * dd->ssc_modfreq);
		if (dd->ssc_modfreq > (ref_rate / 70))
			pr_warn("clock: SSC modulation frequency of DPLL %s greater than %ld\n",
				__clk_get_name(clk->hw.clk), ref_rate / 70);

		/*
		 * Express the divider as mantissa * 2^exponent with a
		 * 7-bit mantissa and a 3-bit exponent; saturate the
		 * mantissa at 127 if it still does not fit.
		 */
		exponent = 0;
		mantissa = mod_freq_divider;
		while ((mantissa > 127) && (exponent < 7)) {
			exponent++;
			mantissa /= 2;
		}
		if (mantissa > 127)
			mantissa = 127;

		v = ti_clk_ll_ops->clk_readl(&dd->ssc_modfreq_reg);
		v &= ~(dd->ssc_modfreq_mant_mask | dd->ssc_modfreq_exp_mask);
		v |= mantissa << __ffs(dd->ssc_modfreq_mant_mask);
		v |= exponent << __ffs(dd->ssc_modfreq_exp_mask);
		ti_clk_ll_ops->clk_writel(v, &dd->ssc_modfreq_reg);

		/*
		 * Scale M by ssc_deltam; the /10 here combined with the
		 * /100 below suggests ssc_deltam is in per-mille of M —
		 * NOTE(review): confirm the unit against the TRM.
		 * Down-spread only modulates in one direction, so halve.
		 */
		deltam_step = dd->last_rounded_m * dd->ssc_deltam;
		deltam_step /= 10;
		if (dd->ssc_downspread)
			deltam_step /= 2;

		/* Shift into fixed-point position, then finish the scaling */
		deltam_step <<= __ffs(dd->ssc_deltam_int_mask);
		deltam_step /= 100;
		deltam_step /= mod_freq_divider;
		if (deltam_step > 0xFFFFF)
			deltam_step = 0xFFFFF;

		/* Round the integer part of the step up if a fraction remains */
		deltam_ceil = (deltam_step & dd->ssc_deltam_int_mask) >>
		    __ffs(dd->ssc_deltam_int_mask);
		if (deltam_step & dd->ssc_deltam_frac_mask)
			deltam_ceil++;

		/* Warn if the modulated multiplier leaves the 20..2045 window */
		if ((dd->ssc_downspread &&
		     ((dd->last_rounded_m - (2 * deltam_ceil)) < 20 ||
		      dd->last_rounded_m > 2045)) ||
		    ((dd->last_rounded_m - deltam_ceil) < 20 ||
		     (dd->last_rounded_m + deltam_ceil) > 2045))
			pr_warn("clock: SSC multiplier of DPLL %s is out of range\n",
				__clk_get_name(clk->hw.clk));

		v = ti_clk_ll_ops->clk_readl(&dd->ssc_deltam_reg);
		v &= ~(dd->ssc_deltam_int_mask | dd->ssc_deltam_frac_mask);
		v |= deltam_step << __ffs(dd->ssc_deltam_int_mask |
					  dd->ssc_deltam_frac_mask);
		ti_clk_ll_ops->clk_writel(v, &dd->ssc_deltam_reg);
	} else {
		ctrl &= ~dd->ssc_enable_mask;
	}

	ti_clk_ll_ops->clk_writel(ctrl, &dd->control_reg);
}
0375
0376
0377
0378
0379
0380
0381
0382
0383
/*
 * omap3_noncore_dpll_program - set non-CORE DPLL M,N values directly
 * @clk:	struct clk_hw_omap * of DPLL to set
 * @freqsel:	FREQSEL value to set (only used when the SoC has FREQSEL)
 *
 * Programs the DPLL with the last rounded M/N values (and, where the
 * hardware has them, the FREQSEL, DCC, DCO, SD-divider, M4XEN and
 * LPMODE fields), then re-locks the DPLL.  The DPLL is put into
 * bypass before reprogramming.  Returns 0 always.
 */
static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
{
	struct dpll_data *dd = clk->dpll_data;
	u8 dco, sd_div, ai = 0;
	u32 v;
	bool errata_i810;

	/* The divider fields must not be changed while the DPLL is locked */
	_omap3_noncore_dpll_bypass(clk);

	/*
	 * Set jitter correction.  Only done on SoCs whose DPLLs still
	 * have the FREQSEL field (see TI_CLK_DPLL_HAS_FREQSEL).
	 */
	if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
		v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
		v &= ~dd->freqsel_mask;
		v |= freqsel << __ffs(dd->freqsel_mask);
		ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
	}

	/* Set DPLL multiplier, divider */
	v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);

	/* Duty-cycle correction: enabled at or above dcc_rate */
	if (dd->dcc_mask) {
		if (dd->last_rounded_rate >= dd->dcc_rate)
			v |= dd->dcc_mask;
		else
			v &= ~dd->dcc_mask;
	}

	v &= ~(dd->mult_mask | dd->div1_mask);
	v |= dd->last_rounded_m << __ffs(dd->mult_mask);
	v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);

	/* Configure dco and sd_div for DPLLs that have these fields */
	if (dd->dco_mask) {
		_lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n);
		v &= ~(dd->dco_mask);
		v |= dco << __ffs(dd->dco_mask);
	}
	if (dd->sddiv_mask) {
		_lookup_sddiv(clk, &sd_div, dd->last_rounded_m,
			      dd->last_rounded_n);
		v &= ~(dd->sddiv_mask);
		v |= sd_div << __ffs(dd->sddiv_mask);
	}

	/*
	 * Errata i810: the DPLL controller can get stuck while
	 * transitioning to a power-saving state, so the DPLL must be
	 * prevented from autoidling while the M/N values are changed.
	 * Autoidle is restored at the end of this function.
	 */
	errata_i810 = ti_clk_get_features()->flags & TI_CLK_ERRATA_I810;

	if (errata_i810) {
		ai = omap3_dpll_autoidle_read(clk);
		if (ai) {
			omap3_dpll_deny_idle(clk);

			/* Read back to ensure the write has reached the IP */
			omap3_dpll_autoidle_read(clk);
		}
	}

	ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg);

	/* Set 4X multiplier and low-power mode where the DPLL has them */
	if (dd->m4xen_mask || dd->lpmode_mask) {
		v = ti_clk_ll_ops->clk_readl(&dd->control_reg);

		if (dd->m4xen_mask) {
			if (dd->last_rounded_m4xen)
				v |= dd->m4xen_mask;
			else
				v &= ~dd->m4xen_mask;
		}

		if (dd->lpmode_mask) {
			if (dd->last_rounded_lpmode)
				v |= dd->lpmode_mask;
			else
				v &= ~dd->lpmode_mask;
		}

		ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
	}

	if (dd->ssc_enable_mask)
		omap3_noncore_dpll_ssc_program(clk);

	/*
	 * Re-lock the DPLL now that the new divider configuration has
	 * been written.
	 */
	_omap3_noncore_dpll_lock(clk);

	if (errata_i810 && ai)
		omap3_dpll_allow_idle(clk);

	return 0;
}
0489
0490
0491
0492
0493
0494
0495
0496
0497
0498
/*
 * omap3_dpll_recalc - recalculate DPLL rate
 * @hw: struct clk_hw containing the DPLL
 * @parent_rate: parent rate supplied by the clock framework (unused;
 *               the rate is computed from the DPLL registers)
 *
 * Recalculate and propagate the DPLL rate.
 */
unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate)
{
	return omap2_get_dpll_rate(to_clk_hw_omap(hw));
}
0505
0506
0507
0508
0509
0510
0511
0512
0513
0514
0515
0516
0517
0518
0519
0520
0521
/*
 * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
 * @hw: struct clk_hw containing the DPLL
 *
 * Enables a non-CORE DPLL: first enables the clockdomain (when one is
 * associated), then chooses the target mode from the programmed rate.
 * If the current rate equals the bypass parent's rate, the DPLL is put
 * into bypass; otherwise it is locked.  Intended to be used as the
 * clk_ops enable callback.  Returns -EINVAL (via the helpers) on a
 * failed transition, the clockdomain error code on clkdm failure, or 0.
 */
int omap3_noncore_dpll_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	int r;
	struct dpll_data *dd;
	struct clk_hw *parent;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (clk->clkdm) {
		r = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
		if (r) {
			WARN(1,
			     "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, clk_hw_get_name(hw),
			     clk->clkdm_name, r);
			return r;
		}
	}

	parent = clk_hw_get_parent(hw);

	/* Rate equal to the bypass parent's rate means bypass is wanted */
	if (clk_hw_get_rate(hw) == clk_hw_get_rate(dd->clk_bypass)) {
		WARN_ON(parent != dd->clk_bypass);
		r = _omap3_noncore_dpll_bypass(clk);
	} else {
		WARN_ON(parent != dd->clk_ref);
		r = _omap3_noncore_dpll_lock(clk);
	}

	return r;
}
0556
0557
0558
0559
0560
0561
0562
0563
/*
 * omap3_noncore_dpll_disable - instruct a DPLL to enter low-power stop
 * @hw: struct clk_hw containing the DPLL
 *
 * Puts a non-CORE DPLL into low-power stop mode, then releases its
 * clockdomain if one is associated.  Intended to be used as the
 * clk_ops disable callback.
 */
void omap3_noncore_dpll_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);

	_omap3_noncore_dpll_stop(clk);
	if (clk->clkdm)
		ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
}
0572
0573
0574
0575
0576
0577
0578
0579
0580
0581
0582
0583
0584
/*
 * omap3_noncore_dpll_determine_rate - determine rate for a DPLL
 * @hw: pointer to the clock to determine rate for
 * @req: target rate request
 *
 * Determines which DPLL mode to use for reaching the target rate: if
 * the request matches the bypass parent's rate and the DPLL supports
 * low-power bypass, the bypass parent is selected; otherwise the rate
 * is rounded to achievable M/N values and the reference parent is
 * selected.  Returns 0 on success or -EINVAL on bad input.
 */
int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;

	if (!req->rate)
		return -EINVAL;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (clk_hw_get_rate(dd->clk_bypass) == req->rate &&
	    (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
		req->best_parent_hw = dd->clk_bypass;
	} else {
		req->rate = omap2_dpll_round_rate(hw, req->rate,
						  &req->best_parent_rate);
		req->best_parent_hw = dd->clk_ref;
	}

	req->best_parent_rate = req->rate;

	return 0;
}
0611
0612
0613
0614
0615
0616
0617
0618
0619
0620 int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index)
0621 {
0622 struct clk_hw_omap *clk = to_clk_hw_omap(hw);
0623 int ret;
0624
0625 if (!hw)
0626 return -EINVAL;
0627
0628 if (index)
0629 ret = _omap3_noncore_dpll_bypass(clk);
0630 else
0631 ret = _omap3_noncore_dpll_lock(clk);
0632
0633 return ret;
0634 }
0635
0636
0637
0638
0639
0640
0641
0642
0643
0644
0645
0646
/*
 * omap3_noncore_dpll_set_rate - set rate for a DPLL clock
 * @hw: pointer to the clock to set rate for
 * @rate: target rate for the clock
 * @parent_rate: rate of the parent clock
 *
 * Programs the DPLL to the M/N values previously computed by the
 * round-rate/determine-rate path (dd->last_rounded_*).  Requires the
 * DPLL to currently be parented to the reference clock and a prior
 * successful rounding (last_rounded_rate != 0).  Returns -EINVAL on
 * bad input or state, 0 on success.
 */
int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;
	u16 freqsel = 0;
	int ret;

	if (!hw || !rate)
		return -EINVAL;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (clk_hw_get_parent(hw) != dd->clk_ref)
		return -EINVAL;

	if (dd->last_rounded_rate == 0)
		return -EINVAL;

	/* Freqsel is available only on OMAP343X devices */
	if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
		freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n);
		WARN_ON(!freqsel);
	}

	pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__,
		 clk_hw_get_name(hw), rate);

	ret = omap3_noncore_dpll_program(clk, freqsel);

	return ret;
}
0681
0682
0683
0684
0685
0686
0687
0688
0689
0690
0691
0692
0693
0694
0695 int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
0696 unsigned long rate,
0697 unsigned long parent_rate,
0698 u8 index)
0699 {
0700 int ret;
0701
0702 if (!hw || !rate)
0703 return -EINVAL;
0704
0705
0706
0707
0708
0709
0710 if (index)
0711 ret = omap3_noncore_dpll_set_parent(hw, index);
0712 else
0713 ret = omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
0714
0715 return ret;
0716 }
0717
0718
0719
0720
0721
0722
0723
0724
0725
0726
0727
/*
 * omap3_dpll_autoidle_read - read a DPLL's autoidle bits
 * @clk: struct clk_hw_omap * of the DPLL to read
 *
 * Returns the DPLL's autoidle bitfield, shifted down to bit 0, or
 * -EINVAL if passed a NULL pointer or a clock with no autoidle field.
 * NOTE(review): the return type is u32, so -EINVAL comes back as a
 * large positive value; callers here only test it for truthiness.
 */
static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u32 v;

	if (!clk || !clk->dpll_data)
		return -EINVAL;

	dd = clk->dpll_data;

	if (!dd->autoidle_mask)
		return -EINVAL;

	v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
	v &= dd->autoidle_mask;
	v >>= __ffs(dd->autoidle_mask);

	return v;
}
0747
0748
0749
0750
0751
0752
0753
0754
0755
0756
/*
 * omap3_dpll_allow_idle - enable a DPLL's autoidle bits
 * @clk: struct clk_hw_omap * of the DPLL to operate on
 *
 * Enables DPLL automatic idle control: writes the low-power-stop
 * autoidle mode into the DPLL's autoidle field.  No return value.
 */
static void omap3_dpll_allow_idle(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u32 v;

	if (!clk || !clk->dpll_data)
		return;

	dd = clk->dpll_data;

	if (!dd->autoidle_mask)
		return;

	/*
	 * Low-power stop is the only autoidle mode used here; other
	 * modes (e.g. low-power bypass) are not selected by this code.
	 */
	v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
	v &= ~dd->autoidle_mask;
	v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask);
	ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg);
}
0780
0781
0782
0783
0784
0785
0786
/*
 * omap3_dpll_deny_idle - prevent a DPLL from automatically idling
 * @clk: struct clk_hw_omap * of the DPLL to operate on
 *
 * Disables DPLL automatic idle control by writing the autoidle-disable
 * mode into the DPLL's autoidle field.  No return value.
 */
static void omap3_dpll_deny_idle(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u32 v;

	if (!clk || !clk->dpll_data)
		return;

	dd = clk->dpll_data;

	if (!dd->autoidle_mask)
		return;

	v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
	v &= ~dd->autoidle_mask;
	v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask);
	ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg);
}
0805
0806
0807
0808
/* Find the parent DPLL for the given clkoutx2 clock */
static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
{
	struct clk_hw_omap *pclk = NULL;

	/* Walk up the parent chain until a clk_hw_omap with dpll_data is found */
	do {
		do {
			hw = clk_hw_get_parent(hw);
		} while (hw && (!omap2_clk_is_hw_omap(hw)));
		if (!hw)
			break;
		pclk = to_clk_hw_omap(hw);
	} while (pclk && !pclk->dpll_data);

	/* clk does not have a DPLL as a parent? Error in the clock data */
	if (!pclk) {
		WARN_ON(1);
		return NULL;
	}

	return pclk;
}
0831
0832
0833
0834
0835
0836
0837
0838
0839
/*
 * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
 * @hw: pointer to the clkoutx2 struct clk_hw
 * @parent_rate: clock rate of the DPLL parent
 *
 * Looks up the parent DPLL's state.  If it is locked (and not a
 * J-type DPLL), the clkoutx2 rate is twice the parent rate; otherwise
 * the parent rate is passed through unchanged.
 */
unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
				    unsigned long parent_rate)
{
	const struct dpll_data *dd;
	unsigned long rate;
	u32 v;
	struct clk_hw_omap *pclk = NULL;

	if (!parent_rate)
		return 0;

	pclk = omap3_find_clkoutx2_dpll(hw);

	if (!pclk)
		return 0;

	dd = pclk->dpll_data;

	WARN_ON(!dd->enable_mask);

	v = ti_clk_ll_ops->clk_readl(&dd->control_reg) & dd->enable_mask;
	v >>= __ffs(dd->enable_mask);
	if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE))
		rate = parent_rate;
	else
		rate = parent_rate * 2;
	return rate;
}
0868
0869
0870
0871
0872
0873
0874
0875
/*
 * omap3_core_dpll_save_context - save the CORE DPLL configuration
 * @hw: pointer to the DPLL's struct clk_hw
 *
 * Saves the current enable mode in clk->context; when the DPLL is
 * locked, also captures the M and N divider values from hardware into
 * dd->last_rounded_m/n so they can be re-programmed on restore.
 * Returns 0 always.
 */
int omap3_core_dpll_save_context(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;
	u32 v;

	dd = clk->dpll_data;

	v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
	clk->context = (v & dd->enable_mask) >> __ffs(dd->enable_mask);

	if (clk->context == DPLL_LOCKED) {
		v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
		dd->last_rounded_m = (v & dd->mult_mask) >>
				     __ffs(dd->mult_mask);
		/* N field stores N-1; convert back to the real divider */
		dd->last_rounded_n = ((v & dd->div1_mask) >>
				      __ffs(dd->div1_mask)) + 1;
	}

	return 0;
}
0897
0898
0899
0900
0901
0902
0903
0904
/*
 * omap3_core_dpll_restore_context - restore the CORE DPLL configuration
 * @hw: pointer to the DPLL's struct clk_hw
 *
 * If the saved context was the locked state, drops the DPLL into
 * bypass, rewrites the saved M/N values and re-locks; otherwise simply
 * restores the saved enable mode.
 */
void omap3_core_dpll_restore_context(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	const struct dpll_data *dd;
	u32 v;

	dd = clk->dpll_data;

	if (clk->context == DPLL_LOCKED) {
		/*
		 * 0x4 is written to the EN field to take the CORE DPLL
		 * out of lock before touching M/N — presumably a bypass
		 * mode; NOTE(review): confirm the encoding against the TRM.
		 */
		_omap3_dpll_write_clken(clk, 0x4);
		_omap3_wait_dpll_status(clk, 0);

		v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
		v &= ~(dd->mult_mask | dd->div1_mask);
		v |= dd->last_rounded_m << __ffs(dd->mult_mask);
		v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);
		ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg);

		_omap3_dpll_write_clken(clk, DPLL_LOCKED);
		_omap3_wait_dpll_status(clk, 1);
	} else {
		_omap3_dpll_write_clken(clk, clk->context);
	}
}
0929
0930
0931
0932
0933
0934
0935
0936
/*
 * omap3_noncore_dpll_save_context - save a non-CORE DPLL configuration
 * @hw: pointer to the DPLL's struct clk_hw
 *
 * Saves the current enable mode in clk->context; when the DPLL is
 * locked, also captures the M and N divider values from hardware into
 * dd->last_rounded_m/n for use by the restore path.  Returns 0 always.
 */
int omap3_noncore_dpll_save_context(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;
	u32 v;

	dd = clk->dpll_data;

	v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
	clk->context = (v & dd->enable_mask) >> __ffs(dd->enable_mask);

	if (clk->context == DPLL_LOCKED) {
		v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
		dd->last_rounded_m = (v & dd->mult_mask) >>
				     __ffs(dd->mult_mask);
		/* N field stores N-1; convert back to the real divider */
		dd->last_rounded_n = ((v & dd->div1_mask) >>
				      __ffs(dd->div1_mask)) + 1;
	}

	return 0;
}
0958
0959
0960
0961
0962
0963
0964
0965
/*
 * omap3_noncore_dpll_restore_context - restore a non-CORE DPLL configuration
 * @hw: pointer to the DPLL's struct clk_hw
 *
 * Compares the saved context against the current hardware state and
 * returns early if the enable mode and M/N values already match.
 * Otherwise re-programs the DPLL (when the saved state was locked) or
 * just restores the saved enable mode.
 */
void omap3_noncore_dpll_restore_context(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	const struct dpll_data *dd;
	u32 ctrl, mult_div1;

	dd = clk->dpll_data;

	ctrl = ti_clk_ll_ops->clk_readl(&dd->control_reg);
	mult_div1 = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);

	/* Hardware already matches the saved state: nothing to do */
	if (clk->context == ((ctrl & dd->enable_mask) >>
			     __ffs(dd->enable_mask)) &&
	    dd->last_rounded_m == ((mult_div1 & dd->mult_mask) >>
				   __ffs(dd->mult_mask)) &&
	    dd->last_rounded_n == ((mult_div1 & dd->div1_mask) >>
				   __ffs(dd->div1_mask)) + 1) {

		return;
	}

	if (clk->context == DPLL_LOCKED)
		omap3_noncore_dpll_program(clk, 0);
	else
		_omap3_dpll_write_clken(clk, clk->context);
}
0992
0993
/* OMAP3 DPLL hardware ops: autoidle enable/disable hooks */
const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
	.allow_idle = omap3_dpll_allow_idle,
	.deny_idle = omap3_dpll_deny_idle,
};
0998
0999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010 int omap3_dpll4_set_rate(struct clk_hw *hw, unsigned long rate,
1011 unsigned long parent_rate)
1012 {
1013
1014
1015
1016
1017
1018 if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) {
1019 pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n");
1020 return -EINVAL;
1021 }
1022
1023 return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
1024 }
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038 int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
1039 unsigned long parent_rate, u8 index)
1040 {
1041 if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) {
1042 pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n");
1043 return -EINVAL;
1044 }
1045
1046 return omap3_noncore_dpll_set_rate_and_parent(hw, rate, parent_rate,
1047 index);
1048 }
1049
1050
/*
 * omap3_dpll5_apply_errata - program DPLL5 from a precomputed M/N table
 * @hw: clk_hw struct for DPLL5
 * @parent_rate: rate of the DPLL's parent (reference) clock
 *
 * Looks up @parent_rate in a table of precomputed (rate, m, n)
 * settings and, on a match, programs the DPLL with those values
 * directly instead of going through the generic rounding path.
 * Returns true when a precomputed setting was applied, false when
 * @parent_rate is not in the table.
 */
static bool omap3_dpll5_apply_errata(struct clk_hw *hw,
				     unsigned long parent_rate)
{
	struct omap3_dpll5_settings {
		unsigned int rate, m, n;
	};

	static const struct omap3_dpll5_settings precomputed[] = {
		/*
		 * The n values are register-style (one less than the real
		 * divider), hence the explicit "+ 1" to convert to the
		 * real divider stored in last_rounded_n.
		 */
		{ 12000000, 80, 0 + 1 },
		{ 13000000, 443, 5 + 1 },
		{ 19200000, 50, 0 + 1 },
		{ 26000000, 443, 11 + 1 },
		{ 38400000, 25, 0 + 1 }
	};

	const struct omap3_dpll5_settings *d;
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(precomputed); ++i) {
		if (parent_rate == precomputed[i].rate)
			break;
	}

	if (i == ARRAY_SIZE(precomputed))
		return false;

	d = &precomputed[i];

	/* Update the M, N and rounded rate values and program the DPLL */
	dd = clk->dpll_data;
	dd->last_rounded_m = d->m;
	dd->last_rounded_n = d->n;
	dd->last_rounded_rate = div_u64((u64)parent_rate * d->m, d->n);
	omap3_noncore_dpll_program(clk, 0);

	return true;
}
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106 int omap3_dpll5_set_rate(struct clk_hw *hw, unsigned long rate,
1107 unsigned long parent_rate)
1108 {
1109 if (rate == OMAP3_DPLL5_FREQ_FOR_USBHOST * 8) {
1110 if (omap3_dpll5_apply_errata(hw, parent_rate))
1111 return 0;
1112 }
1113
1114 return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
1115 }