0001
0002
0003
0004
0005
0006 #include <linux/clk-provider.h>
0007 #include <linux/delay.h>
0008 #include <linux/device.h>
0009 #include <linux/interrupt.h>
0010 #include <linux/io.h>
0011 #include <linux/iopoll.h>
0012 #include <asm/mach-pic32/pic32.h>
0013 #include <asm/traps.h>
0014
0015 #include "clk-core.h"
0016
0017
/* system clock mux register fields: current source, new source, switch enable */
#define OSC_CUR_MASK 0x07
#define OSC_CUR_SHIFT 12
#define OSC_NEW_MASK 0x07
#define OSC_NEW_SHIFT 8
#define OSC_SWEN BIT(0)


/* system PLL control register fields */
#define PLL_RANGE_MASK 0x07
#define PLL_RANGE_SHIFT 0
#define PLL_ICLK_MASK 0x01
#define PLL_ICLK_SHIFT 7
#define PLL_IDIV_MASK 0x07
#define PLL_IDIV_SHIFT 8
#define PLL_ODIV_MASK 0x07
#define PLL_ODIV_SHIFT 24
#define PLL_MULT_MASK 0x7F
#define PLL_MULT_SHIFT 16
#define PLL_MULT_MAX 128
#define PLL_ODIV_MIN 1
#define PLL_ODIV_MAX 5


/* peripheral-bus divider register fields */
#define PB_DIV_MASK 0x7f
#define PB_DIV_SHIFT 0
#define PB_DIV_READY BIT(11)
#define PB_DIV_ENABLE BIT(15)
#define PB_DIV_MAX 128
#define PB_DIV_MIN 0


/* reference oscillator control register fields */
#define REFO_SEL_MASK 0x0f
#define REFO_SEL_SHIFT 0
#define REFO_ACTIVE BIT(8)
#define REFO_DIVSW_EN BIT(9)
#define REFO_OE BIT(12)
#define REFO_ON BIT(15)
#define REFO_DIV_SHIFT 16
#define REFO_DIV_MASK 0x7fff


/* fractional trim register, located REFO_TRIM_REG bytes after the ctrl reg */
#define REFO_TRIM_REG 0x10
#define REFO_TRIM_MASK 0x1ff
#define REFO_TRIM_SHIFT 23
#define REFO_TRIM_MAX 511


/* slew control register fields (slew divider, system divider, busy flag) */
#define SLEW_BUSY BIT(0)
#define SLEW_DOWNEN BIT(1)
#define SLEW_UPEN BIT(2)
#define SLEW_DIV 0x07
#define SLEW_DIV_SHIFT 8
#define SLEW_SYSDIV 0x0f
#define SLEW_SYSDIV_SHIFT 20


/* common timeout for the register-status polling loops below */
#define LOCK_TIMEOUT_US USEC_PER_MSEC


/*
 * System clock hw, recorded by sclk_init(); spll_clk_set_rate() uses it
 * to refuse reprogramming the PLL while the PLL drives the system clock.
 */
static struct clk_hw *pic32_sclk_hw;


/* execute five NOPs: short fixed delay after oscillator/PLL register writes */
#define cpu_nop5() \
do { \
	__asm__ __volatile__("nop"); \
	__asm__ __volatile__("nop"); \
	__asm__ __volatile__("nop"); \
	__asm__ __volatile__("nop"); \
	__asm__ __volatile__("nop"); \
} while (0)


/* peripheral-bus clock: a gate plus divider fed from the system clock */
struct pic32_periph_clk {
	struct clk_hw hw;		/* clk framework handle */
	void __iomem *ctrl_reg;		/* divider/enable control register */
	struct pic32_clk_common *core;	/* shared reg_lock / iobase / device */
};

#define clkhw_to_pbclk(_hw) container_of(_hw, struct pic32_periph_clk, hw)
0096
0097 static int pbclk_is_enabled(struct clk_hw *hw)
0098 {
0099 struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
0100
0101 return readl(pb->ctrl_reg) & PB_DIV_ENABLE;
0102 }
0103
0104 static int pbclk_enable(struct clk_hw *hw)
0105 {
0106 struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
0107
0108 writel(PB_DIV_ENABLE, PIC32_SET(pb->ctrl_reg));
0109 return 0;
0110 }
0111
0112 static void pbclk_disable(struct clk_hw *hw)
0113 {
0114 struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
0115
0116 writel(PB_DIV_ENABLE, PIC32_CLR(pb->ctrl_reg));
0117 }
0118
0119 static unsigned long calc_best_divided_rate(unsigned long rate,
0120 unsigned long parent_rate,
0121 u32 divider_max,
0122 u32 divider_min)
0123 {
0124 unsigned long divided_rate, divided_rate_down, best_rate;
0125 unsigned long div, div_up;
0126
0127
0128
0129
0130
0131 div = parent_rate / rate;
0132 div = clamp_val(div, divider_min, divider_max);
0133 div_up = clamp_val(div + 1, divider_min, divider_max);
0134
0135 divided_rate = parent_rate / div;
0136 divided_rate_down = parent_rate / div_up;
0137 if (abs(rate - divided_rate_down) < abs(rate - divided_rate))
0138 best_rate = divided_rate_down;
0139 else
0140 best_rate = divided_rate;
0141
0142 return best_rate;
0143 }
0144
/* Read the divider field and convert to the actual divisor (field + 1). */
static inline u32 pbclk_read_pbdiv(struct pic32_periph_clk *pb)
{
	return ((readl(pb->ctrl_reg) >> PB_DIV_SHIFT) & PB_DIV_MASK) + 1;
}
0149
0150 static unsigned long pbclk_recalc_rate(struct clk_hw *hw,
0151 unsigned long parent_rate)
0152 {
0153 struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
0154
0155 return parent_rate / pbclk_read_pbdiv(pb);
0156 }
0157
0158 static long pbclk_round_rate(struct clk_hw *hw, unsigned long rate,
0159 unsigned long *parent_rate)
0160 {
0161 return calc_best_divided_rate(rate, *parent_rate,
0162 PB_DIV_MAX, PB_DIV_MIN);
0163 }
0164
/*
 * Program a new peripheral-bus divider so rate ~= parent_rate / div.
 * Returns 0 on success, a poll-timeout error, or -EBUSY if the divider
 * read back does not match what was requested.
 */
static int pbclk_set_rate(struct clk_hw *hw, unsigned long rate,
			  unsigned long parent_rate)
{
	struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
	unsigned long flags;
	u32 v, div;
	int err;

	/* wait for the divider logic to report READY before touching it */
	err = readl_poll_timeout(pb->ctrl_reg, v, v & PB_DIV_READY,
				 1, LOCK_TIMEOUT_US);
	if (err)
		return err;

	/* calculate the closest divider for the requested rate */
	div = DIV_ROUND_CLOSEST(parent_rate, rate);

	spin_lock_irqsave(&pb->core->reg_lock, flags);

	/* read-modify-write; the hardware field stores (divider - 1) */
	v = readl(pb->ctrl_reg);
	v &= ~PB_DIV_MASK;
	v |= (div - 1);

	/* the register is write-protected; unlock before writing */
	pic32_syskey_unlock();

	writel(v, pb->ctrl_reg);

	spin_unlock_irqrestore(&pb->core->reg_lock, flags);

	/* wait for READY again so the new divider has taken effect */
	err = readl_poll_timeout(pb->ctrl_reg, v, v & PB_DIV_READY,
				 1, LOCK_TIMEOUT_US);
	if (err)
		return err;

	/* confirm the hardware accepted the requested divider */
	return (pbclk_read_pbdiv(pb) == div) ? 0 : -EBUSY;
}
0204
/* clk_ops for peripheral-bus clocks (gate + divider) */
const struct clk_ops pic32_pbclk_ops = {
	.enable = pbclk_enable,
	.disable = pbclk_disable,
	.is_enabled = pbclk_is_enabled,
	.recalc_rate = pbclk_recalc_rate,
	.round_rate = pbclk_round_rate,
	.set_rate = pbclk_set_rate,
};
0213
0214 struct clk *pic32_periph_clk_register(const struct pic32_periph_clk_data *desc,
0215 struct pic32_clk_common *core)
0216 {
0217 struct pic32_periph_clk *pbclk;
0218 struct clk *clk;
0219
0220 pbclk = devm_kzalloc(core->dev, sizeof(*pbclk), GFP_KERNEL);
0221 if (!pbclk)
0222 return ERR_PTR(-ENOMEM);
0223
0224 pbclk->hw.init = &desc->init_data;
0225 pbclk->core = core;
0226 pbclk->ctrl_reg = desc->ctrl_reg + core->iobase;
0227
0228 clk = devm_clk_register(core->dev, &pbclk->hw);
0229 if (IS_ERR(clk)) {
0230 dev_err(core->dev, "%s: clk_register() failed\n", __func__);
0231 devm_kfree(core->dev, pbclk);
0232 }
0233
0234 return clk;
0235 }
0236
0237
/* reference oscillator: muxed source with integer divider + fractional trim */
struct pic32_ref_osc {
	struct clk_hw hw;		/* clk framework handle */
	void __iomem *ctrl_reg;		/* control register (trim reg at +REFO_TRIM_REG) */
	const u32 *parent_map;		/* optional framework-index -> hw-source map */
	struct pic32_clk_common *core;	/* shared reg_lock / iobase / device */
};

#define clkhw_to_refosc(_hw) container_of(_hw, struct pic32_ref_osc, hw)
0246
0247 static int roclk_is_enabled(struct clk_hw *hw)
0248 {
0249 struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
0250
0251 return readl(refo->ctrl_reg) & REFO_ON;
0252 }
0253
0254 static int roclk_enable(struct clk_hw *hw)
0255 {
0256 struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
0257
0258 writel(REFO_ON | REFO_OE, PIC32_SET(refo->ctrl_reg));
0259 return 0;
0260 }
0261
0262 static void roclk_disable(struct clk_hw *hw)
0263 {
0264 struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
0265
0266 writel(REFO_ON | REFO_OE, PIC32_CLR(refo->ctrl_reg));
0267 }
0268
/* One-time setup: start with the oscillator gated; .enable turns it on. */
static int roclk_init(struct clk_hw *hw)
{
	/* initialize clock in disabled state */
	roclk_disable(hw);

	return 0;
}
0276
0277 static u8 roclk_get_parent(struct clk_hw *hw)
0278 {
0279 struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
0280 u32 v, i;
0281
0282 v = (readl(refo->ctrl_reg) >> REFO_SEL_SHIFT) & REFO_SEL_MASK;
0283
0284 if (!refo->parent_map)
0285 return v;
0286
0287 for (i = 0; i < clk_hw_get_num_parents(hw); i++)
0288 if (refo->parent_map[i] == v)
0289 return i;
0290
0291 return -EINVAL;
0292 }
0293
/*
 * Output rate = parent_rate / [2 * (rodiv + rotrim / 512)].
 * With rotrim == 0 this reduces to parent_rate / (2 * rodiv); with both
 * fields zero the clock simply follows its parent.
 */
static unsigned long roclk_calc_rate(unsigned long parent_rate,
				     u32 rodiv, u32 rotrim)
{
	u64 rate64;

	/*
	 * Fractional mode: (rodiv << 9) + rotrim == 512 * rodiv + rotrim,
	 * so (parent << 8) / that == parent / (2 * (rodiv + rotrim / 512)).
	 */
	if (rotrim) {
		rodiv = (rodiv << 9) + rotrim;
		rate64 = parent_rate;
		rate64 <<= 8;
		do_div(rate64, rodiv);
	} else if (rodiv) {
		/* integer mode: divide by 2 * rodiv */
		rate64 = parent_rate / (rodiv << 1);
	} else {
		/* pass-through */
		rate64 = parent_rate;
	}
	return rate64;
}
0316
/*
 * Solve rate = parent / [2 * (div + trim / 512)] for div and trim:
 *   div  = parent / (2 * rate)
 *   trim = 256 * parent / rate - 512 * div   (the remaining fraction)
 * Results are clamped to their register field ranges. A target at or
 * above the parent rate maps to div = trim = 0 (pass-through).
 */
static void roclk_calc_div_trim(unsigned long rate,
				unsigned long parent_rate,
				u32 *rodiv_p, u32 *rotrim_p)
{
	u32 div, rotrim, rodiv;
	u64 frac;

	if (parent_rate <= rate) {
		/* cannot divide up; request pass-through */
		div = 0;
		frac = 0;
		rodiv = 0;
		rotrim = 0;
	} else {
		div = parent_rate / (rate << 1);
		/* frac = (parent << 8) / rate - (div << 9), i.e. 512ths left over */
		frac = parent_rate;
		frac <<= 8;
		do_div(frac, rate);
		frac -= (u64)(div << 9);

		/* clamp to the hardware field widths */
		rodiv = (div > REFO_DIV_MASK) ? REFO_DIV_MASK : div;
		rotrim = (frac >= REFO_TRIM_MAX) ? REFO_TRIM_MAX : frac;
	}

	if (rodiv_p)
		*rodiv_p = rodiv;

	if (rotrim_p)
		*rotrim_p = rotrim;
}
0357
0358 static unsigned long roclk_recalc_rate(struct clk_hw *hw,
0359 unsigned long parent_rate)
0360 {
0361 struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
0362 u32 v, rodiv, rotrim;
0363
0364
0365 v = readl(refo->ctrl_reg);
0366 rodiv = (v >> REFO_DIV_SHIFT) & REFO_DIV_MASK;
0367
0368
0369 v = readl(refo->ctrl_reg + REFO_TRIM_REG);
0370 rotrim = (v >> REFO_TRIM_SHIFT) & REFO_TRIM_MASK;
0371
0372 return roclk_calc_rate(parent_rate, rodiv, rotrim);
0373 }
0374
0375 static long roclk_round_rate(struct clk_hw *hw, unsigned long rate,
0376 unsigned long *parent_rate)
0377 {
0378 u32 rotrim, rodiv;
0379
0380
0381 roclk_calc_div_trim(rate, *parent_rate, &rodiv, &rotrim);
0382
0383
0384 return roclk_calc_rate(*parent_rate, rodiv, rotrim);
0385 }
0386
0387 static int roclk_determine_rate(struct clk_hw *hw,
0388 struct clk_rate_request *req)
0389 {
0390 struct clk_hw *parent_clk, *best_parent_clk = NULL;
0391 unsigned int i, delta, best_delta = -1;
0392 unsigned long parent_rate, best_parent_rate = 0;
0393 unsigned long best = 0, nearest_rate;
0394
0395
0396 for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
0397
0398 parent_clk = clk_hw_get_parent_by_index(hw, i);
0399 if (!parent_clk)
0400 continue;
0401
0402
0403 parent_rate = clk_hw_get_rate(parent_clk);
0404 if (req->rate > parent_rate)
0405 continue;
0406
0407 nearest_rate = roclk_round_rate(hw, req->rate, &parent_rate);
0408 delta = abs(nearest_rate - req->rate);
0409 if ((nearest_rate >= req->rate) && (delta < best_delta)) {
0410 best_parent_clk = parent_clk;
0411 best_parent_rate = parent_rate;
0412 best = nearest_rate;
0413 best_delta = delta;
0414
0415 if (delta == 0)
0416 break;
0417 }
0418 }
0419
0420
0421 if (!best_parent_clk) {
0422 pr_err("%s:%s, no parent found for rate %lu.\n",
0423 __func__, clk_hw_get_name(hw), req->rate);
0424 return clk_hw_get_rate(hw);
0425 }
0426
0427 pr_debug("%s,rate %lu, best_parent(%s, %lu), best %lu, delta %d\n",
0428 clk_hw_get_name(hw), req->rate,
0429 clk_hw_get_name(best_parent_clk), best_parent_rate,
0430 best, best_delta);
0431
0432 if (req->best_parent_rate)
0433 req->best_parent_rate = best_parent_rate;
0434
0435 if (req->best_parent_hw)
0436 req->best_parent_hw = best_parent_clk;
0437
0438 return best;
0439 }
0440
/*
 * Switch the reference oscillator's input source. The source may only
 * change while the clock is idle, so wait for ACTIVE to clear first;
 * returns the poll error if it never goes idle.
 */
static int roclk_set_parent(struct clk_hw *hw, u8 index)
{
	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
	unsigned long flags;
	u32 v;
	int err;

	/* translate framework index to the hardware source value, if mapped */
	if (refo->parent_map)
		index = refo->parent_map[index];

	/* wait until the clock is no longer ACTIVE */
	err = readl_poll_timeout(refo->ctrl_reg, v, !(v & REFO_ACTIVE),
				 1, LOCK_TIMEOUT_US);
	if (err) {
		pr_err("%s: poll failed, clk active\n", clk_hw_get_name(hw));
		return err;
	}

	spin_lock_irqsave(&refo->core->reg_lock, flags);

	/* the register is write-protected; unlock before writing */
	pic32_syskey_unlock();

	/* read-modify-write the source-select field */
	v = readl(refo->ctrl_reg);
	v &= ~(REFO_SEL_MASK << REFO_SEL_SHIFT);
	v |= index << REFO_SEL_SHIFT;

	writel(v, refo->ctrl_reg);

	spin_unlock_irqrestore(&refo->core->reg_lock, flags);

	return 0;
}
0474
/*
 * Program source, divider and fractional trim in one sequence. The clock
 * must be idle before reprogramming; DIVSW_EN is then pulsed so the
 * hardware latches the new divider, and the clock is left gated (the
 * framework re-enables it as needed).
 */
static int roclk_set_rate_and_parent(struct clk_hw *hw,
				     unsigned long rate,
				     unsigned long parent_rate,
				     u8 index)
{
	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
	unsigned long flags;
	u32 trim, rodiv, v;
	int err;

	/* calculate divider and trim for the new rate */
	roclk_calc_div_trim(rate, parent_rate, &rodiv, &trim);

	pr_debug("parent_rate = %lu, rate = %lu, div = %d, trim = %d\n",
		 parent_rate, rate, rodiv, trim);

	/* wait until neither a switch nor the clock itself is active */
	err = readl_poll_timeout(refo->ctrl_reg, v,
				 !(v & (REFO_ACTIVE | REFO_DIVSW_EN)),
				 1, LOCK_TIMEOUT_US);
	if (err) {
		pr_err("%s: poll timedout, clock is still active\n", __func__);
		return err;
	}

	spin_lock_irqsave(&refo->core->reg_lock, flags);
	v = readl(refo->ctrl_reg);

	/* the registers are write-protected; unlock before writing */
	pic32_syskey_unlock();

	/* translate framework index to the hardware source value, if mapped */
	if (refo->parent_map)
		index = refo->parent_map[index];

	/* apply the new source */
	v &= ~(REFO_SEL_MASK << REFO_SEL_SHIFT);
	v |= index << REFO_SEL_SHIFT;

	/* apply the new integer divider */
	v &= ~(REFO_DIV_MASK << REFO_DIV_SHIFT);
	v |= rodiv << REFO_DIV_SHIFT;
	writel(v, refo->ctrl_reg);

	/* apply the fractional trim in the trim register */
	v = readl(refo->ctrl_reg + REFO_TRIM_REG);
	v &= ~(REFO_TRIM_MASK << REFO_TRIM_SHIFT);
	v |= trim << REFO_TRIM_SHIFT;
	writel(v, refo->ctrl_reg + REFO_TRIM_REG);

	/* enable the clock and trigger the divider switch */
	writel(REFO_ON | REFO_DIVSW_EN, PIC32_SET(refo->ctrl_reg));

	/* wait for hardware to clear DIVSW_EN (switch complete) */
	err = readl_poll_timeout_atomic(refo->ctrl_reg, v, !(v & REFO_DIVSW_EN),
					1, LOCK_TIMEOUT_US);

	/* leave the clock gated again */
	writel(REFO_ON, PIC32_CLR(refo->ctrl_reg));

	spin_unlock_irqrestore(&refo->core->reg_lock, flags);

	return err;
}
0536
0537 static int roclk_set_rate(struct clk_hw *hw, unsigned long rate,
0538 unsigned long parent_rate)
0539 {
0540 u8 index = roclk_get_parent(hw);
0541
0542 return roclk_set_rate_and_parent(hw, rate, parent_rate, index);
0543 }
0544
/* clk_ops for reference oscillators (muxed, divider + fractional trim) */
const struct clk_ops pic32_roclk_ops = {
	.enable = roclk_enable,
	.disable = roclk_disable,
	.is_enabled = roclk_is_enabled,
	.get_parent = roclk_get_parent,
	.set_parent = roclk_set_parent,
	.determine_rate = roclk_determine_rate,
	.recalc_rate = roclk_recalc_rate,
	.set_rate_and_parent = roclk_set_rate_and_parent,
	.set_rate = roclk_set_rate,
	.init = roclk_init,
};
0557
0558 struct clk *pic32_refo_clk_register(const struct pic32_ref_osc_data *data,
0559 struct pic32_clk_common *core)
0560 {
0561 struct pic32_ref_osc *refo;
0562 struct clk *clk;
0563
0564 refo = devm_kzalloc(core->dev, sizeof(*refo), GFP_KERNEL);
0565 if (!refo)
0566 return ERR_PTR(-ENOMEM);
0567
0568 refo->core = core;
0569 refo->hw.init = &data->init_data;
0570 refo->ctrl_reg = data->ctrl_reg + core->iobase;
0571 refo->parent_map = data->parent_map;
0572
0573 clk = devm_clk_register(core->dev, &refo->hw);
0574 if (IS_ERR(clk))
0575 dev_err(core->dev, "%s: clk_register() failed\n", __func__);
0576
0577 return clk;
0578 }
0579
/* system PLL */
struct pic32_sys_pll {
	struct clk_hw hw;		/* clk framework handle */
	void __iomem *ctrl_reg;		/* PLL control register */
	void __iomem *status_reg;	/* register holding the lock bit(s) */
	u32 lock_mask;			/* lock bit(s) within status_reg */
	u32 idiv;			/* input divider, cached at registration */
	struct pic32_clk_common *core;	/* shared reg_lock / iobase / device */
};

#define clkhw_to_spll(_hw) container_of(_hw, struct pic32_sys_pll, hw)
0590
0591 static inline u32 spll_odiv_to_divider(u32 odiv)
0592 {
0593 odiv = clamp_val(odiv, PLL_ODIV_MIN, PLL_ODIV_MAX);
0594
0595 return 1 << odiv;
0596 }
0597
/*
 * Exhaustively search multiplier (1..PLL_MULT_MAX) and output-divider
 * exponent (PLL_ODIV_MIN..PLL_ODIV_MAX) for the output closest to, and
 * not below, @rate, given parent_rate / pll->idiv at the PLL input.
 *
 * On success stores the register encodings (multiplier - 1, raw odiv
 * exponent) and returns the achievable rate; returns 0 when no
 * combination can reach @rate.
 */
static unsigned long spll_calc_mult_div(struct pic32_sys_pll *pll,
					unsigned long rate,
					unsigned long parent_rate,
					u32 *mult_p, u32 *odiv_p)
{
	u32 mul, div, best_mul = 1, best_div = 1;
	unsigned long new_rate, best_rate = rate;
	unsigned int best_delta = -1, delta, match_found = 0; /* -1 == UINT_MAX */
	u64 rate64;

	parent_rate /= pll->idiv;

	for (mul = 1; mul <= PLL_MULT_MAX; mul++) {
		for (div = PLL_ODIV_MIN; div <= PLL_ODIV_MAX; div++) {
			/* candidate = (parent / idiv) * mul / 2^div */
			rate64 = parent_rate;
			rate64 *= mul;
			do_div(rate64, 1 << div);
			new_rate = rate64;
			delta = abs(rate - new_rate);
			if ((new_rate >= rate) && (delta < best_delta)) {
				best_delta = delta;
				best_rate = new_rate;
				best_mul = mul;
				best_div = div;
				match_found = 1;
			}
		}
	}

	if (!match_found) {
		pr_warn("spll: no match found\n");
		return 0;
	}

	pr_debug("rate %lu, par_rate %lu/mult %u, div %u, best_rate %lu\n",
		 rate, parent_rate, best_mul, best_div, best_rate);

	/* the multiplier register field stores (mult - 1) */
	if (mult_p)
		*mult_p = best_mul - 1;

	if (odiv_p)
		*odiv_p = best_div;

	return best_rate;
}
0643
0644 static unsigned long spll_clk_recalc_rate(struct clk_hw *hw,
0645 unsigned long parent_rate)
0646 {
0647 struct pic32_sys_pll *pll = clkhw_to_spll(hw);
0648 unsigned long pll_in_rate;
0649 u32 mult, odiv, div, v;
0650 u64 rate64;
0651
0652 v = readl(pll->ctrl_reg);
0653 odiv = ((v >> PLL_ODIV_SHIFT) & PLL_ODIV_MASK);
0654 mult = ((v >> PLL_MULT_SHIFT) & PLL_MULT_MASK) + 1;
0655 div = spll_odiv_to_divider(odiv);
0656
0657
0658
0659
0660 pll_in_rate = parent_rate / pll->idiv;
0661 rate64 = pll_in_rate;
0662 rate64 *= mult;
0663 do_div(rate64, div);
0664
0665 return rate64;
0666 }
0667
0668 static long spll_clk_round_rate(struct clk_hw *hw, unsigned long rate,
0669 unsigned long *parent_rate)
0670 {
0671 struct pic32_sys_pll *pll = clkhw_to_spll(hw);
0672
0673 return spll_calc_mult_div(pll, rate, *parent_rate, NULL, NULL);
0674 }
0675
/*
 * Reprogram the system PLL multiplier and output divider for @rate.
 * Refused (-EBUSY) while the PLL is the system clock's parent, since
 * glitching it would disturb the CPU clock. Returns the poll error if
 * the PLL fails to relock within 100 us.
 */
static int spll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct pic32_sys_pll *pll = clkhw_to_spll(hw);
	unsigned long ret, flags;
	u32 mult, odiv, v;
	int err;

	ret = spll_calc_mult_div(pll, rate, parent_rate, &mult, &odiv);
	if (!ret)
		return -EINVAL;

	/* refuse while this PLL is driving the system clock */
	if (unlikely(clk_hw_get_parent(pic32_sclk_hw) == hw)) {
		pr_err("%s: failed, clk in-use\n", __func__);
		return -EBUSY;
	}

	spin_lock_irqsave(&pll->core->reg_lock, flags);

	/* apply the new multiplier and divider (read-modify-write) */
	v = readl(pll->ctrl_reg);
	v &= ~(PLL_MULT_MASK << PLL_MULT_SHIFT);
	v &= ~(PLL_ODIV_MASK << PLL_ODIV_SHIFT);
	v |= (mult << PLL_MULT_SHIFT) | (odiv << PLL_ODIV_SHIFT);

	/* the register is write-protected; unlock before writing */
	pic32_syskey_unlock();

	writel(v, pll->ctrl_reg);
	cpu_relax();

	/* insert a few NOPs to let the write settle before polling */
	cpu_nop5();
	cpu_nop5();

	/* wait (up to 100 us) for the PLL to report lock */
	err = readl_poll_timeout_atomic(pll->status_reg, v,
					v & pll->lock_mask, 1, 100);
	spin_unlock_irqrestore(&pll->core->reg_lock, flags);

	return err;
}
0724
0725
/* clk_ops for the system PLL; gating is not supported, only rate control */
const struct clk_ops pic32_spll_ops = {
	.recalc_rate = spll_clk_recalc_rate,
	.round_rate = spll_clk_round_rate,
	.set_rate = spll_clk_set_rate,
};
0731
0732 struct clk *pic32_spll_clk_register(const struct pic32_sys_pll_data *data,
0733 struct pic32_clk_common *core)
0734 {
0735 struct pic32_sys_pll *spll;
0736 struct clk *clk;
0737
0738 spll = devm_kzalloc(core->dev, sizeof(*spll), GFP_KERNEL);
0739 if (!spll)
0740 return ERR_PTR(-ENOMEM);
0741
0742 spll->core = core;
0743 spll->hw.init = &data->init_data;
0744 spll->ctrl_reg = data->ctrl_reg + core->iobase;
0745 spll->status_reg = data->status_reg + core->iobase;
0746 spll->lock_mask = data->lock_mask;
0747
0748
0749 spll->idiv = (readl(spll->ctrl_reg) >> PLL_IDIV_SHIFT) & PLL_IDIV_MASK;
0750 spll->idiv += 1;
0751
0752 clk = devm_clk_register(core->dev, &spll->hw);
0753 if (IS_ERR(clk))
0754 dev_err(core->dev, "sys_pll: clk_register() failed\n");
0755
0756 return clk;
0757 }
0758
0759
0760
/* system clock: oscillator mux with slew-controlled divider */
struct pic32_sys_clk {
	struct clk_hw hw;		/* clk framework handle */
	void __iomem *mux_reg;		/* oscillator mux control register */
	void __iomem *slew_reg;		/* slew / system-divider register */
	u32 slew_div;			/* slew divider applied in sclk_init() */
	const u32 *parent_map;		/* optional framework-index -> hw-source map */
	struct pic32_clk_common *core;	/* shared reg_lock / iobase / device */
};

#define clkhw_to_sys_clk(_hw) container_of(_hw, struct pic32_sys_clk, hw)
0771
0772 static unsigned long sclk_get_rate(struct clk_hw *hw, unsigned long parent_rate)
0773 {
0774 struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
0775 u32 div;
0776
0777 div = (readl(sclk->slew_reg) >> SLEW_SYSDIV_SHIFT) & SLEW_SYSDIV;
0778 div += 1;
0779
0780 return parent_rate / div;
0781 }
0782
0783 static long sclk_round_rate(struct clk_hw *hw, unsigned long rate,
0784 unsigned long *parent_rate)
0785 {
0786 return calc_best_divided_rate(rate, *parent_rate, SLEW_SYSDIV, 1);
0787 }
0788
/*
 * Change the system-clock divider. The register is write-protected, so
 * unlock first; then wait for the slew engine to finish (SLEW_BUSY).
 */
static int sclk_set_rate(struct clk_hw *hw,
			 unsigned long rate, unsigned long parent_rate)
{
	struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
	unsigned long flags;
	u32 v, div;
	int err;

	div = parent_rate / rate;

	spin_lock_irqsave(&sclk->core->reg_lock, flags);

	/* apply the new divider; the field stores (div - 1) */
	v = readl(sclk->slew_reg);
	v &= ~(SLEW_SYSDIV << SLEW_SYSDIV_SHIFT);
	v |= (div - 1) << SLEW_SYSDIV_SHIFT;

	/* the register is write-protected; unlock before writing */
	pic32_syskey_unlock();

	writel(v, sclk->slew_reg);

	/* wait until the slew engine reports done */
	err = readl_poll_timeout_atomic(sclk->slew_reg, v,
					!(v & SLEW_BUSY), 1, LOCK_TIMEOUT_US);

	spin_unlock_irqrestore(&sclk->core->reg_lock, flags);

	return err;
}
0818
0819 static u8 sclk_get_parent(struct clk_hw *hw)
0820 {
0821 struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
0822 u32 i, v;
0823
0824 v = (readl(sclk->mux_reg) >> OSC_CUR_SHIFT) & OSC_CUR_MASK;
0825
0826 if (!sclk->parent_map)
0827 return v;
0828
0829 for (i = 0; i < clk_hw_get_num_parents(hw); i++)
0830 if (sclk->parent_map[i] == v)
0831 return i;
0832 return -EINVAL;
0833 }
0834
/*
 * Switch the system clock to a new oscillator: write the new-source
 * field, pulse the switch-enable bit, then verify the current-source
 * field reflects the request. Returns -EBUSY if hardware rejected it.
 */
static int sclk_set_parent(struct clk_hw *hw, u8 index)
{
	struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
	unsigned long flags;
	u32 nosc, cosc, v;
	int err;

	spin_lock_irqsave(&sclk->core->reg_lock, flags);

	/* translate framework index to the hardware source value, if mapped */
	nosc = sclk->parent_map ? sclk->parent_map[index] : index;

	/* set the new-source field */
	v = readl(sclk->mux_reg);
	v &= ~(OSC_NEW_MASK << OSC_NEW_SHIFT);
	v |= nosc << OSC_NEW_SHIFT;

	/* the register is write-protected; unlock before writing */
	pic32_syskey_unlock();

	writel(v, sclk->mux_reg);

	/* trigger the oscillator switch */
	writel(OSC_SWEN, PIC32_SET(sclk->mux_reg));
	cpu_relax();

	/* brief fixed delay before polling for completion */
	cpu_nop5();

	/*
	 * Wait for SWEN to clear.
	 * NOTE(review): OSC_SWEN is a mux_reg bit, yet this polls
	 * slew_reg -- looks like it should poll mux_reg; confirm
	 * against the hardware manual before changing.
	 */
	err = readl_poll_timeout_atomic(sclk->slew_reg, v,
					!(v & OSC_SWEN), 1, LOCK_TIMEOUT_US);

	spin_unlock_irqrestore(&sclk->core->reg_lock, flags);

	/* cross-check the current-source field against the request */
	cosc = (readl(sclk->mux_reg) >> OSC_CUR_SHIFT) & OSC_CUR_MASK;
	if (cosc != nosc) {
		pr_err("%s: err, failed to set_parent() to %d, current %d\n",
		       clk_hw_get_name(hw), nosc, cosc);
		err = -EBUSY;
	}

	return err;
}
0884
/*
 * One-time setup: remember this hw for the PLL in-use check and, when a
 * slew divider was provided, enable slewing in both directions with it.
 */
static int sclk_init(struct clk_hw *hw)
{
	struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
	unsigned long flags;
	u32 v;

	/* keep a reference to this clock; used by spll_clk_set_rate() */
	pic32_sclk_hw = hw;

	/* apply the slew divider for both up- and down-scaling */
	if (sclk->slew_div) {
		spin_lock_irqsave(&sclk->core->reg_lock, flags);
		v = readl(sclk->slew_reg);
		v &= ~(SLEW_DIV << SLEW_DIV_SHIFT);
		v |= sclk->slew_div << SLEW_DIV_SHIFT;
		v |= SLEW_DOWNEN | SLEW_UPEN;
		writel(v, sclk->slew_reg);
		spin_unlock_irqrestore(&sclk->core->reg_lock, flags);
	}

	return 0;
}
0907
0908
/* clk_ops for the system clock with a programmable divider */
const struct clk_ops pic32_sclk_ops = {
	.get_parent = sclk_get_parent,
	.set_parent = sclk_set_parent,
	.round_rate = sclk_round_rate,
	.set_rate = sclk_set_rate,
	.recalc_rate = sclk_get_rate,
	.init = sclk_init,
	.determine_rate = __clk_mux_determine_rate,
};
0918
0919
/* clk_ops for the system clock variant without a divider (mux only) */
const struct clk_ops pic32_sclk_no_div_ops = {
	.get_parent = sclk_get_parent,
	.set_parent = sclk_set_parent,
	.init = sclk_init,
	.determine_rate = __clk_mux_determine_rate,
};
0926
0927 struct clk *pic32_sys_clk_register(const struct pic32_sys_clk_data *data,
0928 struct pic32_clk_common *core)
0929 {
0930 struct pic32_sys_clk *sclk;
0931 struct clk *clk;
0932
0933 sclk = devm_kzalloc(core->dev, sizeof(*sclk), GFP_KERNEL);
0934 if (!sclk)
0935 return ERR_PTR(-ENOMEM);
0936
0937 sclk->core = core;
0938 sclk->hw.init = &data->init_data;
0939 sclk->mux_reg = data->mux_reg + core->iobase;
0940 sclk->slew_reg = data->slew_reg + core->iobase;
0941 sclk->slew_div = data->slew_div;
0942 sclk->parent_map = data->parent_map;
0943
0944 clk = devm_clk_register(core->dev, &sclk->hw);
0945 if (IS_ERR(clk))
0946 dev_err(core->dev, "%s: clk register failed\n", __func__);
0947
0948 return clk;
0949 }
0950
0951
/* secondary oscillator: a gateable fixed-rate source */
struct pic32_sec_osc {
	struct clk_hw hw;		/* clk framework handle */
	void __iomem *enable_reg;	/* register holding the enable bit(s) */
	void __iomem *status_reg;	/* register holding the ready bit(s) */
	u32 enable_mask;		/* enable bit(s) within enable_reg */
	u32 status_mask;		/* ready bit(s) within status_reg */
	unsigned long fixed_rate;	/* nominal rate, supplied at registration */
	struct pic32_clk_common *core;	/* shared reg_lock / iobase / device */
};

#define clkhw_to_sosc(_hw) container_of(_hw, struct pic32_sec_osc, hw)
0963 static int sosc_clk_enable(struct clk_hw *hw)
0964 {
0965 struct pic32_sec_osc *sosc = clkhw_to_sosc(hw);
0966 u32 v;
0967
0968
0969 pic32_syskey_unlock();
0970 writel(sosc->enable_mask, PIC32_SET(sosc->enable_reg));
0971
0972
0973 return readl_poll_timeout_atomic(sosc->status_reg, v,
0974 v & sosc->status_mask, 1, 100);
0975 }
0976
0977 static void sosc_clk_disable(struct clk_hw *hw)
0978 {
0979 struct pic32_sec_osc *sosc = clkhw_to_sosc(hw);
0980
0981 pic32_syskey_unlock();
0982 writel(sosc->enable_mask, PIC32_CLR(sosc->enable_reg));
0983 }
0984
0985 static int sosc_clk_is_enabled(struct clk_hw *hw)
0986 {
0987 struct pic32_sec_osc *sosc = clkhw_to_sosc(hw);
0988 u32 enabled, ready;
0989
0990
0991 enabled = readl(sosc->enable_reg) & sosc->enable_mask;
0992 ready = readl(sosc->status_reg) & sosc->status_mask;
0993
0994 return enabled && ready;
0995 }
0996
0997 static unsigned long sosc_clk_calc_rate(struct clk_hw *hw,
0998 unsigned long parent_rate)
0999 {
1000 return clkhw_to_sosc(hw)->fixed_rate;
1001 }
1002
/* clk_ops for the secondary oscillator (gateable fixed-rate source) */
const struct clk_ops pic32_sosc_ops = {
	.enable = sosc_clk_enable,
	.disable = sosc_clk_disable,
	.is_enabled = sosc_clk_is_enabled,
	.recalc_rate = sosc_clk_calc_rate,
};
1009
1010 struct clk *pic32_sosc_clk_register(const struct pic32_sec_osc_data *data,
1011 struct pic32_clk_common *core)
1012 {
1013 struct pic32_sec_osc *sosc;
1014
1015 sosc = devm_kzalloc(core->dev, sizeof(*sosc), GFP_KERNEL);
1016 if (!sosc)
1017 return ERR_PTR(-ENOMEM);
1018
1019 sosc->core = core;
1020 sosc->hw.init = &data->init_data;
1021 sosc->fixed_rate = data->fixed_rate;
1022 sosc->enable_mask = data->enable_mask;
1023 sosc->status_mask = data->status_mask;
1024 sosc->enable_reg = data->enable_reg + core->iobase;
1025 sosc->status_reg = data->status_reg + core->iobase;
1026
1027 return devm_clk_register(core->dev, &sosc->hw);
1028 }