0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/bitops.h>
0010 #include <linux/clk.h>
0011 #include <linux/clk-provider.h>
0012 #include <linux/clkdev.h>
0013 #include <linux/delay.h>
0014 #include <linux/io.h>
0015 #include <linux/iopoll.h>
0016 #include <linux/math64.h>
0017 #include <linux/of.h>
0018 #include <linux/of_address.h>
0019 #include <linux/slab.h>
0020 #include <linux/spinlock.h>
0021 #include <linux/time.h>
0022
0023 #include "cgu.h"
0024
0025 #define MHZ (1000 * 1000)
0026
0027 static inline const struct ingenic_cgu_clk_info *
0028 to_clk_info(struct ingenic_clk *clk)
0029 {
0030 return &clk->cgu->clock_info[clk->idx];
0031 }
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043 static inline bool
0044 ingenic_cgu_gate_get(struct ingenic_cgu *cgu,
0045 const struct ingenic_cgu_gate_info *info)
0046 {
0047 return !!(readl(cgu->base + info->reg) & BIT(info->bit))
0048 ^ info->clear_to_gate;
0049 }
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061 static inline void
0062 ingenic_cgu_gate_set(struct ingenic_cgu *cgu,
0063 const struct ingenic_cgu_gate_info *info, bool val)
0064 {
0065 u32 clkgr = readl(cgu->base + info->reg);
0066
0067 if (val ^ info->clear_to_gate)
0068 clkgr |= BIT(info->bit);
0069 else
0070 clkgr &= ~BIT(info->bit);
0071
0072 writel(clkgr, cgu->base + info->reg);
0073 }
0074
0075
0076
0077
0078
/*
 * Recalculate the PLL output rate from the hardware register state:
 * rate = parent_rate * m * rate_multiplier / (n * od).
 * Returns parent_rate unchanged when the bypass bit is set.
 */
static unsigned long
ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od_enc, od;
	bool bypass;
	u32 ctl;

	BUG_ON(clk_info->type != CGU_CLK_PLL);
	pll_info = &clk_info->pll;

	ctl = readl(cgu->base + pll_info->reg);

	/* extract the M/N fields and apply the per-SoC register offsets */
	m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
	m += pll_info->m_offset;
	n = (ctl >> pll_info->n_shift) & GENMASK(pll_info->n_bits - 1, 0);
	n += pll_info->n_offset;
	/* OD is stored encoded; decoded via the od_encoding table below */
	od_enc = ctl >> pll_info->od_shift;
	od_enc &= GENMASK(pll_info->od_bits - 1, 0);

	/* bypass_bit < 0 means this PLL has no bypass control */
	if (pll_info->bypass_bit >= 0) {
		ctl = readl(cgu->base + pll_info->bypass_reg);

		bypass = !!(ctl & BIT(pll_info->bypass_bit));

		if (bypass)
			return parent_rate;
	}

	/* map the encoded OD value back to its index; index + 1 is the divider */
	for (od = 0; od < pll_info->od_max; od++) {
		if (pll_info->od_encoding[od] == od_enc)
			break;
	}
	BUG_ON(od == pll_info->od_max);
	od++;

	return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
		       n * od);
}
0121
/*
 * Default M/N/OD factor calculation for a requested PLL rate.
 * Chooses OD = 1, derives N from the parent rate and M from the
 * target-to-parent ratio; results are returned through *pm/*pn/*pod.
 */
static void
ingenic_pll_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
			unsigned long rate, unsigned long parent_rate,
			unsigned int *pm, unsigned int *pn, unsigned int *pod)
{
	unsigned int m, n, od = 1;

	/*
	 * Pick n so the post-divider input is roughly 10 MHz, then clamp it
	 * to what the register field and its offset can represent.
	 * NOTE(review): the upper clamp is 1 << n_bits, one more than the
	 * largest raw field value — presumably n_offset covers this; confirm
	 * against the per-SoC pll_info tables.
	 */
	n = parent_rate / (10 * MHZ);
	n = min_t(unsigned int, n, 1 << pll_info->n_bits);
	n = max_t(unsigned int, n, pll_info->n_offset);

	/* m = rate * n * od / parent, computed in MHz to avoid overflow */
	m = (rate / MHZ) * od * n / (parent_rate / MHZ);
	m = min_t(unsigned int, m, 1 << pll_info->m_bits);
	m = max_t(unsigned int, m, pll_info->m_offset);

	*pm = m;
	*pn = n;
	*pod = od;
}
0145
0146 static unsigned long
0147 ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
0148 unsigned long rate, unsigned long parent_rate,
0149 unsigned int *pm, unsigned int *pn, unsigned int *pod)
0150 {
0151 const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
0152 unsigned int m, n, od;
0153
0154 if (pll_info->calc_m_n_od)
0155 (*pll_info->calc_m_n_od)(pll_info, rate, parent_rate, &m, &n, &od);
0156 else
0157 ingenic_pll_calc_m_n_od(pll_info, rate, parent_rate, &m, &n, &od);
0158
0159 if (pm)
0160 *pm = m;
0161 if (pn)
0162 *pn = n;
0163 if (pod)
0164 *pod = od;
0165
0166 return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
0167 n * od);
0168 }
0169
0170 static long
0171 ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
0172 unsigned long *prate)
0173 {
0174 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
0175 const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
0176
0177 return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
0178 }
0179
/*
 * Poll the PLL control register until the stable bit is set.
 * Returns 0 on success or -ETIMEDOUT after 100ms (readl_poll_timeout).
 */
static inline int ingenic_pll_check_stable(struct ingenic_cgu *cgu,
					   const struct ingenic_cgu_pll_info *pll_info)
{
	u32 ctl;

	return readl_poll_timeout(cgu->base + pll_info->reg, ctl,
				  ctl & BIT(pll_info->stable_bit),
				  0, 100 * USEC_PER_MSEC);
}
0189
/*
 * clk_ops.set_rate: program the PLL M/N/OD fields for the rate closest to
 * @req_rate. If the PLL is currently enabled, wait for it to re-lock.
 */
static int
ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long rate, flags;
	unsigned int m, n, od;
	int ret = 0;
	u32 ctl;

	rate = ingenic_pll_calc(clk_info, req_rate, parent_rate,
			       &m, &n, &od);
	/* not an error: log when we can only approximate the request */
	if (rate != req_rate)
		pr_info("ingenic-cgu: request '%s' rate %luHz, actual %luHz\n",
			clk_info->name, req_rate, rate);

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	/* register fields hold the factor minus its per-SoC offset */
	ctl &= ~(GENMASK(pll_info->m_bits - 1, 0) << pll_info->m_shift);
	ctl |= (m - pll_info->m_offset) << pll_info->m_shift;

	ctl &= ~(GENMASK(pll_info->n_bits - 1, 0) << pll_info->n_shift);
	ctl |= (n - pll_info->n_offset) << pll_info->n_shift;

	/* OD is written encoded; od is 1-based, the table is 0-based */
	ctl &= ~(GENMASK(pll_info->od_bits - 1, 0) << pll_info->od_shift);
	ctl |= pll_info->od_encoding[od - 1] << pll_info->od_shift;

	writel(ctl, cgu->base + pll_info->reg);

	/* if the PLL is running, wait for it to stabilise at the new rate */
	if (ctl & BIT(pll_info->enable_bit))
		ret = ingenic_pll_check_stable(cgu, pll_info);

	spin_unlock_irqrestore(&cgu->lock, flags);

	return ret;
}
0231
/*
 * clk_ops.enable: take the PLL out of bypass (when it has a bypass bit),
 * set its enable bit, and wait for it to report stable.
 */
static int ingenic_pll_enable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long flags;
	int ret;
	u32 ctl;

	spin_lock_irqsave(&cgu->lock, flags);
	/* bypass_bit < 0 means this PLL has no bypass control */
	if (pll_info->bypass_bit >= 0) {
		ctl = readl(cgu->base + pll_info->bypass_reg);

		ctl &= ~BIT(pll_info->bypass_bit);

		writel(ctl, cgu->base + pll_info->bypass_reg);
	}

	ctl = readl(cgu->base + pll_info->reg);

	ctl |= BIT(pll_info->enable_bit);

	writel(ctl, cgu->base + pll_info->reg);

	/* wait (up to 100ms) for the PLL to lock */
	ret = ingenic_pll_check_stable(cgu, pll_info);
	spin_unlock_irqrestore(&cgu->lock, flags);

	return ret;
}
0262
0263 static void ingenic_pll_disable(struct clk_hw *hw)
0264 {
0265 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
0266 struct ingenic_cgu *cgu = ingenic_clk->cgu;
0267 const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
0268 const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
0269 unsigned long flags;
0270 u32 ctl;
0271
0272 spin_lock_irqsave(&cgu->lock, flags);
0273 ctl = readl(cgu->base + pll_info->reg);
0274
0275 ctl &= ~BIT(pll_info->enable_bit);
0276
0277 writel(ctl, cgu->base + pll_info->reg);
0278 spin_unlock_irqrestore(&cgu->lock, flags);
0279 }
0280
0281 static int ingenic_pll_is_enabled(struct clk_hw *hw)
0282 {
0283 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
0284 struct ingenic_cgu *cgu = ingenic_clk->cgu;
0285 const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
0286 const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
0287 u32 ctl;
0288
0289 ctl = readl(cgu->base + pll_info->reg);
0290
0291 return !!(ctl & BIT(pll_info->enable_bit));
0292 }
0293
/* clk_ops for clocks of type CGU_CLK_PLL. */
static const struct clk_ops ingenic_pll_ops = {
	.recalc_rate = ingenic_pll_recalc_rate,
	.round_rate = ingenic_pll_round_rate,
	.set_rate = ingenic_pll_set_rate,

	.enable = ingenic_pll_enable,
	.disable = ingenic_pll_disable,
	.is_enabled = ingenic_pll_is_enabled,
};
0303
0304
0305
0306
0307
/*
 * clk_ops.get_parent: read the mux field and convert the hardware index
 * into the logical parent index used by the common clock framework.
 * Non-mux clocks always report parent 0.
 */
static u8 ingenic_clk_get_parent(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	u32 reg;
	u8 i, hw_idx, idx = 0;

	if (clk_info->type & CGU_CLK_MUX) {
		reg = readl(cgu->base + clk_info->mux.reg);
		hw_idx = (reg >> clk_info->mux.shift) &
			 GENMASK(clk_info->mux.bits - 1, 0);

		/*
		 * Convert the hardware index to the logical parent index by
		 * skipping any -1 (unconnected) entries in the parents array,
		 * since those are not registered with the clock framework.
		 */
		for (i = 0; i < hw_idx; i++) {
			if (clk_info->parents[i] != -1)
				idx++;
		}
	}

	return idx;
}
0333
/*
 * clk_ops.set_parent: translate the logical parent index @idx back into a
 * hardware mux index and write it. For non-mux clocks only idx 0 is valid.
 */
static int ingenic_clk_set_parent(struct clk_hw *hw, u8 idx)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long flags;
	u8 curr_idx, hw_idx, num_poss;
	u32 reg, mask;

	if (clk_info->type & CGU_CLK_MUX) {
		/*
		 * Convert the logical parent index to the hardware index:
		 * walk the parents array, skipping -1 (unconnected) entries,
		 * until we have passed idx connected parents. hw_idx then
		 * holds the mux field value for the idx'th valid parent.
		 */
		hw_idx = curr_idx = 0;
		num_poss = 1 << clk_info->mux.bits;
		for (; hw_idx < num_poss; hw_idx++) {
			if (clk_info->parents[hw_idx] == -1)
				continue;
			if (curr_idx == idx)
				break;
			curr_idx++;
		}

		/* the framework should never pass an out-of-range index */
		BUG_ON(curr_idx != idx);

		mask = GENMASK(clk_info->mux.bits - 1, 0);
		mask <<= clk_info->mux.shift;

		spin_lock_irqsave(&cgu->lock, flags);

		/* read-modify-write the mux field */
		reg = readl(cgu->base + clk_info->mux.reg);
		reg &= ~mask;
		reg |= hw_idx << clk_info->mux.shift;
		writel(reg, cgu->base + clk_info->mux.reg);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return 0;
	}

	return idx ? -EINVAL : 0;
}
0380
/*
 * clk_ops.recalc_rate: apply the clock's divider (programmable, from a
 * divide table, or fixed) to @parent_rate. Parents listed in bypass_mask
 * feed through undivided.
 */
static unsigned long
ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long rate = parent_rate;
	u32 div_reg, div;
	u8 parent;

	if (clk_info->type & CGU_CLK_DIV) {
		parent = ingenic_clk_get_parent(hw);

		/* the divider is bypassed entirely for parents in bypass_mask */
		if (!(clk_info->div.bypass_mask & BIT(parent))) {
			div_reg = readl(cgu->base + clk_info->div.reg);
			div = (div_reg >> clk_info->div.shift) &
			      GENMASK(clk_info->div.bits - 1, 0);

			/* field value is either a table index or (div/div.div)-1 */
			if (clk_info->div.div_table)
				div = clk_info->div.div_table[div];
			else
				div = (div + 1) * clk_info->div.div;

			rate /= div;
		}
	} else if (clk_info->type & CGU_CLK_FIXDIV) {
		rate /= clk_info->fixdiv.div;
	}

	return rate;
}
0412
0413 static unsigned int
0414 ingenic_clk_calc_hw_div(const struct ingenic_cgu_clk_info *clk_info,
0415 unsigned int div)
0416 {
0417 unsigned int i, best_i = 0, best = (unsigned int)-1;
0418
0419 for (i = 0; i < (1 << clk_info->div.bits)
0420 && clk_info->div.div_table[i]; i++) {
0421 if (clk_info->div.div_table[i] >= div &&
0422 clk_info->div.div_table[i] < best) {
0423 best = clk_info->div.div_table[i];
0424 best_i = i;
0425
0426 if (div == best)
0427 break;
0428 }
0429 }
0430
0431 return best_i;
0432 }
0433
/*
 * Calculate the divider value (not the raw register field) that brings
 * @parent_rate closest to (without exceeding the request) @req_rate,
 * honouring the hardware's range and granularity constraints.
 */
static unsigned
ingenic_clk_calc_div(struct clk_hw *hw,
		     const struct ingenic_cgu_clk_info *clk_info,
		     unsigned long parent_rate, unsigned long req_rate)
{
	unsigned int div, hw_div;
	u8 parent;

	/* parents in bypass_mask skip the divider entirely */
	parent = ingenic_clk_get_parent(hw);
	if (clk_info->div.bypass_mask & BIT(parent))
		return 1;

	/* calculate the divide */
	div = DIV_ROUND_UP(parent_rate, req_rate);

	if (clk_info->div.div_table) {
		/* snap to the nearest supported table entry */
		hw_div = ingenic_clk_calc_hw_div(clk_info, div);

		return clk_info->div.div_table[hw_div];
	}

	/* impose hardware constraints on the divider range */
	div = clamp_t(unsigned int, div, clk_info->div.div,
		      clk_info->div.div << clk_info->div.bits);

	/*
	 * The divider is written as (div / div.div) - 1, so round div up to
	 * a multiple of div.div to avoid losing the remainder when it is
	 * divided before being programmed.
	 */
	div = DIV_ROUND_UP(div, clk_info->div.div);
	div *= clk_info->div.div;

	return div;
}
0469
0470 static long
0471 ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
0472 unsigned long *parent_rate)
0473 {
0474 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
0475 const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
0476 unsigned int div = 1;
0477
0478 if (clk_info->type & CGU_CLK_DIV)
0479 div = ingenic_clk_calc_div(hw, clk_info, *parent_rate, req_rate);
0480 else if (clk_info->type & CGU_CLK_FIXDIV)
0481 div = clk_info->fixdiv.div;
0482 else if (clk_hw_can_set_rate_parent(hw))
0483 *parent_rate = req_rate;
0484
0485 return DIV_ROUND_UP(*parent_rate, div);
0486 }
0487
/*
 * Poll the divider register until its busy bit clears.
 * Returns 0 on success or -ETIMEDOUT after 100ms (readl_poll_timeout).
 */
static inline int ingenic_clk_check_stable(struct ingenic_cgu *cgu,
					   const struct ingenic_cgu_clk_info *clk_info)
{
	u32 reg;

	return readl_poll_timeout(cgu->base + clk_info->div.reg, reg,
				  !(reg & BIT(clk_info->div.busy_bit)),
				  0, 100 * USEC_PER_MSEC);
}
0497
/*
 * clk_ops.set_rate: program the clock's divider for @req_rate. Returns
 * -EINVAL when the clock has no programmable divider or the exact rate
 * cannot be achieved.
 */
static int
ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long rate, flags;
	unsigned int hw_div, div;
	u32 reg, mask;
	int ret = 0;

	if (clk_info->type & CGU_CLK_DIV) {
		div = ingenic_clk_calc_div(hw, clk_info, parent_rate, req_rate);
		rate = DIV_ROUND_UP(parent_rate, div);

		/* only accept the exact requested rate */
		if (rate != req_rate)
			return -EINVAL;

		/* divider value -> raw register field */
		if (clk_info->div.div_table)
			hw_div = ingenic_clk_calc_hw_div(clk_info, div);
		else
			hw_div = ((div / clk_info->div.div) - 1);

		spin_lock_irqsave(&cgu->lock, flags);
		reg = readl(cgu->base + clk_info->div.reg);

		/* update the divide */
		mask = GENMASK(clk_info->div.bits - 1, 0);
		reg &= ~(mask << clk_info->div.shift);
		reg |= hw_div << clk_info->div.shift;

		/* clear the stop bit, if this divider has one (-1 means none) */
		if (clk_info->div.stop_bit != -1)
			reg &= ~BIT(clk_info->div.stop_bit);

		/* set the change-enable bit, if this divider has one */
		if (clk_info->div.ce_bit != -1)
			reg |= BIT(clk_info->div.ce_bit);

		/* update the hardware */
		writel(reg, cgu->base + clk_info->div.reg);

		/* wait for the change to take effect, if it can be observed */
		if (clk_info->div.busy_bit != -1)
			ret = ingenic_clk_check_stable(cgu, clk_info);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return ret;
	}

	return -EINVAL;
}
0551
0552 static int ingenic_clk_enable(struct clk_hw *hw)
0553 {
0554 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
0555 const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
0556 struct ingenic_cgu *cgu = ingenic_clk->cgu;
0557 unsigned long flags;
0558
0559 if (clk_info->type & CGU_CLK_GATE) {
0560
0561 spin_lock_irqsave(&cgu->lock, flags);
0562 ingenic_cgu_gate_set(cgu, &clk_info->gate, false);
0563 spin_unlock_irqrestore(&cgu->lock, flags);
0564
0565 if (clk_info->gate.delay_us)
0566 udelay(clk_info->gate.delay_us);
0567 }
0568
0569 return 0;
0570 }
0571
0572 static void ingenic_clk_disable(struct clk_hw *hw)
0573 {
0574 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
0575 const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
0576 struct ingenic_cgu *cgu = ingenic_clk->cgu;
0577 unsigned long flags;
0578
0579 if (clk_info->type & CGU_CLK_GATE) {
0580
0581 spin_lock_irqsave(&cgu->lock, flags);
0582 ingenic_cgu_gate_set(cgu, &clk_info->gate, true);
0583 spin_unlock_irqrestore(&cgu->lock, flags);
0584 }
0585 }
0586
0587 static int ingenic_clk_is_enabled(struct clk_hw *hw)
0588 {
0589 struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
0590 const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
0591 struct ingenic_cgu *cgu = ingenic_clk->cgu;
0592 int enabled = 1;
0593
0594 if (clk_info->type & CGU_CLK_GATE)
0595 enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);
0596
0597 return enabled;
0598 }
0599
/* clk_ops for mux/divider/gate clocks (everything except PLL and custom). */
static const struct clk_ops ingenic_clk_ops = {
	.get_parent = ingenic_clk_get_parent,
	.set_parent = ingenic_clk_set_parent,

	.recalc_rate = ingenic_clk_recalc_rate,
	.round_rate = ingenic_clk_round_rate,
	.set_rate = ingenic_clk_set_rate,

	.enable = ingenic_clk_enable,
	.disable = ingenic_clk_disable,
	.is_enabled = ingenic_clk_is_enabled,
};
0612
0613
0614
0615
0616
/*
 * Register the clock at index @idx of cgu->clock_info with the common
 * clock framework, choosing ops based on the clock's type flags.
 * External (CGU_CLK_EXT) clocks are looked up from the device tree
 * instead of being registered here. Returns 0 or a negative errno.
 */
static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
{
	const struct ingenic_cgu_clk_info *clk_info = &cgu->clock_info[idx];
	struct clk_init_data clk_init;
	struct ingenic_clk *ingenic_clk = NULL;
	struct clk *clk, *parent;
	const char *parent_names[4];
	unsigned caps, i, num_possible;
	int err = -EINVAL;

	BUILD_BUG_ON(ARRAY_SIZE(clk_info->parents) > ARRAY_SIZE(parent_names));

	/* external clocks come from the DT, not from CGU registers */
	if (clk_info->type == CGU_CLK_EXT) {
		clk = of_clk_get_by_name(cgu->np, clk_info->name);
		if (IS_ERR(clk)) {
			pr_err("%s: no external clock '%s' provided\n",
			       __func__, clk_info->name);
			err = -ENODEV;
			goto out;
		}
		err = clk_register_clkdev(clk, clk_info->name, NULL);
		if (err) {
			clk_put(clk);
			goto out;
		}
		cgu->clocks.clks[idx] = clk;
		return 0;
	}

	if (!clk_info->type) {
		pr_err("%s: no clock type specified for '%s'\n", __func__,
		       clk_info->name);
		goto out;
	}

	ingenic_clk = kzalloc(sizeof(*ingenic_clk), GFP_KERNEL);
	if (!ingenic_clk) {
		err = -ENOMEM;
		goto out;
	}

	ingenic_clk->hw.init = &clk_init;
	ingenic_clk->cgu = cgu;
	ingenic_clk->idx = idx;

	clk_init.name = clk_info->name;
	clk_init.flags = clk_info->flags;
	clk_init.parent_names = parent_names;

	/* caps tracks type bits not yet handled; must be 0 by the end */
	caps = clk_info->type;

	if (caps & CGU_CLK_DIV) {
		caps &= ~CGU_CLK_DIV;
	} else if (!(caps & CGU_CLK_CUSTOM)) {
		/* no divider: pass rate requests up to the parent clock */
		clk_init.flags |= CLK_SET_RATE_PARENT;
	}

	if (caps & (CGU_CLK_MUX | CGU_CLK_CUSTOM)) {
		clk_init.num_parents = 0;

		if (caps & CGU_CLK_MUX)
			num_possible = 1 << clk_info->mux.bits;
		else
			num_possible = ARRAY_SIZE(clk_info->parents);

		/* collect names of connected parents, skipping -1 entries */
		for (i = 0; i < num_possible; i++) {
			if (clk_info->parents[i] == -1)
				continue;

			parent = cgu->clocks.clks[clk_info->parents[i]];
			parent_names[clk_init.num_parents] =
				__clk_get_name(parent);
			clk_init.num_parents++;
		}

		BUG_ON(!clk_init.num_parents);
		BUG_ON(clk_init.num_parents > ARRAY_SIZE(parent_names));
	} else {
		BUG_ON(clk_info->parents[0] == -1);
		clk_init.num_parents = 1;
		parent = cgu->clocks.clks[clk_info->parents[0]];
		parent_names[0] = __clk_get_name(parent);
	}

	if (caps & CGU_CLK_CUSTOM) {
		clk_init.ops = clk_info->custom.clk_ops;

		caps &= ~CGU_CLK_CUSTOM;

		/* custom ops must stand alone; no other type bits allowed */
		if (caps) {
			pr_err("%s: custom clock may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else if (caps & CGU_CLK_PLL) {
		clk_init.ops = &ingenic_pll_ops;

		caps &= ~CGU_CLK_PLL;

		if (caps) {
			pr_err("%s: PLL may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else {
		clk_init.ops = &ingenic_clk_ops;
	}

	/* nothing extra to do at init time for gates or fixed dividers */
	caps &= ~(CGU_CLK_GATE | CGU_CLK_FIXDIV);

	if (caps & CGU_CLK_MUX) {
		/* glitchy muxes must only be switched while gated */
		if (!(caps & CGU_CLK_MUX_GLITCHFREE))
			clk_init.flags |= CLK_SET_PARENT_GATE;

		caps &= ~(CGU_CLK_MUX | CGU_CLK_MUX_GLITCHFREE);
	}

	/* any bits left over were never handled: reject the clock */
	if (caps) {
		pr_err("%s: unknown clock type 0x%x\n", __func__, caps);
		goto out;
	}

	clk = clk_register(NULL, &ingenic_clk->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock '%s'\n", __func__,
		       clk_info->name);
		err = PTR_ERR(clk);
		goto out;
	}

	err = clk_register_clkdev(clk, clk_info->name, NULL);
	if (err)
		goto out;

	cgu->clocks.clks[idx] = clk;
out:
	/* kfree(NULL) is a no-op on the CGU_CLK_EXT error paths */
	if (err)
		kfree(ingenic_clk);
	return err;
}
0759
0760 struct ingenic_cgu *
0761 ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
0762 unsigned num_clocks, struct device_node *np)
0763 {
0764 struct ingenic_cgu *cgu;
0765
0766 cgu = kzalloc(sizeof(*cgu), GFP_KERNEL);
0767 if (!cgu)
0768 goto err_out;
0769
0770 cgu->base = of_iomap(np, 0);
0771 if (!cgu->base) {
0772 pr_err("%s: failed to map CGU registers\n", __func__);
0773 goto err_out_free;
0774 }
0775
0776 cgu->np = np;
0777 cgu->clock_info = clock_info;
0778 cgu->clocks.clk_num = num_clocks;
0779
0780 spin_lock_init(&cgu->lock);
0781
0782 return cgu;
0783
0784 err_out_free:
0785 kfree(cgu);
0786 err_out:
0787 return NULL;
0788 }
0789
/*
 * Register every clock described by cgu->clock_info and expose them as a
 * DT clock provider. On failure, all clocks registered so far are
 * unregistered (or put, for external clocks). Returns 0 or negative errno.
 */
int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu)
{
	unsigned i;
	int err;

	cgu->clocks.clks = kcalloc(cgu->clocks.clk_num, sizeof(struct clk *),
				   GFP_KERNEL);
	if (!cgu->clocks.clks) {
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < cgu->clocks.clk_num; i++) {
		err = ingenic_register_clock(cgu, i);
		if (err)
			goto err_out_unregister;
	}

	err = of_clk_add_provider(cgu->np, of_clk_src_onecell_get,
				  &cgu->clocks);
	if (err)
		goto err_out_unregister;

	return 0;

err_out_unregister:
	/* unwind: entries past the failure point are still NULL */
	for (i = 0; i < cgu->clocks.clk_num; i++) {
		if (!cgu->clocks.clks[i])
			continue;
		/* external clocks were obtained with of_clk_get_by_name() */
		if (cgu->clock_info[i].type & CGU_CLK_EXT)
			clk_put(cgu->clocks.clks[i]);
		else
			clk_unregister(cgu->clocks.clks[i]);
	}
	kfree(cgu->clocks.clks);
err_out:
	return err;
}