#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/math64.h>
#include <linux/of.h>

#include "clk-cgu.h"

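/*
 * A gate clock is controlled through a bank of three consecutive
 * registers: status at +0x0, enable at +0x4 and disable at +0x8.
 * Writing a 1 to the relevant bit of the EN (resp. DIS) register
 * enables (resp. disables) the clock; the STAT register reflects
 * the current state.
 */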
#define GATE_HW_REG_STAT(reg)	((reg) + 0x0)
#define GATE_HW_REG_EN(reg)	((reg) + 0x4)
#define GATE_HW_REG_DIS(reg)	((reg) + 0x8)
#define MAX_DDIV_REG	8
#define MAX_DIVIDER_VAL	64

#define to_lgm_clk_mux(_hw) container_of(_hw, struct lgm_clk_mux, hw)
#define to_lgm_clk_divider(_hw) container_of(_hw, struct lgm_clk_divider, hw)
#define to_lgm_clk_gate(_hw) container_of(_hw, struct lgm_clk_gate, hw)
#define to_lgm_clk_ddiv(_hw) container_of(_hw, struct lgm_clk_ddiv, hw)

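/*
 * For CLK_TYPE_FIXED entries the branch table reuses the mux_flags field
 * to carry the fixed rate, hence it is passed as the rate argument of
 * clk_hw_register_fixed_rate() below.  CLOCK_FLAG_VAL_INIT requests that
 * the associated register field be programmed with a default value at
 * registration time; the same convention is used by the other
 * registration helpers in this file.
 */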
static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx,
					     const struct lgm_clk_branch *list)
{
	unsigned long flags;

	if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&ctx->lock, flags);
		lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
				list->div_width, list->div_val);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}

	return clk_hw_register_fixed_rate(NULL, list->name,
					  list->parent_data[0].name,
					  list->flags, list->mux_flags);
}

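/*
 * MUX_CLK_SW marks a mux that is implemented purely in software: the
 * selected index is cached in mux->reg instead of being read from or
 * written to a hardware register.
 */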
static u8 lgm_clk_mux_get_parent(struct clk_hw *hw)
{
	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&mux->lock, flags);
	if (mux->flags & MUX_CLK_SW)
		val = mux->reg;
	else
		val = lgm_get_clk_val(mux->membase, mux->reg, mux->shift,
				      mux->width);
	spin_unlock_irqrestore(&mux->lock, flags);
	return clk_mux_val_to_index(hw, NULL, mux->flags, val);
}

static int lgm_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
	unsigned long flags;
	u32 val;

	val = clk_mux_index_to_val(NULL, mux->flags, index);
	spin_lock_irqsave(&mux->lock, flags);
	if (mux->flags & MUX_CLK_SW)
		mux->reg = val;
	else
		lgm_set_clk_val(mux->membase, mux->reg, mux->shift,
				mux->width, val);
	spin_unlock_irqrestore(&mux->lock, flags);

	return 0;
}

static int lgm_clk_mux_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);

	return clk_mux_determine_rate_flags(hw, req, mux->flags);
}

static const struct clk_ops lgm_clk_mux_ops = {
	.get_parent = lgm_clk_mux_get_parent,
	.set_parent = lgm_clk_mux_set_parent,
	.determine_rate = lgm_clk_mux_determine_rate,
};

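/*
 * Note that each clk keeps its own copy of the provider spinlock (the
 * assignment below copies it by value), so register accesses are
 * serialized per clock rather than provider-wide.
 */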
static struct clk_hw *
lgm_clk_register_mux(struct lgm_clk_provider *ctx,
		     const struct lgm_clk_branch *list)
{
	unsigned long flags, cflags = list->mux_flags;
	struct device *dev = ctx->dev;
	u8 shift = list->mux_shift;
	u8 width = list->mux_width;
	struct clk_init_data init = {};
	struct lgm_clk_mux *mux;
	u32 reg = list->mux_off;
	struct clk_hw *hw;
	int ret;

	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	init.name = list->name;
	init.ops = &lgm_clk_mux_ops;
	init.flags = list->flags;
	init.parent_data = list->parent_data;
	init.num_parents = list->num_parents;

	mux->membase = ctx->membase;
	mux->lock = ctx->lock;
	mux->reg = reg;
	mux->shift = shift;
	mux->width = width;
	mux->flags = cflags;
	mux->hw.init = &init;

	hw = &mux->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	if (cflags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&mux->lock, flags);
		lgm_set_clk_val(mux->membase, reg, shift, width, list->mux_val);
		spin_unlock_irqrestore(&mux->lock, flags);
	}

	return hw;
}

static unsigned long
lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
	unsigned long flags;
	unsigned int val;

	spin_lock_irqsave(&divider->lock, flags);
	val = lgm_get_clk_val(divider->membase, divider->reg,
			      divider->shift, divider->width);
	spin_unlock_irqrestore(&divider->lock, flags);

	return divider_recalc_rate(hw, parent_rate, val, divider->table,
				   divider->flags, divider->width);
}

static long
lgm_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long *prate)
{
	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);

	return divider_round_rate(hw, rate, prate, divider->table,
				  divider->width, divider->flags);
}

static int
lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long prate)
{
	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
	unsigned long flags;
	int value;

	value = divider_get_val(rate, prate, divider->table,
				divider->width, divider->flags);
	if (value < 0)
		return value;

	spin_lock_irqsave(&divider->lock, flags);
	lgm_set_clk_val(divider->membase, divider->reg,
			divider->shift, divider->width, value);
	spin_unlock_irqrestore(&divider->lock, flags);

	return 0;
}

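/*
 * Dividers may carry an optional gate field in the same register;
 * shift_gate/width_gate locate the bit that lgm_clk_divider_enable()
 * and lgm_clk_divider_disable() toggle.
 */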
static int lgm_clk_divider_enable_disable(struct clk_hw *hw, int enable)
{
	struct lgm_clk_divider *div = to_lgm_clk_divider(hw);
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	lgm_set_clk_val(div->membase, div->reg, div->shift_gate,
			div->width_gate, enable);
	spin_unlock_irqrestore(&div->lock, flags);
	return 0;
}

static int lgm_clk_divider_enable(struct clk_hw *hw)
{
	return lgm_clk_divider_enable_disable(hw, 1);
}

static void lgm_clk_divider_disable(struct clk_hw *hw)
{
	lgm_clk_divider_enable_disable(hw, 0);
}

static const struct clk_ops lgm_clk_divider_ops = {
	.recalc_rate = lgm_clk_divider_recalc_rate,
	.round_rate = lgm_clk_divider_round_rate,
	.set_rate = lgm_clk_divider_set_rate,
	.enable = lgm_clk_divider_enable,
	.disable = lgm_clk_divider_disable,
};

static struct clk_hw *
lgm_clk_register_divider(struct lgm_clk_provider *ctx,
			 const struct lgm_clk_branch *list)
{
	unsigned long flags, cflags = list->div_flags;
	struct device *dev = ctx->dev;
	struct lgm_clk_divider *div;
	struct clk_init_data init = {};
	u8 shift = list->div_shift;
	u8 width = list->div_width;
	u8 shift_gate = list->div_shift_gate;
	u8 width_gate = list->div_width_gate;
	u32 reg = list->div_off;
	struct clk_hw *hw;
	int ret;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = list->name;
	init.ops = &lgm_clk_divider_ops;
	init.flags = list->flags;
	init.parent_data = list->parent_data;
	init.num_parents = 1;

	div->membase = ctx->membase;
	div->lock = ctx->lock;
	div->reg = reg;
	div->shift = shift;
	div->width = width;
	div->shift_gate = shift_gate;
	div->width_gate = width_gate;
	div->flags = cflags;
	div->table = list->div_table;
	div->hw.init = &init;

	hw = &div->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	if (cflags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&div->lock, flags);
		lgm_set_clk_val(div->membase, reg, shift, width, list->div_val);
		spin_unlock_irqrestore(&div->lock, flags);
	}

	return hw;
}

static struct clk_hw *
lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
			      const struct lgm_clk_branch *list)
{
	unsigned long flags;
	struct clk_hw *hw;

	hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
					  list->parent_data[0].name, list->flags,
					  list->mult, list->div);
	if (IS_ERR(hw))
		return ERR_CAST(hw);

	if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&ctx->lock, flags);
		lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
				list->div_width, list->div_val);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}

	return hw;
}

static int lgm_clk_gate_enable(struct clk_hw *hw)
{
	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
	unsigned long flags;
	unsigned int reg;

	spin_lock_irqsave(&gate->lock, flags);
	reg = GATE_HW_REG_EN(gate->reg);
	lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
	spin_unlock_irqrestore(&gate->lock, flags);

	return 0;
}

static void lgm_clk_gate_disable(struct clk_hw *hw)
{
	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
	unsigned long flags;
	unsigned int reg;

	spin_lock_irqsave(&gate->lock, flags);
	reg = GATE_HW_REG_DIS(gate->reg);
	lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
	spin_unlock_irqrestore(&gate->lock, flags);
}

static int lgm_clk_gate_is_enabled(struct clk_hw *hw)
{
	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
	unsigned int reg, ret;
	unsigned long flags;

	spin_lock_irqsave(&gate->lock, flags);
	reg = GATE_HW_REG_STAT(gate->reg);
	ret = lgm_get_clk_val(gate->membase, reg, gate->shift, 1);
	spin_unlock_irqrestore(&gate->lock, flags);

	return ret;
}

static const struct clk_ops lgm_clk_gate_ops = {
	.enable = lgm_clk_gate_enable,
	.disable = lgm_clk_gate_disable,
	.is_enabled = lgm_clk_gate_is_enabled,
};

static struct clk_hw *
lgm_clk_register_gate(struct lgm_clk_provider *ctx,
		      const struct lgm_clk_branch *list)
{
	unsigned long flags, cflags = list->gate_flags;
	const char *pname = list->parent_data[0].name;
	struct device *dev = ctx->dev;
	u8 shift = list->gate_shift;
	struct clk_init_data init = {};
	struct lgm_clk_gate *gate;
	u32 reg = list->gate_off;
	struct clk_hw *hw;
	int ret;

	gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	init.name = list->name;
	init.ops = &lgm_clk_gate_ops;
	init.flags = list->flags;
	init.parent_names = pname ? &pname : NULL;
	init.num_parents = pname ? 1 : 0;

	gate->membase = ctx->membase;
	gate->lock = ctx->lock;
	gate->reg = reg;
	gate->shift = shift;
	gate->flags = cflags;
	gate->hw.init = &init;

	hw = &gate->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	if (cflags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&gate->lock, flags);
		lgm_set_clk_val(gate->membase, reg, shift, 1, list->gate_val);
		spin_unlock_irqrestore(&gate->lock, flags);
	}

	return hw;
}

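/*
 * lgm_clk_register_branches() - register an array of clk branch entries
 * @ctx: clk provider context
 * @list: array of lgm_clk_branch descriptors
 * @nr_clk: number of entries in @list
 *
 * Dispatches each entry to the type-specific registration helper above
 * and records the resulting clk_hw in the provider's clk_data table,
 * indexed by the entry's id.
 */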
int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
			      const struct lgm_clk_branch *list,
			      unsigned int nr_clk)
{
	struct clk_hw *hw;
	unsigned int idx;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		switch (list->type) {
		case CLK_TYPE_FIXED:
			hw = lgm_clk_register_fixed(ctx, list);
			break;
		case CLK_TYPE_MUX:
			hw = lgm_clk_register_mux(ctx, list);
			break;
		case CLK_TYPE_DIVIDER:
			hw = lgm_clk_register_divider(ctx, list);
			break;
		case CLK_TYPE_FIXED_FACTOR:
			hw = lgm_clk_register_fixed_factor(ctx, list);
			break;
		case CLK_TYPE_GATE:
			hw = lgm_clk_register_gate(ctx, list);
			break;
		default:
			dev_err(ctx->dev, "invalid clk type\n");
			return -EINVAL;
		}

		if (IS_ERR(hw)) {
			dev_err(ctx->dev,
				"register clk: %s, type: %u failed!\n",
				list->name, list->type);
			return -EIO;
		}
		ctx->clk_data.hws[list->id] = hw;
	}

	return 0;
}

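/*
 * A "ddiv" is a cascade of two hardware dividers in one register:
 * rate = parent_rate / (div0 + 1) / (div1 + 1).  When the extra divider
 * bit (shift2) is set, the result is additionally scaled by mult/div,
 * i.e. by 2/5 for the clocks registered here, which corresponds to an
 * effective predivide factor of 2.5.
 */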
static unsigned long
lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	unsigned int div0, div1, exdiv;
	u64 prate;

	div0 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
			       ddiv->shift0, ddiv->width0) + 1;
	div1 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
			       ddiv->shift1, ddiv->width1) + 1;
	exdiv = lgm_get_clk_val(ddiv->membase, ddiv->reg,
				ddiv->shift2, ddiv->width2);
	prate = (u64)parent_rate;
	do_div(prate, div0);
	do_div(prate, div1);

	if (exdiv) {
		do_div(prate, ddiv->div);
		prate *= ddiv->mult;
	}

	return prate;
}

static int lgm_clk_ddiv_enable(struct clk_hw *hw)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	unsigned long flags;

	spin_lock_irqsave(&ddiv->lock, flags);
	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
			ddiv->width_gate, 1);
	spin_unlock_irqrestore(&ddiv->lock, flags);
	return 0;
}

static void lgm_clk_ddiv_disable(struct clk_hw *hw)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	unsigned long flags;

	spin_lock_irqsave(&ddiv->lock, flags);
	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
			ddiv->width_gate, 0);
	spin_unlock_irqrestore(&ddiv->lock, flags);
}

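/*
 * Split a requested divider into the two register fields: find the
 * smallest factor pair (ddiv1, ddiv2) with ddiv1 * ddiv2 == div and
 * both factors no larger than MAX_DDIV_REG.  Values above
 * MAX_DIVIDER_VAL are clamped; a divider that cannot be factored this
 * way is rejected.
 */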
static int
lgm_clk_get_ddiv_val(u32 div, u32 *ddiv1, u32 *ddiv2)
{
	u32 idx, temp;

	*ddiv1 = 1;
	*ddiv2 = 1;

	if (div > MAX_DIVIDER_VAL)
		div = MAX_DIVIDER_VAL;

	if (div > 1) {
		for (idx = 2; idx <= MAX_DDIV_REG; idx++) {
			temp = DIV_ROUND_UP_ULL((u64)div, idx);
			if (div % idx == 0 && temp <= MAX_DDIV_REG)
				break;
		}

		if (idx > MAX_DDIV_REG)
			return -EINVAL;

		*ddiv1 = temp;
		*ddiv2 = idx;
	}

	return 0;
}

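/*
 * Program the two divider fields for the requested rate.  When the
 * predivide bit is set, the hardware already scales the output by 2/5
 * (divides by 2.5), so the requested divider is multiplied by 2/5
 * before being factored into the two register fields.
 */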
static int
lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
		      unsigned long prate)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	u32 div, ddiv1, ddiv2;
	unsigned long flags;

	div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate);

	spin_lock_irqsave(&ddiv->lock, flags);
	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
		div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
		div = div * 2;
	}

	if (!div) {
		spin_unlock_irqrestore(&ddiv->lock, flags);
		return -EINVAL;
	}

	if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2)) {
		spin_unlock_irqrestore(&ddiv->lock, flags);
		return -EINVAL;
	}

	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0,
			ddiv1 - 1);

	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift1, ddiv->width1,
			ddiv2 - 1);
	spin_unlock_irqrestore(&ddiv->lock, flags);

	return 0;
}

static long
lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long *prate)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	u32 div, ddiv1, ddiv2;
	unsigned long flags;
	u64 rate64;

	div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate);

	/* if predivide bit is enabled, modify div by factor of 2.5 */
	spin_lock_irqsave(&ddiv->lock, flags);
	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
		div = div * 2;
		div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
	}
	spin_unlock_irqrestore(&ddiv->lock, flags);

	if (!div)
		return *prate;

	if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0)
		if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0)
			return -EINVAL;

	rate64 = *prate;
	do_div(rate64, ddiv1);
	do_div(rate64, ddiv2);

	/* if predivide bit is enabled, modify rounded rate by factor of 2.5 */
	spin_lock_irqsave(&ddiv->lock, flags);
	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
		rate64 = rate64 * 2;
		rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
	}
	spin_unlock_irqrestore(&ddiv->lock, flags);

	return rate64;
}

static const struct clk_ops lgm_clk_ddiv_ops = {
	.recalc_rate = lgm_clk_ddiv_recalc_rate,
	.enable = lgm_clk_ddiv_enable,
	.disable = lgm_clk_ddiv_disable,
	.set_rate = lgm_clk_ddiv_set_rate,
	.round_rate = lgm_clk_ddiv_round_rate,
};

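/*
 * lgm_clk_register_ddiv() - register an array of dual-divider clks
 * @ctx: clk provider context
 * @list: array of lgm_clk_ddiv_data descriptors
 * @nr_clk: number of entries in @list
 *
 * Every ddiv is registered with mult/div fixed to 2/5, matching the
 * hardware's optional 2.5 predivider stage.
 */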
int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
			  const struct lgm_clk_ddiv_data *list,
			  unsigned int nr_clk)
{
	struct device *dev = ctx->dev;
	struct clk_hw *hw;
	unsigned int idx;
	int ret;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		struct clk_init_data init = {};
		struct lgm_clk_ddiv *ddiv;

		ddiv = devm_kzalloc(dev, sizeof(*ddiv), GFP_KERNEL);
		if (!ddiv)
			return -ENOMEM;

		init.name = list->name;
		init.ops = &lgm_clk_ddiv_ops;
		init.flags = list->flags;
		init.parent_data = list->parent_data;
		init.num_parents = 1;

		ddiv->membase = ctx->membase;
		ddiv->lock = ctx->lock;
		ddiv->reg = list->reg;
		ddiv->shift0 = list->shift0;
		ddiv->width0 = list->width0;
		ddiv->shift1 = list->shift1;
		ddiv->width1 = list->width1;
		ddiv->shift_gate = list->shift_gate;
		ddiv->width_gate = list->width_gate;
		ddiv->shift2 = list->ex_shift;
		ddiv->width2 = list->ex_width;
		ddiv->flags = list->div_flags;
		ddiv->mult = 2;
		ddiv->div = 5;
		ddiv->hw.init = &init;

		hw = &ddiv->hw;
		ret = devm_clk_hw_register(dev, hw);
		if (ret) {
			dev_err(dev, "register clk: %s failed!\n", list->name);
			return ret;
		}
		ctx->clk_data.hws[list->id] = hw;
	}

	return 0;
}