0001
0002
0003
0004
0005
0006
0007 #include <linux/clk.h>
0008 #include <linux/delay.h>
0009 #include <linux/device.h>
0010 #include <linux/err.h>
0011 #include <linux/io.h>
0012 #include <linux/of.h>
0013 #include <linux/of_address.h>
0014 #include <linux/slab.h>
0015 #include <linux/spinlock.h>
0016
0017 #include "clk-stm32-core.h"
0018 #include "reset-stm32.h"
0019
/* Serializes read-modify-write accesses to the shared RCC registers. */
static DEFINE_SPINLOCK(rlock);
0021
0022 static int stm32_rcc_clock_init(struct device *dev,
0023 const struct of_device_id *match,
0024 void __iomem *base)
0025 {
0026 const struct stm32_rcc_match_data *data = match->data;
0027 struct clk_hw_onecell_data *clk_data = data->hw_clks;
0028 struct device_node *np = dev_of_node(dev);
0029 struct clk_hw **hws;
0030 int n, max_binding;
0031
0032 max_binding = data->maxbinding;
0033
0034 clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, max_binding), GFP_KERNEL);
0035 if (!clk_data)
0036 return -ENOMEM;
0037
0038 clk_data->num = max_binding;
0039
0040 hws = clk_data->hws;
0041
0042 for (n = 0; n < max_binding; n++)
0043 hws[n] = ERR_PTR(-ENOENT);
0044
0045 for (n = 0; n < data->num_clocks; n++) {
0046 const struct clock_config *cfg_clock = &data->tab_clocks[n];
0047 struct clk_hw *hw = ERR_PTR(-ENOENT);
0048
0049 if (data->check_security &&
0050 data->check_security(base, cfg_clock))
0051 continue;
0052
0053 if (cfg_clock->func)
0054 hw = (*cfg_clock->func)(dev, data, base, &rlock,
0055 cfg_clock);
0056
0057 if (IS_ERR(hw)) {
0058 dev_err(dev, "Can't register clk %d: %ld\n", n,
0059 PTR_ERR(hw));
0060 return PTR_ERR(hw);
0061 }
0062
0063 if (cfg_clock->id != NO_ID)
0064 hws[cfg_clock->id] = hw;
0065 }
0066
0067 return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
0068 }
0069
0070 int stm32_rcc_init(struct device *dev, const struct of_device_id *match_data,
0071 void __iomem *base)
0072 {
0073 const struct of_device_id *match;
0074 int err;
0075
0076 match = of_match_node(match_data, dev_of_node(dev));
0077 if (!match) {
0078 dev_err(dev, "match data not found\n");
0079 return -ENODEV;
0080 }
0081
0082
0083 err = stm32_rcc_reset_init(dev, match, base);
0084 if (err) {
0085 pr_err("stm32 reset failed to initialize\n");
0086 return err;
0087 }
0088
0089
0090 err = stm32_rcc_clock_init(dev, match, base);
0091 if (err) {
0092 pr_err("stm32 clock failed to initialize\n");
0093 return err;
0094 }
0095
0096 return 0;
0097 }
0098
0099 static u8 stm32_mux_get_parent(void __iomem *base,
0100 struct clk_stm32_clock_data *data,
0101 u16 mux_id)
0102 {
0103 const struct stm32_mux_cfg *mux = &data->muxes[mux_id];
0104 u32 mask = BIT(mux->width) - 1;
0105 u32 val;
0106
0107 val = readl(base + mux->offset) >> mux->shift;
0108 val &= mask;
0109
0110 return val;
0111 }
0112
0113 static int stm32_mux_set_parent(void __iomem *base,
0114 struct clk_stm32_clock_data *data,
0115 u16 mux_id, u8 index)
0116 {
0117 const struct stm32_mux_cfg *mux = &data->muxes[mux_id];
0118
0119 u32 mask = BIT(mux->width) - 1;
0120 u32 reg = readl(base + mux->offset);
0121 u32 val = index << mux->shift;
0122
0123 reg &= ~(mask << mux->shift);
0124 reg |= val;
0125
0126 writel(reg, base + mux->offset);
0127
0128 return 0;
0129 }
0130
0131 static void stm32_gate_endisable(void __iomem *base,
0132 struct clk_stm32_clock_data *data,
0133 u16 gate_id, int enable)
0134 {
0135 const struct stm32_gate_cfg *gate = &data->gates[gate_id];
0136 void __iomem *addr = base + gate->offset;
0137
0138 if (enable) {
0139 if (data->gate_cpt[gate_id]++ > 0)
0140 return;
0141
0142 if (gate->set_clr != 0)
0143 writel(BIT(gate->bit_idx), addr);
0144 else
0145 writel(readl(addr) | BIT(gate->bit_idx), addr);
0146 } else {
0147 if (--data->gate_cpt[gate_id] > 0)
0148 return;
0149
0150 if (gate->set_clr != 0)
0151 writel(BIT(gate->bit_idx), addr + gate->set_clr);
0152 else
0153 writel(readl(addr) & ~BIT(gate->bit_idx), addr);
0154 }
0155 }
0156
0157 static void stm32_gate_disable_unused(void __iomem *base,
0158 struct clk_stm32_clock_data *data,
0159 u16 gate_id)
0160 {
0161 const struct stm32_gate_cfg *gate = &data->gates[gate_id];
0162 void __iomem *addr = base + gate->offset;
0163
0164 if (data->gate_cpt[gate_id] > 0)
0165 return;
0166
0167 if (gate->set_clr != 0)
0168 writel(BIT(gate->bit_idx), addr + gate->set_clr);
0169 else
0170 writel(readl(addr) & ~BIT(gate->bit_idx), addr);
0171 }
0172
0173 static int stm32_gate_is_enabled(void __iomem *base,
0174 struct clk_stm32_clock_data *data,
0175 u16 gate_id)
0176 {
0177 const struct stm32_gate_cfg *gate = &data->gates[gate_id];
0178
0179 return (readl(base + gate->offset) & BIT(gate->bit_idx)) != 0;
0180 }
0181
0182 static unsigned int _get_table_div(const struct clk_div_table *table,
0183 unsigned int val)
0184 {
0185 const struct clk_div_table *clkt;
0186
0187 for (clkt = table; clkt->div; clkt++)
0188 if (clkt->val == val)
0189 return clkt->div;
0190 return 0;
0191 }
0192
/*
 * _get_div - decode a register field value into a divisor
 *
 * @val is interpreted according to @flags, checked in this order:
 * as-is (CLK_DIVIDER_ONE_BASED), as a power-of-two exponent
 * (CLK_DIVIDER_POWER_OF_TWO), through @table when provided, or as
 * val + 1 by default. @width is unused here; kept for symmetry with
 * the common clk divider helpers.
 */
static unsigned int _get_div(const struct clk_div_table *table,
			     unsigned int val, unsigned long flags, u8 width)
{
	if (flags & CLK_DIVIDER_ONE_BASED)
		return val;
	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return 1 << val;
	if (table)
		return _get_table_div(table, val);
	return val + 1;
}
0204
/*
 * stm32_divider_get_rate - compute the output rate of divider @div_id
 * @base:        RCC registers base address
 * @data:        driver private data holding the divider table
 * @div_id:      index in data->dividers
 * @parent_rate: rate of the parent clock in Hz
 *
 * Reads the divider field, decodes it with _get_div() and returns
 * parent_rate divided (rounded up). A zero divisor returns parent_rate
 * unchanged and warns unless CLK_DIVIDER_ALLOW_ZERO is set.
 */
static unsigned long stm32_divider_get_rate(void __iomem *base,
					    struct clk_stm32_clock_data *data,
					    u16 div_id,
					    unsigned long parent_rate)
{
	const struct stm32_div_cfg *divider = &data->dividers[div_id];
	unsigned int val;
	unsigned int div;

	val = readl(base + divider->offset) >> divider->shift;
	val &= clk_div_mask(divider->width);
	div = _get_div(divider->table, val, divider->flags, divider->width);

	if (!div) {
		WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
		     "%d: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
		     div_id);
		return parent_rate;
	}

	return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}
0227
/*
 * stm32_divider_set_rate - program divider @div_id for the requested rate
 * @base:        RCC registers base address
 * @data:        driver private data holding the divider table
 * @div_id:      index in data->dividers
 * @rate:        requested output rate in Hz
 * @parent_rate: rate of the parent clock in Hz
 *
 * Uses the common divider_get_val() helper to pick the field value, then
 * writes it back. Caller must hold the RCC spinlock.
 *
 * Returns 0 on success or a negative errno from divider_get_val().
 */
static int stm32_divider_set_rate(void __iomem *base,
				  struct clk_stm32_clock_data *data,
				  u16 div_id, unsigned long rate,
				  unsigned long parent_rate)
{
	const struct stm32_div_cfg *divider = &data->dividers[div_id];
	int value;
	u32 val;

	value = divider_get_val(rate, parent_rate, divider->table,
				divider->width, divider->flags);
	if (value < 0)
		return value;

	if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
		/* Write-enable mask in the upper 16 bits; no read needed. */
		val = clk_div_mask(divider->width) << (divider->shift + 16);
	} else {
		val = readl(base + divider->offset);
		val &= ~(clk_div_mask(divider->width) << divider->shift);
	}

	val |= (u32)value << divider->shift;

	writel(val, base + divider->offset);

	return 0;
}
0255
0256 static u8 clk_stm32_mux_get_parent(struct clk_hw *hw)
0257 {
0258 struct clk_stm32_mux *mux = to_clk_stm32_mux(hw);
0259
0260 return stm32_mux_get_parent(mux->base, mux->clock_data, mux->mux_id);
0261 }
0262
0263 static int clk_stm32_mux_set_parent(struct clk_hw *hw, u8 index)
0264 {
0265 struct clk_stm32_mux *mux = to_clk_stm32_mux(hw);
0266 unsigned long flags = 0;
0267
0268 spin_lock_irqsave(mux->lock, flags);
0269
0270 stm32_mux_set_parent(mux->base, mux->clock_data, mux->mux_id, index);
0271
0272 spin_unlock_irqrestore(mux->lock, flags);
0273
0274 return 0;
0275 }
0276
/* Mux-only clock: parent selection, no gating or rate control. */
const struct clk_ops clk_stm32_mux_ops = {
	.get_parent = clk_stm32_mux_get_parent,
	.set_parent = clk_stm32_mux_set_parent,
};
0281
0282 static void clk_stm32_gate_endisable(struct clk_hw *hw, int enable)
0283 {
0284 struct clk_stm32_gate *gate = to_clk_stm32_gate(hw);
0285 unsigned long flags = 0;
0286
0287 spin_lock_irqsave(gate->lock, flags);
0288
0289 stm32_gate_endisable(gate->base, gate->clock_data, gate->gate_id, enable);
0290
0291 spin_unlock_irqrestore(gate->lock, flags);
0292 }
0293
/* .enable op: refcounted enable of the underlying gate bit. */
static int clk_stm32_gate_enable(struct clk_hw *hw)
{
	clk_stm32_gate_endisable(hw, 1);
	return 0;
}
0300
/* .disable op: refcounted disable of the underlying gate bit. */
static void clk_stm32_gate_disable(struct clk_hw *hw)
{
	clk_stm32_gate_endisable(hw, 0);
}
0305
0306 static int clk_stm32_gate_is_enabled(struct clk_hw *hw)
0307 {
0308 struct clk_stm32_gate *gate = to_clk_stm32_gate(hw);
0309
0310 return stm32_gate_is_enabled(gate->base, gate->clock_data, gate->gate_id);
0311 }
0312
0313 static void clk_stm32_gate_disable_unused(struct clk_hw *hw)
0314 {
0315 struct clk_stm32_gate *gate = to_clk_stm32_gate(hw);
0316 unsigned long flags = 0;
0317
0318 spin_lock_irqsave(gate->lock, flags);
0319
0320 stm32_gate_disable_unused(gate->base, gate->clock_data, gate->gate_id);
0321
0322 spin_unlock_irqrestore(gate->lock, flags);
0323 }
0324
/* Gate-only clock: on/off control, no parent or rate operations. */
const struct clk_ops clk_stm32_gate_ops = {
	.enable = clk_stm32_gate_enable,
	.disable = clk_stm32_gate_disable,
	.is_enabled = clk_stm32_gate_is_enabled,
	.disable_unused = clk_stm32_gate_disable_unused,
};
0331
0332 static int clk_stm32_divider_set_rate(struct clk_hw *hw, unsigned long rate,
0333 unsigned long parent_rate)
0334 {
0335 struct clk_stm32_div *div = to_clk_stm32_divider(hw);
0336 unsigned long flags = 0;
0337 int ret;
0338
0339 if (div->div_id == NO_STM32_DIV)
0340 return rate;
0341
0342 spin_lock_irqsave(div->lock, flags);
0343
0344 ret = stm32_divider_set_rate(div->base, div->clock_data, div->div_id, rate, parent_rate);
0345
0346 spin_unlock_irqrestore(div->lock, flags);
0347
0348 return ret;
0349 }
0350
/*
 * clk_stm32_divider_round_rate - .round_rate op for divider clocks
 *
 * With no divider the requested rate is returned unchanged. Read-only
 * dividers round against the value currently held in hardware; otherwise
 * the common helper picks the best achievable divisor.
 */
static long clk_stm32_divider_round_rate(struct clk_hw *hw, unsigned long rate,
					 unsigned long *prate)
{
	struct clk_stm32_div *div = to_clk_stm32_divider(hw);
	const struct stm32_div_cfg *divider;

	if (div->div_id == NO_STM32_DIV)
		return rate;

	divider = &div->clock_data->dividers[div->div_id];

	/* Read-only divider: round against the current hardware setting. */
	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
		u32 val;

		val = readl(div->base + divider->offset) >> divider->shift;
		val &= clk_div_mask(divider->width);

		return divider_ro_round_rate(hw, rate, prate, divider->table,
					     divider->width, divider->flags,
					     val);
	}

	return divider_round_rate_parent(hw, clk_hw_get_parent(hw),
					 rate, prate, divider->table,
					 divider->width, divider->flags);
}
0378
0379 static unsigned long clk_stm32_divider_recalc_rate(struct clk_hw *hw,
0380 unsigned long parent_rate)
0381 {
0382 struct clk_stm32_div *div = to_clk_stm32_divider(hw);
0383
0384 if (div->div_id == NO_STM32_DIV)
0385 return parent_rate;
0386
0387 return stm32_divider_get_rate(div->base, div->clock_data, div->div_id, parent_rate);
0388 }
0389
/* Divider-only clock: rate control, no gating or parent selection. */
const struct clk_ops clk_stm32_divider_ops = {
	.recalc_rate = clk_stm32_divider_recalc_rate,
	.round_rate = clk_stm32_divider_round_rate,
	.set_rate = clk_stm32_divider_set_rate,
};
0395
0396 static int clk_stm32_composite_set_rate(struct clk_hw *hw, unsigned long rate,
0397 unsigned long parent_rate)
0398 {
0399 struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
0400 unsigned long flags = 0;
0401 int ret;
0402
0403 if (composite->div_id == NO_STM32_DIV)
0404 return rate;
0405
0406 spin_lock_irqsave(composite->lock, flags);
0407
0408 ret = stm32_divider_set_rate(composite->base, composite->clock_data,
0409 composite->div_id, rate, parent_rate);
0410
0411 spin_unlock_irqrestore(composite->lock, flags);
0412
0413 return ret;
0414 }
0415
0416 static unsigned long clk_stm32_composite_recalc_rate(struct clk_hw *hw,
0417 unsigned long parent_rate)
0418 {
0419 struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
0420
0421 if (composite->div_id == NO_STM32_DIV)
0422 return parent_rate;
0423
0424 return stm32_divider_get_rate(composite->base, composite->clock_data,
0425 composite->div_id, parent_rate);
0426 }
0427
/*
 * clk_stm32_composite_round_rate - .round_rate op for composite clocks
 *
 * Same policy as clk_stm32_divider_round_rate(): pass-through without a
 * divider, round against the hardware value for read-only dividers,
 * otherwise use the common rounding helper.
 */
static long clk_stm32_composite_round_rate(struct clk_hw *hw, unsigned long rate,
					   unsigned long *prate)
{
	struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);

	const struct stm32_div_cfg *divider;

	if (composite->div_id == NO_STM32_DIV)
		return rate;

	divider = &composite->clock_data->dividers[composite->div_id];

	/* Read-only divider: round against the current hardware setting. */
	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
		u32 val;

		val = readl(composite->base + divider->offset) >> divider->shift;
		val &= clk_div_mask(divider->width);

		return divider_ro_round_rate(hw, rate, prate, divider->table,
					     divider->width, divider->flags,
					     val);
	}

	return divider_round_rate_parent(hw, clk_hw_get_parent(hw),
					 rate, prate, divider->table,
					 divider->width, divider->flags);
}
0456
0457 static u8 clk_stm32_composite_get_parent(struct clk_hw *hw)
0458 {
0459 struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
0460
0461 return stm32_mux_get_parent(composite->base, composite->clock_data, composite->mux_id);
0462 }
0463
/*
 * clk_stm32_composite_set_parent - .set_parent op for composite clocks
 *
 * Programs the mux under the RCC spinlock, then, when the hardware mux is
 * shared by a second clk_hw (is_multi_mux callback), reparents that
 * sibling in the clk framework so the tree stays consistent with the
 * single selector register.
 */
static int clk_stm32_composite_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
	unsigned long flags = 0;

	spin_lock_irqsave(composite->lock, flags);

	stm32_mux_set_parent(composite->base, composite->clock_data,
			     composite->mux_id, index);

	spin_unlock_irqrestore(composite->lock, flags);

	/* Keep the sibling clock sharing this mux in sync in the clk tree. */
	if (composite->clock_data->is_multi_mux) {
		struct clk_hw *other_mux_hw = composite->clock_data->is_multi_mux(hw);

		if (other_mux_hw) {
			struct clk_hw *hwp = clk_hw_get_parent_by_index(hw, index);

			clk_hw_reparent(other_mux_hw, hwp);
		}
	}

	return 0;
}
0487
0488 static int clk_stm32_composite_is_enabled(struct clk_hw *hw)
0489 {
0490 struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
0491
0492 if (composite->gate_id == NO_STM32_GATE)
0493 return (__clk_get_enable_count(hw->clk) > 0);
0494
0495 return stm32_gate_is_enabled(composite->base, composite->clock_data, composite->gate_id);
0496 }
0497
/* Parent index used to "park" a MUX_SAFE mux while its gate is off. */
#define MUX_SAFE_POSITION 0

/*
 * clk_stm32_has_safe_mux - tell whether this composite's mux must be
 * parked on MUX_SAFE_POSITION when gated (MUX_SAFE flag set).
 */
static int clk_stm32_has_safe_mux(struct clk_hw *hw)
{
	struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
	const struct stm32_mux_cfg *mux = &composite->clock_data->muxes[composite->mux_id];

	return !!(mux->flags & MUX_SAFE);
}
0507
/*
 * clk_stm32_set_safe_position_mux - park the mux on its safe parent
 *
 * Only acts when the clock is disabled; when the hardware mux is shared
 * with a second clk_hw (is_multi_mux), the mux is left alone as long as
 * that sibling is still enabled.
 */
static void clk_stm32_set_safe_position_mux(struct clk_hw *hw)
{
	struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);

	if (!clk_stm32_composite_is_enabled(hw)) {
		unsigned long flags = 0;

		if (composite->clock_data->is_multi_mux) {
			struct clk_hw *other_mux_hw = NULL;

			other_mux_hw = composite->clock_data->is_multi_mux(hw);

			/* Don't disturb the mux while the sibling uses it. */
			if (!other_mux_hw || clk_stm32_composite_is_enabled(other_mux_hw))
				return;
		}

		spin_lock_irqsave(composite->lock, flags);

		stm32_mux_set_parent(composite->base, composite->clock_data,
				     composite->mux_id, MUX_SAFE_POSITION);

		spin_unlock_irqrestore(composite->lock, flags);
	}
}
0532
0533 static void clk_stm32_safe_restore_position_mux(struct clk_hw *hw)
0534 {
0535 struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
0536 int sel = clk_hw_get_parent_index(hw);
0537 unsigned long flags = 0;
0538
0539 spin_lock_irqsave(composite->lock, flags);
0540
0541 stm32_mux_set_parent(composite->base, composite->clock_data, composite->mux_id, sel);
0542
0543 spin_unlock_irqrestore(composite->lock, flags);
0544 }
0545
0546 static void clk_stm32_composite_gate_endisable(struct clk_hw *hw, int enable)
0547 {
0548 struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
0549 unsigned long flags = 0;
0550
0551 spin_lock_irqsave(composite->lock, flags);
0552
0553 stm32_gate_endisable(composite->base, composite->clock_data, composite->gate_id, enable);
0554
0555 spin_unlock_irqrestore(composite->lock, flags);
0556 }
0557
0558 static int clk_stm32_composite_gate_enable(struct clk_hw *hw)
0559 {
0560 struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
0561
0562 if (composite->gate_id == NO_STM32_GATE)
0563 return 0;
0564
0565 clk_stm32_composite_gate_endisable(hw, 1);
0566
0567 if (composite->mux_id != NO_STM32_MUX && clk_stm32_has_safe_mux(hw))
0568 clk_stm32_safe_restore_position_mux(hw);
0569
0570 return 0;
0571 }
0572
0573 static void clk_stm32_composite_gate_disable(struct clk_hw *hw)
0574 {
0575 struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
0576
0577 if (composite->gate_id == NO_STM32_GATE)
0578 return;
0579
0580 clk_stm32_composite_gate_endisable(hw, 0);
0581
0582 if (composite->mux_id != NO_STM32_MUX && clk_stm32_has_safe_mux(hw))
0583 clk_stm32_set_safe_position_mux(hw);
0584 }
0585
0586 static void clk_stm32_composite_disable_unused(struct clk_hw *hw)
0587 {
0588 struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
0589 unsigned long flags = 0;
0590
0591 if (composite->gate_id == NO_STM32_GATE)
0592 return;
0593
0594 spin_lock_irqsave(composite->lock, flags);
0595
0596 stm32_gate_disable_unused(composite->base, composite->clock_data, composite->gate_id);
0597
0598 spin_unlock_irqrestore(composite->lock, flags);
0599 }
0600
/* Composite clock: optional gate, mux and divider behind one clk_hw. */
const struct clk_ops clk_stm32_composite_ops = {
	.set_rate = clk_stm32_composite_set_rate,
	.recalc_rate = clk_stm32_composite_recalc_rate,
	.round_rate = clk_stm32_composite_round_rate,
	.get_parent = clk_stm32_composite_get_parent,
	.set_parent = clk_stm32_composite_set_parent,
	.enable = clk_stm32_composite_gate_enable,
	.disable = clk_stm32_composite_gate_disable,
	.is_enabled = clk_stm32_composite_is_enabled,
	.disable_unused = clk_stm32_composite_disable_unused,
};
0612
0613 struct clk_hw *clk_stm32_mux_register(struct device *dev,
0614 const struct stm32_rcc_match_data *data,
0615 void __iomem *base,
0616 spinlock_t *lock,
0617 const struct clock_config *cfg)
0618 {
0619 struct clk_stm32_mux *mux = cfg->clock_cfg;
0620 struct clk_hw *hw = &mux->hw;
0621 int err;
0622
0623 mux->base = base;
0624 mux->lock = lock;
0625 mux->clock_data = data->clock_data;
0626
0627 err = clk_hw_register(dev, hw);
0628 if (err)
0629 return ERR_PTR(err);
0630
0631 return hw;
0632 }
0633
0634 struct clk_hw *clk_stm32_gate_register(struct device *dev,
0635 const struct stm32_rcc_match_data *data,
0636 void __iomem *base,
0637 spinlock_t *lock,
0638 const struct clock_config *cfg)
0639 {
0640 struct clk_stm32_gate *gate = cfg->clock_cfg;
0641 struct clk_hw *hw = &gate->hw;
0642 int err;
0643
0644 gate->base = base;
0645 gate->lock = lock;
0646 gate->clock_data = data->clock_data;
0647
0648 err = clk_hw_register(dev, hw);
0649 if (err)
0650 return ERR_PTR(err);
0651
0652 return hw;
0653 }
0654
0655 struct clk_hw *clk_stm32_div_register(struct device *dev,
0656 const struct stm32_rcc_match_data *data,
0657 void __iomem *base,
0658 spinlock_t *lock,
0659 const struct clock_config *cfg)
0660 {
0661 struct clk_stm32_div *div = cfg->clock_cfg;
0662 struct clk_hw *hw = &div->hw;
0663 int err;
0664
0665 div->base = base;
0666 div->lock = lock;
0667 div->clock_data = data->clock_data;
0668
0669 err = clk_hw_register(dev, hw);
0670 if (err)
0671 return ERR_PTR(err);
0672
0673 return hw;
0674 }
0675
0676 struct clk_hw *clk_stm32_composite_register(struct device *dev,
0677 const struct stm32_rcc_match_data *data,
0678 void __iomem *base,
0679 spinlock_t *lock,
0680 const struct clock_config *cfg)
0681 {
0682 struct clk_stm32_composite *composite = cfg->clock_cfg;
0683 struct clk_hw *hw = &composite->hw;
0684 int err;
0685
0686 composite->base = base;
0687 composite->lock = lock;
0688 composite->clock_data = data->clock_data;
0689
0690 err = clk_hw_register(dev, hw);
0691 if (err)
0692 return ERR_PTR(err);
0693
0694 return hw;
0695 }