0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014 #include <linux/clk.h>
0015 #include <linux/clk-provider.h>
0016 #include <linux/clk/renesas.h>
0017 #include <linux/delay.h>
0018 #include <linux/device.h>
0019 #include <linux/init.h>
0020 #include <linux/iopoll.h>
0021 #include <linux/mod_devicetable.h>
0022 #include <linux/module.h>
0023 #include <linux/of_address.h>
0024 #include <linux/of_device.h>
0025 #include <linux/platform_device.h>
0026 #include <linux/pm_clock.h>
0027 #include <linux/pm_domain.h>
0028 #include <linux/reset-controller.h>
0029 #include <linux/slab.h>
0030 #include <linux/units.h>
0031
0032 #include <dt-bindings/clock/renesas-cpg-mssr.h>
0033
0034 #include "rzg2l-cpg.h"
0035
0036 #ifdef DEBUG
0037 #define WARN_DEBUG(x) WARN_ON(x)
0038 #else
0039 #define WARN_DEBUG(x) do { } while (0)
0040 #endif
0041
0042 #define DIV_RSMASK(v, s, m) ((v >> s) & m)
0043 #define GET_SHIFT(val) ((val >> 12) & 0xff)
0044 #define GET_WIDTH(val) ((val >> 8) & 0xf)
0045
0046 #define KDIV(val) DIV_RSMASK(val, 16, 0xffff)
0047 #define MDIV(val) DIV_RSMASK(val, 6, 0x3ff)
0048 #define PDIV(val) DIV_RSMASK(val, 0, 0x3f)
0049 #define SDIV(val) DIV_RSMASK(val, 0, 0x7)
0050
0051 #define CLK_ON_R(reg) (reg)
0052 #define CLK_MON_R(reg) (0x180 + (reg))
0053 #define CLK_RST_R(reg) (reg)
0054 #define CLK_MRST_R(reg) (0x180 + (reg))
0055
0056 #define GET_REG_OFFSET(val) ((val >> 20) & 0xfff)
0057 #define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff)
0058 #define GET_REG_SAMPLL_CLK2(val) ((val >> 12) & 0xfff)
0059
0060 #define MAX_VCLK_FREQ (148500000)
0061
/**
 * struct sd_hw_data - SD MUX clock hardware data
 * @hw: clock hw handle
 * @conf: SD clock configuration word (register offset/shift/width packed
 *        as decoded by GET_REG_OFFSET()/GET_SHIFT()/GET_WIDTH())
 * @priv: CPG private data
 */
struct sd_hw_data {
	struct clk_hw hw;
	u32 conf;
	struct rzg2l_cpg_priv *priv;
};

#define to_sd_hw_data(_hw)	container_of(_hw, struct sd_hw_data, hw)
0069
/**
 * struct rzg2l_pll5_param - PLL5 divider parameters
 * @pl5_fracin: fractional part of the multiplier (written to SIPLL5_CLK3)
 * @pl5_refdiv: reference clock divider (SIPLL5_CLK1)
 * @pl5_intin: integer part of the multiplier (SIPLL5_CLK4)
 * @pl5_postdiv1: post divider 1 (SIPLL5_CLK1)
 * @pl5_postdiv2: post divider 2 (SIPLL5_CLK1)
 * @pl5_spread: SSCG spread value (SIPLL5_CLK5)
 */
struct rzg2l_pll5_param {
	u32 pl5_fracin;
	u8 pl5_refdiv;
	u8 pl5_intin;
	u8 pl5_postdiv1;
	u8 pl5_postdiv2;
	u8 pl5_spread;
};
0078
/**
 * struct rzg2l_pll5_mux_dsi_div_param - cached PLL5 MUX/DSI divider settings
 * @clksrc: MUX clock source selection (0 or 1; 1 halves the VCLK path)
 * @dsi_div_a: DSI divider A (power-of-two divider exponent)
 * @dsi_div_b: DSI divider B (divides by dsi_div_b + 1)
 */
struct rzg2l_pll5_mux_dsi_div_param {
	u8 clksrc;
	u8 dsi_div_a;
	u8 dsi_div_b;
};
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
/**
 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
 * @rcdev: Reset controller entity
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects read-modify-write register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @info: Pointer to platform data
 * @mux_dsi_div_params: pll5 mux and dsi div parameters
 */
struct rzg2l_cpg_priv {
	struct reset_controller_dev rcdev;
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	const struct rzg2l_cpg_info *info;

	struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
};
0116
/* devm action callback: drop the OF clock provider registered for @data. */
static void rzg2l_cpg_del_clk_provider(void *data)
{
	struct device_node *np = data;

	of_clk_del_provider(np);
}
0121
0122 static struct clk * __init
0123 rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
0124 struct clk **clks,
0125 void __iomem *base,
0126 struct rzg2l_cpg_priv *priv)
0127 {
0128 struct device *dev = priv->dev;
0129 const struct clk *parent;
0130 const char *parent_name;
0131 struct clk_hw *clk_hw;
0132
0133 parent = clks[core->parent & 0xffff];
0134 if (IS_ERR(parent))
0135 return ERR_CAST(parent);
0136
0137 parent_name = __clk_get_name(parent);
0138
0139 if (core->dtable)
0140 clk_hw = clk_hw_register_divider_table(dev, core->name,
0141 parent_name, 0,
0142 base + GET_REG_OFFSET(core->conf),
0143 GET_SHIFT(core->conf),
0144 GET_WIDTH(core->conf),
0145 core->flag,
0146 core->dtable,
0147 &priv->rmw_lock);
0148 else
0149 clk_hw = clk_hw_register_divider(dev, core->name,
0150 parent_name, 0,
0151 base + GET_REG_OFFSET(core->conf),
0152 GET_SHIFT(core->conf),
0153 GET_WIDTH(core->conf),
0154 core->flag, &priv->rmw_lock);
0155
0156 if (IS_ERR(clk_hw))
0157 return ERR_CAST(clk_hw);
0158
0159 return clk_hw->clk;
0160 }
0161
0162 static struct clk * __init
0163 rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
0164 void __iomem *base,
0165 struct rzg2l_cpg_priv *priv)
0166 {
0167 const struct clk_hw *clk_hw;
0168
0169 clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
0170 core->parent_names, core->num_parents,
0171 core->flag,
0172 base + GET_REG_OFFSET(core->conf),
0173 GET_SHIFT(core->conf),
0174 GET_WIDTH(core->conf),
0175 core->mux_flags, &priv->rmw_lock);
0176 if (IS_ERR(clk_hw))
0177 return ERR_CAST(clk_hw);
0178
0179 return clk_hw->clk;
0180 }
0181
/* SD mux has no special rate policy: defer to the generic mux helper. */
static int rzg2l_cpg_sd_clk_mux_determine_rate(struct clk_hw *hw,
					       struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
0187
static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct sd_hw_data *hwdata = to_sd_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;
	u32 off = GET_REG_OFFSET(hwdata->conf);
	u32 shift = GET_SHIFT(hwdata->conf);
	const u32 clk_src_266 = 2;
	u32 bitmask;

	/*
	 * The hardware encodes parent index i as register value i + 1
	 * (writing 0 to SEL_SDHI{0,1}_SET is prohibited), and direct
	 * switching between the 533 MHz and 400 MHz settings is not
	 * allowed: switch to the 266 MHz setting first, then to the
	 * target.  The upper 16 bits of the write are the write-enable
	 * mask for the selector field.
	 * NOTE(review): mapping of index 2 to the 266 MHz source is per
	 * the clk_src_266 constant here — confirm against the SoC manual.
	 */
	bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16;
	if (index != clk_src_266) {
		u32 msk, val;
		int ret;

		/* Step via the 266 MHz setting before the final selection. */
		writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off);

		msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;

		/* Wait for the intermediate switch to complete. */
		ret = readl_poll_timeout(priv->base + CPG_CLKSTATUS, val,
					 !(val & msk), 100,
					 CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
		if (ret) {
			dev_err(priv->dev, "failed to switch clk source\n");
			return ret;
		}
	}

	/* Program the requested source (index + 1 encoding). */
	writel(bitmask | ((index + 1) << shift), priv->base + off);

	return 0;
}
0230
0231 static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
0232 {
0233 struct sd_hw_data *hwdata = to_sd_hw_data(hw);
0234 struct rzg2l_cpg_priv *priv = hwdata->priv;
0235 u32 val = readl(priv->base + GET_REG_OFFSET(hwdata->conf));
0236
0237 val >>= GET_SHIFT(hwdata->conf);
0238 val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0);
0239 if (val) {
0240 val--;
0241 } else {
0242
0243 rzg2l_cpg_sd_clk_mux_set_parent(hw, 0);
0244 }
0245
0246 return val;
0247 }
0248
/* clk_ops for the SD card clock muxes (SDHI0/SDHI1). */
static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
	.determine_rate = rzg2l_cpg_sd_clk_mux_determine_rate,
	.set_parent = rzg2l_cpg_sd_clk_mux_set_parent,
	.get_parent = rzg2l_cpg_sd_clk_mux_get_parent,
};
0254
0255 static struct clk * __init
0256 rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
0257 void __iomem *base,
0258 struct rzg2l_cpg_priv *priv)
0259 {
0260 struct sd_hw_data *clk_hw_data;
0261 struct clk_init_data init;
0262 struct clk_hw *clk_hw;
0263 int ret;
0264
0265 clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
0266 if (!clk_hw_data)
0267 return ERR_PTR(-ENOMEM);
0268
0269 clk_hw_data->priv = priv;
0270 clk_hw_data->conf = core->conf;
0271
0272 init.name = GET_SHIFT(core->conf) ? "sd1" : "sd0";
0273 init.ops = &rzg2l_cpg_sd_clk_mux_ops;
0274 init.flags = 0;
0275 init.num_parents = core->num_parents;
0276 init.parent_names = core->parent_names;
0277
0278 clk_hw = &clk_hw_data->hw;
0279 clk_hw->init = &init;
0280
0281 ret = devm_clk_hw_register(priv->dev, clk_hw);
0282 if (ret)
0283 return ERR_PTR(ret);
0284
0285 return clk_hw->clk;
0286 }
0287
/*
 * Compute the PLL5 parameters for the requested FOUTPOSTDIV rate and
 * return the rate those parameters actually produce.
 *
 * The multiplier is split into an integer part (intin, the rate in MHz)
 * and a 24-bit fractional part (fracin); refdiv, the post dividers and
 * the SSCG spread are fixed here.  Note that the (intin << 24 + fracin)
 * >> 24 term below truncates back to intin, so the returned estimate
 * ignores the fractional contribution.
 */
static unsigned long
rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
			       unsigned long rate)
{
	unsigned long foutpostdiv_rate;

	params->pl5_intin = rate / MEGA;
	params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
	params->pl5_refdiv = 2;
	params->pl5_postdiv1 = 1;
	params->pl5_postdiv2 = 1;
	params->pl5_spread = 0x16;	/* NOTE(review): fixed spread — confirm vs manual */

	foutpostdiv_rate =
		EXTAL_FREQ_IN_MEGA_HZ * MEGA / params->pl5_refdiv *
		((((params->pl5_intin << 24) + params->pl5_fracin)) >> 24) /
		(params->pl5_postdiv1 * params->pl5_postdiv2);

	return foutpostdiv_rate;
}
0308
/**
 * struct dsi_div_hw_data - DSI divider clock hardware data
 * @hw: clock hw handle
 * @conf: clock configuration word
 * @rate: last rate set via set_rate (0 until first call)
 * @priv: CPG private data
 */
struct dsi_div_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_dsi_div_hw_data(_hw)	container_of(_hw, struct dsi_div_hw_data, hw)
0317
0318 static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw,
0319 unsigned long parent_rate)
0320 {
0321 struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
0322 unsigned long rate = dsi_div->rate;
0323
0324 if (!rate)
0325 rate = parent_rate;
0326
0327 return rate;
0328 }
0329
0330 static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw,
0331 unsigned long rate)
0332 {
0333 struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
0334 struct rzg2l_cpg_priv *priv = dsi_div->priv;
0335 struct rzg2l_pll5_param params;
0336 unsigned long parent_rate;
0337
0338 parent_rate = rzg2l_cpg_get_foutpostdiv_rate(¶ms, rate);
0339
0340 if (priv->mux_dsi_div_params.clksrc)
0341 parent_rate /= 2;
0342
0343 return parent_rate;
0344 }
0345
/* Clamp the request to the VCLK maximum and compute the parent rate. */
static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw,
					    struct clk_rate_request *req)
{
	if (req->rate > MAX_VCLK_FREQ)
		req->rate = MAX_VCLK_FREQ;

	req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);

	return 0;
}
0356
0357 static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw,
0358 unsigned long rate,
0359 unsigned long parent_rate)
0360 {
0361 struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
0362 struct rzg2l_cpg_priv *priv = dsi_div->priv;
0363
0364
0365
0366
0367
0368
0369
0370
0371
0372 if (!rate || rate > MAX_VCLK_FREQ)
0373 return -EINVAL;
0374
0375 dsi_div->rate = rate;
0376 writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN |
0377 (priv->mux_dsi_div_params.dsi_div_a << 0) |
0378 (priv->mux_dsi_div_params.dsi_div_b << 8),
0379 priv->base + CPG_PL5_SDIV);
0380
0381 return 0;
0382 }
0383
/* clk_ops for the DSI divider clock. */
static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
	.recalc_rate = rzg2l_cpg_dsi_div_recalc_rate,
	.determine_rate = rzg2l_cpg_dsi_div_determine_rate,
	.set_rate = rzg2l_cpg_dsi_div_set_rate,
};
0389
0390 static struct clk * __init
0391 rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
0392 struct clk **clks,
0393 struct rzg2l_cpg_priv *priv)
0394 {
0395 struct dsi_div_hw_data *clk_hw_data;
0396 const struct clk *parent;
0397 const char *parent_name;
0398 struct clk_init_data init;
0399 struct clk_hw *clk_hw;
0400 int ret;
0401
0402 parent = clks[core->parent & 0xffff];
0403 if (IS_ERR(parent))
0404 return ERR_CAST(parent);
0405
0406 clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
0407 if (!clk_hw_data)
0408 return ERR_PTR(-ENOMEM);
0409
0410 clk_hw_data->priv = priv;
0411
0412 parent_name = __clk_get_name(parent);
0413 init.name = core->name;
0414 init.ops = &rzg2l_cpg_dsi_div_ops;
0415 init.flags = CLK_SET_RATE_PARENT;
0416 init.parent_names = &parent_name;
0417 init.num_parents = 1;
0418
0419 clk_hw = &clk_hw_data->hw;
0420 clk_hw->init = &init;
0421
0422 ret = devm_clk_hw_register(priv->dev, clk_hw);
0423 if (ret)
0424 return ERR_PTR(ret);
0425
0426 return clk_hw->clk;
0427 }
0428
/**
 * struct pll5_mux_hw_data - PLL5_4 mux clock hardware data
 * @hw: clock hw handle
 * @conf: clock configuration word
 * @rate: cached rate (unused by the mux ops below)
 * @priv: CPG private data
 */
struct pll5_mux_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll5_mux_hw_data(_hw)	container_of(_hw, struct pll5_mux_hw_data, hw)
0437
0438 static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw,
0439 struct clk_rate_request *req)
0440 {
0441 struct clk_hw *parent;
0442 struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
0443 struct rzg2l_cpg_priv *priv = hwdata->priv;
0444
0445 parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
0446 req->best_parent_hw = parent;
0447 req->best_parent_rate = req->rate;
0448
0449 return 0;
0450 }
0451
static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	/*
	 * Select the PLL5_4 mux source by writing the parent index to
	 * CPG_OTHERFUNC1_REG together with its write-enable bit.
	 * NOTE(review): @index is written unshifted and unmasked — this
	 * assumes the selector occupies the register's low bit(s); confirm
	 * against the register layout in the hardware manual.
	 */
	writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index,
	       priv->base + CPG_OTHERFUNC1_REG);

	return 0;
}
0472
static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	/*
	 * NOTE(review): the register value is returned unmasked; this relies
	 * on the selector register reading back only the index bits (the WEN
	 * bits written in set_parent must not read back set) — verify.
	 */
	return readl(priv->base + GET_REG_OFFSET(hwdata->conf));
}
0480
/* clk_ops for the PLL5_4 mux clock. */
static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = {
	.determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate,
	.set_parent = rzg2l_cpg_pll5_4_clk_mux_set_parent,
	.get_parent = rzg2l_cpg_pll5_4_clk_mux_get_parent,
};
0486
0487 static struct clk * __init
0488 rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core,
0489 struct rzg2l_cpg_priv *priv)
0490 {
0491 struct pll5_mux_hw_data *clk_hw_data;
0492 struct clk_init_data init;
0493 struct clk_hw *clk_hw;
0494 int ret;
0495
0496 clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
0497 if (!clk_hw_data)
0498 return ERR_PTR(-ENOMEM);
0499
0500 clk_hw_data->priv = priv;
0501 clk_hw_data->conf = core->conf;
0502
0503 init.name = core->name;
0504 init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops;
0505 init.flags = CLK_SET_RATE_PARENT;
0506 init.num_parents = core->num_parents;
0507 init.parent_names = core->parent_names;
0508
0509 clk_hw = &clk_hw_data->hw;
0510 clk_hw->init = &init;
0511
0512 ret = devm_clk_hw_register(priv->dev, clk_hw);
0513 if (ret)
0514 return ERR_PTR(ret);
0515
0516 return clk_hw->clk;
0517 }
0518
/**
 * struct sipll5 - PLL5 (SIPLL5) clock hardware data
 * @hw: clock hw handle
 * @conf: clock configuration word
 * @foutpostdiv_rate: last FOUTPOSTDIV rate programmed (0 until set_rate)
 * @priv: CPG private data
 */
struct sipll5 {
	struct clk_hw hw;
	u32 conf;
	unsigned long foutpostdiv_rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_sipll5(_hw)	container_of(_hw, struct sipll5, hw)
0527
0528 static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw,
0529 unsigned long rate)
0530 {
0531 struct sipll5 *sipll5 = to_sipll5(hw);
0532 struct rzg2l_cpg_priv *priv = sipll5->priv;
0533 unsigned long vclk;
0534
0535 vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
0536 (priv->mux_dsi_div_params.dsi_div_b + 1));
0537
0538 if (priv->mux_dsi_div_params.clksrc)
0539 vclk /= 2;
0540
0541 return vclk;
0542 }
0543
0544 static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
0545 unsigned long parent_rate)
0546 {
0547 struct sipll5 *sipll5 = to_sipll5(hw);
0548 unsigned long pll5_rate = sipll5->foutpostdiv_rate;
0549
0550 if (!pll5_rate)
0551 pll5_rate = parent_rate;
0552
0553 return pll5_rate;
0554 }
0555
/* PLL5 can synthesize any requested rate: accept it unchanged. */
static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
					unsigned long rate,
					unsigned long *parent_rate)
{
	return rate;
}
0562
0563 static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
0564 unsigned long rate,
0565 unsigned long parent_rate)
0566 {
0567 struct sipll5 *sipll5 = to_sipll5(hw);
0568 struct rzg2l_cpg_priv *priv = sipll5->priv;
0569 struct rzg2l_pll5_param params;
0570 unsigned long vclk_rate;
0571 int ret;
0572 u32 val;
0573
0574
0575
0576
0577
0578
0579
0580
0581
0582
0583
0584
0585
0586 if (!rate)
0587 return -EINVAL;
0588
0589 vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
0590 sipll5->foutpostdiv_rate =
0591 rzg2l_cpg_get_foutpostdiv_rate(¶ms, vclk_rate);
0592
0593
0594 writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
0595 ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
0596 !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
0597 if (ret) {
0598 dev_err(priv->dev, "failed to release pll5 lock");
0599 return ret;
0600 }
0601
0602
0603 writel(CPG_SIPLL5_CLK1_POSTDIV1_WEN | CPG_SIPLL5_CLK1_POSTDIV2_WEN |
0604 CPG_SIPLL5_CLK1_REFDIV_WEN | (params.pl5_postdiv1 << 0) |
0605 (params.pl5_postdiv2 << 4) | (params.pl5_refdiv << 8),
0606 priv->base + CPG_SIPLL5_CLK1);
0607
0608
0609 writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);
0610
0611
0612 writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16),
0613 priv->base + CPG_SIPLL5_CLK4);
0614
0615
0616 writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);
0617
0618
0619 writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN |
0620 CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB,
0621 priv->base + CPG_SIPLL5_STBY);
0622
0623
0624 ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
0625 (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
0626 if (ret) {
0627 dev_err(priv->dev, "failed to lock pll5");
0628 return ret;
0629 }
0630
0631 return 0;
0632 }
0633
/* clk_ops for the SIPLL5 clock. */
static const struct clk_ops rzg2l_cpg_sipll5_ops = {
	.recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
	.round_rate = rzg2l_cpg_sipll5_round_rate,
	.set_rate = rzg2l_cpg_sipll5_set_rate,
};
0639
0640 static struct clk * __init
0641 rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
0642 struct clk **clks,
0643 struct rzg2l_cpg_priv *priv)
0644 {
0645 const struct clk *parent;
0646 struct clk_init_data init;
0647 const char *parent_name;
0648 struct sipll5 *sipll5;
0649 struct clk_hw *clk_hw;
0650 int ret;
0651
0652 parent = clks[core->parent & 0xffff];
0653 if (IS_ERR(parent))
0654 return ERR_CAST(parent);
0655
0656 sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
0657 if (!sipll5)
0658 return ERR_PTR(-ENOMEM);
0659
0660 init.name = core->name;
0661 parent_name = __clk_get_name(parent);
0662 init.ops = &rzg2l_cpg_sipll5_ops;
0663 init.flags = 0;
0664 init.parent_names = &parent_name;
0665 init.num_parents = 1;
0666
0667 sipll5->hw.init = &init;
0668 sipll5->conf = core->conf;
0669 sipll5->priv = priv;
0670
0671 writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN |
0672 CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);
0673
0674 clk_hw = &sipll5->hw;
0675 clk_hw->init = &init;
0676
0677 ret = devm_clk_hw_register(priv->dev, clk_hw);
0678 if (ret)
0679 return ERR_PTR(ret);
0680
0681 priv->mux_dsi_div_params.clksrc = 1;
0682 priv->mux_dsi_div_params.dsi_div_a = 1;
0683 priv->mux_dsi_div_params.dsi_div_b = 2;
0684
0685 return clk_hw->clk;
0686 }
0687
/**
 * struct pll_clk - SAM PLL clock hardware data
 * @hw: clock hw handle
 * @conf: packed CLK1/CLK2 register offsets (see GET_REG_SAMPLL_CLK*)
 * @type: clock type (only CLK_TYPE_SAM_PLL reads the hardware)
 * @base: CPG register block base address
 * @priv: CPG private data
 */
struct pll_clk {
	struct clk_hw hw;
	unsigned int conf;
	unsigned int type;
	void __iomem *base;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)
0697
0698 static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
0699 unsigned long parent_rate)
0700 {
0701 struct pll_clk *pll_clk = to_pll(hw);
0702 struct rzg2l_cpg_priv *priv = pll_clk->priv;
0703 unsigned int val1, val2;
0704 unsigned int mult = 1;
0705 unsigned int div = 1;
0706
0707 if (pll_clk->type != CLK_TYPE_SAM_PLL)
0708 return parent_rate;
0709
0710 val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
0711 val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));
0712 mult = MDIV(val1) + KDIV(val1) / 65536;
0713 div = PDIV(val1) << SDIV(val2);
0714
0715 return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, div);
0716 }
0717
/* clk_ops for the read-only SAM PLL clocks. */
static const struct clk_ops rzg2l_cpg_pll_ops = {
	.recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,
};
0721
0722 static struct clk * __init
0723 rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
0724 struct clk **clks,
0725 void __iomem *base,
0726 struct rzg2l_cpg_priv *priv)
0727 {
0728 struct device *dev = priv->dev;
0729 const struct clk *parent;
0730 struct clk_init_data init;
0731 const char *parent_name;
0732 struct pll_clk *pll_clk;
0733
0734 parent = clks[core->parent & 0xffff];
0735 if (IS_ERR(parent))
0736 return ERR_CAST(parent);
0737
0738 pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
0739 if (!pll_clk)
0740 return ERR_PTR(-ENOMEM);
0741
0742 parent_name = __clk_get_name(parent);
0743 init.name = core->name;
0744 init.ops = &rzg2l_cpg_pll_ops;
0745 init.flags = 0;
0746 init.parent_names = &parent_name;
0747 init.num_parents = 1;
0748
0749 pll_clk->hw.init = &init;
0750 pll_clk->conf = core->conf;
0751 pll_clk->base = base;
0752 pll_clk->priv = priv;
0753 pll_clk->type = core->type;
0754
0755 return clk_register(NULL, &pll_clk->hw);
0756 }
0757
/*
 * Two-cell OF clock lookup callback: args[0] selects the clock type
 * (CPG_CORE or CPG_MOD), args[1] the index within that type.  Module
 * clocks are stored after the core clocks in priv->clks[].
 */
static struct clk
*rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
			       void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct rzg2l_cpg_priv *priv = data;
	struct device *dev = priv->dev;
	const char *type;
	struct clk *clk;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		/* Only clocks up to last_dt_core_clk are exported to DT */
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (clkidx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + clkidx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}
0802
/*
 * Register a single core clock according to its type and store it in
 * priv->clks[].  On failure an error is logged and the ERR_PTR(-ENOENT)
 * placeholder remains in the array.
 */
static void __init
rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
			    const struct rzg2l_cpg_info *info,
			    struct rzg2l_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!core->name) {
		/* Skip NULLified clock */
		return;
	}

	switch (core->type) {
	case CLK_TYPE_IN:
		/* External input: resolve from the device tree by name */
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk = clk_register_fixed_factor(NULL, core->name,
						parent_name, CLK_SET_RATE_PARENT,
						core->mult, div);
		break;
	case CLK_TYPE_SAM_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv->clks,
						 priv->base, priv);
		break;
	case CLK_TYPE_SIPLL5:
		clk = rzg2l_cpg_sipll5_register(core, priv->clks, priv);
		break;
	case CLK_TYPE_DIV:
		clk = rzg2l_cpg_div_clk_register(core, priv->clks,
						 priv->base, priv);
		break;
	case CLK_TYPE_MUX:
		clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv);
		break;
	case CLK_TYPE_SD_MUX:
		clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv);
		break;
	case CLK_TYPE_PLL5_4_MUX:
		clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_DSI_DIV:
		clk = rzg2l_cpg_dsi_div_clk_register(core, priv->clks, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}
0876
0877
0878
0879
0880
0881
0882
0883
0884
0885
0886
/**
 * struct mstp_clock - Module clock (MSTP gate)
 * @hw: clock hw handle
 * @off: CLK_ON register offset (0 means the clock cannot be gated)
 * @bit: ON/MON bit within the register
 * @enabled: software enable state, used only for coupled clocks
 * @priv: CPG private data
 * @sibling: the other half of a coupled clock pair sharing this ON bit
 */
struct mstp_clock {
	struct clk_hw hw;
	u16 off;
	u8 bit;
	bool enabled;
	struct rzg2l_cpg_priv *priv;
	struct mstp_clock *sibling;
};

#define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)
0897
/*
 * Gate or ungate a module clock.
 *
 * The CLK_ON registers take a 16-bit write-enable mask in the upper half;
 * only bits with their mask bit set are affected.  After enabling, the
 * corresponding CLK_MON bit is polled (when the SoC has monitor
 * registers) until the clock is reported running.
 *
 * Returns 0 on success, -ETIMEDOUT if the monitor bit never sets.
 */
static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	unsigned int reg = clock->off;
	struct device *dev = priv->dev;
	unsigned long flags;
	unsigned int i;
	u32 bitmask = BIT(clock->bit);
	u32 value;

	if (!clock->off) {
		dev_dbg(dev, "%pC does not support ON/OFF\n", hw->clk);
		return 0;
	}

	dev_dbg(dev, "CLK_ON %u/%pC %s\n", CLK_ON_R(reg), hw->clk,
		enable ? "ON" : "OFF");
	spin_lock_irqsave(&priv->rmw_lock, flags);

	/* Upper 16 bits: write-enable mask; lower 16 bits: new bit value */
	if (enable)
		value = (bitmask << 16) | bitmask;
	else
		value = bitmask << 16;
	writel(value, priv->base + CLK_ON_R(reg));

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (!enable)
		return 0;

	if (!priv->info->has_clk_mon_regs)
		return 0;

	/* Busy-wait for the monitor bit to confirm the clock is running */
	for (i = 1000; i > 0; --i) {
		if (((readl(priv->base + CLK_MON_R(reg))) & bitmask))
			break;
		cpu_relax();
	}

	if (!i) {
		dev_err(dev, "Failed to enable CLK_ON %p\n",
			priv->base + CLK_ON_R(reg));
		return -ETIMEDOUT;
	}

	return 0;
}
0946
0947 static int rzg2l_mod_clock_enable(struct clk_hw *hw)
0948 {
0949 struct mstp_clock *clock = to_mod_clock(hw);
0950
0951 if (clock->sibling) {
0952 struct rzg2l_cpg_priv *priv = clock->priv;
0953 unsigned long flags;
0954 bool enabled;
0955
0956 spin_lock_irqsave(&priv->rmw_lock, flags);
0957 enabled = clock->sibling->enabled;
0958 clock->enabled = true;
0959 spin_unlock_irqrestore(&priv->rmw_lock, flags);
0960 if (enabled)
0961 return 0;
0962 }
0963
0964 return rzg2l_mod_clock_endisable(hw, true);
0965 }
0966
0967 static void rzg2l_mod_clock_disable(struct clk_hw *hw)
0968 {
0969 struct mstp_clock *clock = to_mod_clock(hw);
0970
0971 if (clock->sibling) {
0972 struct rzg2l_cpg_priv *priv = clock->priv;
0973 unsigned long flags;
0974 bool enabled;
0975
0976 spin_lock_irqsave(&priv->rmw_lock, flags);
0977 enabled = clock->sibling->enabled;
0978 clock->enabled = false;
0979 spin_unlock_irqrestore(&priv->rmw_lock, flags);
0980 if (enabled)
0981 return;
0982 }
0983
0984 rzg2l_mod_clock_endisable(hw, false);
0985 }
0986
0987 static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
0988 {
0989 struct mstp_clock *clock = to_mod_clock(hw);
0990 struct rzg2l_cpg_priv *priv = clock->priv;
0991 u32 bitmask = BIT(clock->bit);
0992 u32 value;
0993
0994 if (!clock->off) {
0995 dev_dbg(priv->dev, "%pC does not support ON/OFF\n", hw->clk);
0996 return 1;
0997 }
0998
0999 if (clock->sibling)
1000 return clock->enabled;
1001
1002 if (priv->info->has_clk_mon_regs)
1003 value = readl(priv->base + CLK_MON_R(clock->off));
1004 else
1005 value = readl(priv->base + clock->off);
1006
1007 return value & bitmask;
1008 }
1009
/* clk_ops for module (MSTP gate) clocks. */
static const struct clk_ops rzg2l_mod_clock_ops = {
	.enable = rzg2l_mod_clock_enable,
	.disable = rzg2l_mod_clock_disable,
	.is_enabled = rzg2l_mod_clock_is_enabled,
};
1015
/*
 * Find the registered module clock sharing @clock's off/bit pair.
 *
 * NOTE: the caller stores priv->clks[id] before calling this, so for the
 * first clock of a coupled pair the loop matches the clock itself and it
 * becomes (transiently) its own sibling; once the second half registers,
 * both ->sibling pointers are rewritten to reference each other.
 */
static struct mstp_clock
*rzg2l_mod_clock__get_sibling(struct mstp_clock *clock,
			      struct rzg2l_cpg_priv *priv)
{
	struct clk_hw *hw;
	unsigned int i;

	for (i = 0; i < priv->num_mod_clks; i++) {
		struct mstp_clock *clk;

		/* Skip unregistered slots */
		if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
			continue;

		hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
		clk = to_mod_clock(hw);
		if (clock->off == clk->off && clock->bit == clk->bit)
			return clk;
	}

	return NULL;
}
1037
/*
 * Register a single module clock and store it in priv->clks[].  Critical
 * clocks are flagged CLK_IS_CRITICAL; coupled clocks are linked to their
 * sibling sharing the same ON bit.
 */
static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
			   const struct rzg2l_cpg_info *info,
			   struct rzg2l_cpg_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;

	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzg2l_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	/* Clocks the SoC cannot run without must never be gated */
	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i]) {
			dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->off = mod->off;
	clock->bit = mod->bit;
	clock->priv = priv;
	clock->hw.init = &init;

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto fail;

	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;

	/* Link the two halves of a coupled clock pair */
	if (mod->is_coupled) {
		struct mstp_clock *sibling;

		clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
		sibling = rzg2l_mod_clock__get_sibling(clock, priv);
		if (sibling) {
			clock->sibling = sibling;
			sibling->sibling = clock;
		}
	}

	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
}
1117
1118 #define rcdev_to_priv(x) container_of(x, struct rzg2l_cpg_priv, rcdev)
1119
/*
 * Pulse-reset a module: assert, hold long enough for the reset to take,
 * then deassert.  The upper 16 bits of the write are the write-enable
 * mask for the reset bit.
 */
static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 dis = BIT(info->resets[id].bit);
	u32 we = dis << 16;

	dev_dbg(rcdev->dev, "reset id:%ld offset:0x%x\n", id, CLK_RST_R(reg));

	/* Reset module (write-enable set, reset bit cleared = asserted) */
	writel(we, priv->base + CLK_RST_R(reg));

	/* Wait for at least one cycle of the RCLK clock signal */
	udelay(35);

	/* Release module from reset state */
	writel(we | dis, priv->base + CLK_RST_R(reg));

	return 0;
}
1142
1143 static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
1144 unsigned long id)
1145 {
1146 struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1147 const struct rzg2l_cpg_info *info = priv->info;
1148 unsigned int reg = info->resets[id].off;
1149 u32 value = BIT(info->resets[id].bit) << 16;
1150
1151 dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
1152
1153 writel(value, priv->base + CLK_RST_R(reg));
1154 return 0;
1155 }
1156
1157 static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
1158 unsigned long id)
1159 {
1160 struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1161 const struct rzg2l_cpg_info *info = priv->info;
1162 unsigned int reg = info->resets[id].off;
1163 u32 dis = BIT(info->resets[id].bit);
1164 u32 value = (dis << 16) | dis;
1165
1166 dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
1167 CLK_RST_R(reg));
1168
1169 writel(value, priv->base + CLK_RST_R(reg));
1170 return 0;
1171 }
1172
1173 static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
1174 unsigned long id)
1175 {
1176 struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1177 const struct rzg2l_cpg_info *info = priv->info;
1178 unsigned int reg = info->resets[id].off;
1179 u32 bitmask = BIT(info->resets[id].bit);
1180 s8 monbit = info->resets[id].monbit;
1181
1182 if (info->has_clk_mon_regs) {
1183 return !!(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
1184 } else if (monbit >= 0) {
1185 u32 monbitmask = BIT(monbit);
1186
1187 return !!(readl(priv->base + CPG_RST_MON) & monbitmask);
1188 }
1189 return -ENOTSUPP;
1190 }
1191
/* reset_control_ops for the CPG reset controller. */
static const struct reset_control_ops rzg2l_cpg_reset_ops = {
	.reset = rzg2l_cpg_reset,
	.assert = rzg2l_cpg_assert,
	.deassert = rzg2l_cpg_deassert,
	.status = rzg2l_cpg_status,
};
1198
1199 static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
1200 const struct of_phandle_args *reset_spec)
1201 {
1202 struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1203 const struct rzg2l_cpg_info *info = priv->info;
1204 unsigned int id = reset_spec->args[0];
1205
1206 if (id >= rcdev->nr_resets || !info->resets[id].off) {
1207 dev_err(rcdev->dev, "Invalid reset index %u\n", id);
1208 return -EINVAL;
1209 }
1210
1211 return id;
1212 }
1213
1214 static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
1215 {
1216 priv->rcdev.ops = &rzg2l_cpg_reset_ops;
1217 priv->rcdev.of_node = priv->dev->of_node;
1218 priv->rcdev.dev = priv->dev;
1219 priv->rcdev.of_reset_n_cells = 1;
1220 priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
1221 priv->rcdev.nr_resets = priv->num_resets;
1222
1223 return devm_reset_controller_register(priv->dev, &priv->rcdev);
1224 }
1225
1226 static bool rzg2l_cpg_is_pm_clk(const struct of_phandle_args *clkspec)
1227 {
1228 if (clkspec->args_count != 2)
1229 return false;
1230
1231 switch (clkspec->args[0]) {
1232 case CPG_MOD:
1233 return true;
1234
1235 default:
1236 return false;
1237 }
1238 }
1239
/*
 * genpd attach_dev callback: walk the consumer's "clocks" property and
 * register every CPG_MOD clock it references with the PM clock framework,
 * so runtime PM can gate them for this device.
 *
 * Returns 0 on success or a negative errno; on failure all state created
 * here (clock reference, pm_clk list) is torn down again.
 */
static int rzg2l_cpg_attach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	int error;
	int i = 0;

	/* Iterate until of_parse_phandle_with_args() runs out of entries. */
	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (rzg2l_cpg_is_pm_clk(&clkspec)) {
			/* Create the per-device clock list lazily, once. */
			if (once) {
				once = false;
				error = pm_clk_create(dev);
				if (error) {
					of_node_put(clkspec.np);
					goto err;
				}
			}
			clk = of_clk_get_from_provider(&clkspec);
			/* Node reference no longer needed after lookup. */
			of_node_put(clkspec.np);
			if (IS_ERR(clk)) {
				error = PTR_ERR(clk);
				goto fail_destroy;
			}

			error = pm_clk_add_clk(dev, clk);
			if (error) {
				dev_err(dev, "pm_clk_add_clk failed %d\n",
					error);
				goto fail_put;
			}
		} else {
			of_node_put(clkspec.np);
		}
		i++;
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);
err:
	return error;
}
1289
/* genpd detach_dev callback: drop the PM clock list created on attach. */
static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	/* Nothing to tear down if attach never added any clocks. */
	if (pm_clk_no_clocks(dev))
		return;

	pm_clk_destroy(dev);
}
1295
/* devm action: undo pm_genpd_init() when the CPG device goes away. */
static void rzg2l_cpg_genpd_remove(void *data)
{
	pm_genpd_remove(data);
}
1300
1301 static int __init rzg2l_cpg_add_clk_domain(struct device *dev)
1302 {
1303 struct device_node *np = dev->of_node;
1304 struct generic_pm_domain *genpd;
1305 int ret;
1306
1307 genpd = devm_kzalloc(dev, sizeof(*genpd), GFP_KERNEL);
1308 if (!genpd)
1309 return -ENOMEM;
1310
1311 genpd->name = np->name;
1312 genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
1313 GENPD_FLAG_ACTIVE_WAKEUP;
1314 genpd->attach_dev = rzg2l_cpg_attach_dev;
1315 genpd->detach_dev = rzg2l_cpg_detach_dev;
1316 ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
1317 if (ret)
1318 return ret;
1319
1320 ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, genpd);
1321 if (ret)
1322 return ret;
1323
1324 return of_genpd_add_provider_simple(np, genpd);
1325 }
1326
/*
 * Probe: map the CPG register block, register all core and module clocks
 * for the matched SoC, then expose a clock provider, a clock PM domain
 * and a reset controller. All resources are devm-managed.
 */
static int __init rzg2l_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzg2l_cpg_info *info;
	struct rzg2l_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	/* Per-SoC clock/reset tables selected via rzg2l_cpg_match[]. */
	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->info = info;
	spin_lock_init(&priv->rmw_lock);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	/* One slot per possible core clock plus one per hardware module clock. */
	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->num_resets = info->num_resets;
	priv->last_dt_core_clk = info->last_dt_core_clk;

	/* Mark every slot invalid until the matching clock is registered. */
	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);

	error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	/* Drop the provider automatically on unbind or later probe failure. */
	error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzg2l_cpg_add_clk_domain(dev);
	if (error)
		return error;

	error = rzg2l_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}
1390
/*
 * Supported SoCs; each entry is compiled in only when the corresponding
 * Kconfig option provides its clock/reset description tables.
 */
static const struct of_device_id rzg2l_cpg_match[] = {
#ifdef CONFIG_CLK_R9A07G043
	{
		.compatible = "renesas,r9a07g043-cpg",
		.data = &r9a07g043_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G044
	{
		.compatible = "renesas,r9a07g044-cpg",
		.data = &r9a07g044_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G054
	{
		.compatible = "renesas,r9a07g054-cpg",
		.data = &r9a07g054_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G011
	{
		.compatible = "renesas,r9a09g011-cpg",
		.data = &r9a09g011_cpg_info,
	},
#endif
	{ /* sentinel */ }
};
1418
/* No .probe here: bound once via platform_driver_probe() below. */
static struct platform_driver rzg2l_cpg_driver = {
	.driver		= {
		.name	= "rzg2l-cpg",
		.of_match_table = rzg2l_cpg_match,
	},
};
1425
static int __init rzg2l_cpg_init(void)
{
	/* One-shot bind: probe is __init, so use platform_driver_probe(). */
	return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
}

/*
 * NOTE(review): registered at subsys_initcall, presumably so the clocks
 * and resets exist before consumer drivers probe — confirm no earlier
 * consumer depends on this provider.
 */
subsys_initcall(rzg2l_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");
MODULE_LICENSE("GPL v2");