/*
 * Marvell Armada 37xx SoC peripheral clocks.
 *
 * Most of the peripheral clocks can be modelled like this:
 *             _____    _______    _______
 * TBG-A-P  --|     |  |       |  |       |   ______
 * TBG-B-P  --| Mux |--| /div1 |--| /div2 |--| Gate |--> periph_clk
 * TBG-A-S  --|     |  |       |  |       |  |______|
 * TBG-B-S  --|_____|  |_______|  |_______|
 *
 * Some peripheral clocks only use one or two of these blocks, or use
 * the xtal clock as parent instead of the TBGs.
 */
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/jiffies.h>

#define TBG_SEL		0x0
#define DIV_SEL0	0x4
#define DIV_SEL1	0x8
#define DIV_SEL2	0xC
#define CLK_SEL		0x10
#define CLK_DIS		0x14

#define ARMADA_37XX_DVFS_LOAD_1		1
#define LOAD_LEVEL_NR			4

#define ARMADA_37XX_NB_L0L1		0x18
#define ARMADA_37XX_NB_L2L3		0x1C
#define ARMADA_37XX_NB_TBG_DIV_OFF	13
#define ARMADA_37XX_NB_TBG_DIV_MASK	0x7
#define ARMADA_37XX_NB_CLK_SEL_OFF	11
#define ARMADA_37XX_NB_CLK_SEL_MASK	0x1
#define ARMADA_37XX_NB_TBG_SEL_OFF	9
#define ARMADA_37XX_NB_TBG_SEL_MASK	0x3
#define ARMADA_37XX_NB_CONFIG_SHIFT	16
#define ARMADA_37XX_NB_DYN_MOD		0x24
#define ARMADA_37XX_NB_DFS_EN		31
#define ARMADA_37XX_NB_CPU_LOAD		0x30
#define ARMADA_37XX_NB_CPU_LOAD_MASK	0x3
#define ARMADA_37XX_DVFS_LOAD_0		0
#define ARMADA_37XX_DVFS_LOAD_1		1
#define ARMADA_37XX_DVFS_LOAD_2		2
#define ARMADA_37XX_DVFS_LOAD_3		3

struct clk_periph_driver_data {
        struct clk_hw_onecell_data *hw_data;
        spinlock_t lock;
        void __iomem *reg;

        /* Storage registers for the suspend/resume cycle */
        u32 tbg_sel;
        u32 div_sel0;
        u32 div_sel1;
        u32 div_sel2;
        u32 clk_sel;
        u32 clk_dis;
};

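/*
 * "Double divider" rate clock: the overall divider is the product of two
 * 3-bit divider fields which may live at different offsets, possibly in
 * two different registers.
 */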
struct clk_double_div {
        struct clk_hw hw;
        void __iomem *reg1;
        u8 shift1;
        void __iomem *reg2;
        u8 shift2;
};

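/*
 * CPU clock: both the parent selection and the divider are taken either
 * from the direct TBG_SEL/DIV_SEL registers, or, when DVFS is enabled,
 * from the North Bridge PM load-level registers. l1_expiration is used by
 * the workaround that makes the CPU spend some time at load level L1
 * before being switched back up to the highest level L0 (rates of 1 GHz
 * and above).
 */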
struct clk_pm_cpu {
        struct clk_hw hw;
        void __iomem *reg_mux;
        u8 shift_mux;
        u32 mask_mux;
        void __iomem *reg_div;
        u8 shift_div;
        struct regmap *nb_pm_base;
        unsigned long l1_expiration;
};

#define to_clk_double_div(_hw) container_of(_hw, struct clk_double_div, hw)
#define to_clk_pm_cpu(_hw) container_of(_hw, struct clk_pm_cpu, hw)

struct clk_periph_data {
        const char *name;
        const char * const *parent_names;
        int num_parents;
        struct clk_hw *mux_hw;
        struct clk_hw *rate_hw;
        struct clk_hw *gate_hw;
        struct clk_hw *muxrate_hw;
        bool is_double_div;
};

static const struct clk_div_table clk_table6[] = {
        { .val = 1, .div = 1, },
        { .val = 2, .div = 2, },
        { .val = 3, .div = 3, },
        { .val = 4, .div = 4, },
        { .val = 5, .div = 5, },
        { .val = 6, .div = 6, },
        { .val = 0, .div = 0, },
};

static const struct clk_div_table clk_table1[] = {
        { .val = 0, .div = 1, },
        { .val = 1, .div = 2, },
        { .val = 0, .div = 0, },
};

static const struct clk_div_table clk_table2[] = {
        { .val = 0, .div = 2, },
        { .val = 1, .div = 4, },
        { .val = 0, .div = 0, },
};

static const struct clk_ops clk_double_div_ops;
static const struct clk_ops clk_pm_cpu_ops;

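/*
 * Helpers describing the gate, mux and divider sub-blocks of each
 * peripheral clock. The .reg fields only hold the register offset cast to
 * a pointer here; the real ioremapped base address is added at probe time
 * in armada_3700_add_composite_clk().
 */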
#define PERIPH_GATE(_name, _bit) \
struct clk_gate gate_##_name = { \
        .reg = (void *)CLK_DIS, \
        .bit_idx = _bit, \
        .hw.init = &(struct clk_init_data){ \
                .ops = &clk_gate_ops, \
        } \
};

#define PERIPH_MUX(_name, _shift) \
struct clk_mux mux_##_name = { \
        .reg = (void *)TBG_SEL, \
        .shift = _shift, \
        .mask = 3, \
        .hw.init = &(struct clk_init_data){ \
                .ops = &clk_mux_ro_ops, \
        } \
};

#define PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2) \
struct clk_double_div rate_##_name = { \
        .reg1 = (void *)_reg1, \
        .reg2 = (void *)_reg2, \
        .shift1 = _shift1, \
        .shift2 = _shift2, \
        .hw.init = &(struct clk_init_data){ \
                .ops = &clk_double_div_ops, \
        } \
};

#define PERIPH_DIV(_name, _reg, _shift, _table) \
struct clk_divider rate_##_name = { \
        .reg = (void *)_reg, \
        .table = _table, \
        .shift = _shift, \
        .hw.init = &(struct clk_init_data){ \
                .ops = &clk_divider_ro_ops, \
        } \
};

#define PERIPH_PM_CPU(_name, _shift1, _reg, _shift2) \
struct clk_pm_cpu muxrate_##_name = { \
        .reg_mux = (void *)TBG_SEL, \
        .mask_mux = 3, \
        .shift_mux = _shift1, \
        .reg_div = (void *)_reg, \
        .shift_div = _shift2, \
        .hw.init = &(struct clk_init_data){ \
                .ops = &clk_pm_cpu_ops, \
        } \
};

#define PERIPH_CLK_FULL_DD(_name, _bit, _shift, _reg1, _reg2, _shift1, _shift2) \
static PERIPH_GATE(_name, _bit); \
static PERIPH_MUX(_name, _shift); \
static PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2);

#define PERIPH_CLK_FULL(_name, _bit, _shift, _reg, _shift1, _table) \
static PERIPH_GATE(_name, _bit); \
static PERIPH_MUX(_name, _shift); \
static PERIPH_DIV(_name, _reg, _shift1, _table);

#define PERIPH_CLK_GATE_DIV(_name, _bit, _reg, _shift, _table) \
static PERIPH_GATE(_name, _bit); \
static PERIPH_DIV(_name, _reg, _shift, _table);

#define PERIPH_CLK_MUX_DD(_name, _shift, _reg1, _reg2, _shift1, _shift2) \
static PERIPH_MUX(_name, _shift); \
static PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2);

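/*
 * Helpers building the clk_periph_data entries that tie the sub-blocks
 * together. The "full" clocks can select between the four TBG outputs;
 * the others are gated (and possibly divided) versions of a single parent
 * such as the xtal clock.
 */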
#define REF_CLK_FULL(_name) \
        { .name = #_name, \
          .parent_names = (const char *[]){ "TBG-A-P", \
                  "TBG-B-P", "TBG-A-S", "TBG-B-S"}, \
          .num_parents = 4, \
          .mux_hw = &mux_##_name.hw, \
          .gate_hw = &gate_##_name.hw, \
          .rate_hw = &rate_##_name.hw, \
        }

#define REF_CLK_FULL_DD(_name) \
        { .name = #_name, \
          .parent_names = (const char *[]){ "TBG-A-P", \
                  "TBG-B-P", "TBG-A-S", "TBG-B-S"}, \
          .num_parents = 4, \
          .mux_hw = &mux_##_name.hw, \
          .gate_hw = &gate_##_name.hw, \
          .rate_hw = &rate_##_name.hw, \
          .is_double_div = true, \
        }

#define REF_CLK_GATE(_name, _parent_name) \
        { .name = #_name, \
          .parent_names = (const char *[]){ _parent_name}, \
          .num_parents = 1, \
          .gate_hw = &gate_##_name.hw, \
        }

#define REF_CLK_GATE_DIV(_name, _parent_name) \
        { .name = #_name, \
          .parent_names = (const char *[]){ _parent_name}, \
          .num_parents = 1, \
          .gate_hw = &gate_##_name.hw, \
          .rate_hw = &rate_##_name.hw, \
        }

#define REF_CLK_PM_CPU(_name) \
        { .name = #_name, \
          .parent_names = (const char *[]){ "TBG-A-P", \
                  "TBG-B-P", "TBG-A-S", "TBG-B-S"}, \
          .num_parents = 4, \
          .muxrate_hw = &muxrate_##_name.hw, \
        }

#define REF_CLK_MUX_DD(_name) \
        { .name = #_name, \
          .parent_names = (const char *[]){ "TBG-A-P", \
                  "TBG-B-P", "TBG-A-S", "TBG-B-S"}, \
          .num_parents = 4, \
          .mux_hw = &mux_##_name.hw, \
          .rate_hw = &rate_##_name.hw, \
          .is_double_div = true, \
        }

/* NB periph clocks */
PERIPH_CLK_FULL_DD(mmc, 2, 0, DIV_SEL2, DIV_SEL2, 16, 13);
PERIPH_CLK_FULL_DD(sata_host, 3, 2, DIV_SEL2, DIV_SEL2, 10, 7);
PERIPH_CLK_FULL_DD(sec_at, 6, 4, DIV_SEL1, DIV_SEL1, 3, 0);
PERIPH_CLK_FULL_DD(sec_dap, 7, 6, DIV_SEL1, DIV_SEL1, 9, 6);
PERIPH_CLK_FULL_DD(tscem, 8, 8, DIV_SEL1, DIV_SEL1, 15, 12);
PERIPH_CLK_FULL(tscem_tmx, 10, 10, DIV_SEL1, 18, clk_table6);
static PERIPH_GATE(avs, 11);
PERIPH_CLK_FULL_DD(pwm, 13, 14, DIV_SEL0, DIV_SEL0, 3, 0);
PERIPH_CLK_FULL_DD(sqf, 12, 12, DIV_SEL1, DIV_SEL1, 27, 24);
static PERIPH_GATE(i2c_2, 16);
static PERIPH_GATE(i2c_1, 17);
PERIPH_CLK_GATE_DIV(ddr_phy, 19, DIV_SEL0, 18, clk_table2);
PERIPH_CLK_FULL_DD(ddr_fclk, 21, 16, DIV_SEL0, DIV_SEL0, 15, 12);
PERIPH_CLK_FULL(trace, 22, 18, DIV_SEL0, 20, clk_table6);
PERIPH_CLK_FULL(counter, 23, 20, DIV_SEL0, 23, clk_table6);
PERIPH_CLK_FULL_DD(eip97, 24, 24, DIV_SEL2, DIV_SEL2, 22, 19);
static PERIPH_PM_CPU(cpu, 22, DIV_SEL0, 28);

static struct clk_periph_data data_nb[] = {
        REF_CLK_FULL_DD(mmc),
        REF_CLK_FULL_DD(sata_host),
        REF_CLK_FULL_DD(sec_at),
        REF_CLK_FULL_DD(sec_dap),
        REF_CLK_FULL_DD(tscem),
        REF_CLK_FULL(tscem_tmx),
        REF_CLK_GATE(avs, "xtal"),
        REF_CLK_FULL_DD(sqf),
        REF_CLK_FULL_DD(pwm),
        REF_CLK_GATE(i2c_2, "xtal"),
        REF_CLK_GATE(i2c_1, "xtal"),
        REF_CLK_GATE_DIV(ddr_phy, "TBG-A-S"),
        REF_CLK_FULL_DD(ddr_fclk),
        REF_CLK_FULL(trace),
        REF_CLK_FULL(counter),
        REF_CLK_FULL_DD(eip97),
        REF_CLK_PM_CPU(cpu),
        { },
};

/* SB periph clocks */
PERIPH_CLK_MUX_DD(gbe_50, 6, DIV_SEL2, DIV_SEL2, 6, 9);
PERIPH_CLK_MUX_DD(gbe_core, 8, DIV_SEL1, DIV_SEL1, 18, 21);
PERIPH_CLK_MUX_DD(gbe_125, 10, DIV_SEL1, DIV_SEL1, 6, 9);
static PERIPH_GATE(gbe1_50, 0);
static PERIPH_GATE(gbe0_50, 1);
static PERIPH_GATE(gbe1_125, 2);
static PERIPH_GATE(gbe0_125, 3);
PERIPH_CLK_GATE_DIV(gbe1_core, 4, DIV_SEL1, 13, clk_table1);
PERIPH_CLK_GATE_DIV(gbe0_core, 5, DIV_SEL1, 14, clk_table1);
PERIPH_CLK_GATE_DIV(gbe_bm, 12, DIV_SEL1, 0, clk_table1);
PERIPH_CLK_FULL_DD(sdio, 11, 14, DIV_SEL0, DIV_SEL0, 3, 6);
PERIPH_CLK_FULL_DD(usb32_usb2_sys, 16, 16, DIV_SEL0, DIV_SEL0, 9, 12);
PERIPH_CLK_FULL_DD(usb32_ss_sys, 17, 18, DIV_SEL0, DIV_SEL0, 15, 18);
static PERIPH_GATE(pcie, 14);

static struct clk_periph_data data_sb[] = {
        REF_CLK_MUX_DD(gbe_50),
        REF_CLK_MUX_DD(gbe_core),
        REF_CLK_MUX_DD(gbe_125),
        REF_CLK_GATE(gbe1_50, "gbe_50"),
        REF_CLK_GATE(gbe0_50, "gbe_50"),
        REF_CLK_GATE(gbe1_125, "gbe_125"),
        REF_CLK_GATE(gbe0_125, "gbe_125"),
        REF_CLK_GATE_DIV(gbe1_core, "gbe_core"),
        REF_CLK_GATE_DIV(gbe0_core, "gbe_core"),
        REF_CLK_GATE_DIV(gbe_bm, "gbe_core"),
        REF_CLK_FULL_DD(sdio),
        REF_CLK_FULL_DD(usb32_usb2_sys),
        REF_CLK_FULL_DD(usb32_ss_sys),
        REF_CLK_GATE(pcie, "gbe_core"),
        { },
};

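/* Read a 3-bit divider field; the reserved value 7 is reported as 0 */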
static unsigned int get_div(void __iomem *reg, int shift)
{
        u32 val;

        val = (readl(reg) >> shift) & 0x7;
        if (val > 6)
                return 0;
        return val;
}

static unsigned long clk_double_div_recalc_rate(struct clk_hw *hw,
                                                unsigned long parent_rate)
{
        struct clk_double_div *double_div = to_clk_double_div(hw);
        unsigned int div;

        div = get_div(double_div->reg1, double_div->shift1);
        div *= get_div(double_div->reg2, double_div->shift2);

        return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}

static const struct clk_ops clk_double_div_ops = {
        .recalc_rate = clk_double_div_recalc_rate,
};

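/*
 * The North Bridge PM block stores one TBG selector and one divider per
 * DVFS load level. L0/L1 share one register and L2/L3 another; the even
 * load levels (L0 and L2) occupy the upper 16 bits of their register.
 */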
static void armada_3700_pm_dvfs_update_regs(unsigned int load_level,
                                            unsigned int *reg,
                                            unsigned int *offset)
{
        if (load_level <= ARMADA_37XX_DVFS_LOAD_1)
                *reg = ARMADA_37XX_NB_L0L1;
        else
                *reg = ARMADA_37XX_NB_L2L3;

        if (load_level == ARMADA_37XX_DVFS_LOAD_0 ||
            load_level == ARMADA_37XX_DVFS_LOAD_2)
                *offset += ARMADA_37XX_NB_CONFIG_SHIFT;
}

static bool armada_3700_pm_dvfs_is_enabled(struct regmap *base)
{
        unsigned int val, reg = ARMADA_37XX_NB_DYN_MOD;

        if (IS_ERR(base))
                return false;

        regmap_read(base, reg, &val);

        return !!(val & BIT(ARMADA_37XX_NB_DFS_EN));
}

static unsigned int armada_3700_pm_dvfs_get_cpu_div(struct regmap *base)
{
        unsigned int reg = ARMADA_37XX_NB_CPU_LOAD;
        unsigned int offset = ARMADA_37XX_NB_TBG_DIV_OFF;
        unsigned int load_level, div;

        /*
         * This function is only called after
         * armada_3700_pm_dvfs_is_enabled(), so the nb_pm_base regmap is
         * known to be valid here.
         */
        regmap_read(base, reg, &load_level);

        /*
         * The register and the bit offset of the divider field depend on
         * the current load level.
         */
        load_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
        armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

        regmap_read(base, reg, &div);

        return (div >> offset) & ARMADA_37XX_NB_TBG_DIV_MASK;
}

static unsigned int armada_3700_pm_dvfs_get_cpu_parent(struct regmap *base)
{
        unsigned int reg = ARMADA_37XX_NB_CPU_LOAD;
        unsigned int offset = ARMADA_37XX_NB_TBG_SEL_OFF;
        unsigned int load_level, sel;

        /*
         * This function is only called after
         * armada_3700_pm_dvfs_is_enabled(), so the nb_pm_base regmap is
         * known to be valid here.
         */
        regmap_read(base, reg, &load_level);

        /*
         * The register and the bit offset of the TBG selector field
         * depend on the current load level.
         */
        load_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
        armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

        regmap_read(base, reg, &sel);

        return (sel >> offset) & ARMADA_37XX_NB_TBG_SEL_MASK;
}

static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
{
        struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
        u32 val;

        if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base)) {
                val = armada_3700_pm_dvfs_get_cpu_parent(pm_cpu->nb_pm_base);
        } else {
                val = readl(pm_cpu->reg_mux) >> pm_cpu->shift_mux;
                val &= pm_cpu->mask_mux;
        }

        return val;
}

static unsigned long clk_pm_cpu_recalc_rate(struct clk_hw *hw,
                                            unsigned long parent_rate)
{
        struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
        unsigned int div;

        if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base))
                div = armada_3700_pm_dvfs_get_cpu_div(pm_cpu->nb_pm_base);
        else
                div = get_div(pm_cpu->reg_div, pm_cpu->shift_div);
        return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}

static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
                                  unsigned long *parent_rate)
{
        struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
        struct regmap *base = pm_cpu->nb_pm_base;
        unsigned int div = *parent_rate / rate;
        unsigned int load_level;

        if (!armada_3700_pm_dvfs_is_enabled(base))
                return -EINVAL;

        for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
                unsigned int reg, val, offset = ARMADA_37XX_NB_TBG_DIV_OFF;

                armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

                regmap_read(base, reg, &val);

                val >>= offset;
                val &= ARMADA_37XX_NB_TBG_DIV_MASK;
                if (val == div)
                        /*
                         * We found a load level whose divider matches the
                         * requested one.
                         */
                        return *parent_rate / div;
        }

        /* No load level provides the requested divider */
        return -EINVAL;
}

/*
 * Workaround for switching the CPU rate when the base frequency is 1 GHz
 * or higher.
 *
 * Going from the L2/L3 load levels directly to L0 requires the VDD supply
 * to ramp up to the L0 voltage; if the CPU starts running at the L0
 * frequency before the voltage has stabilized, it can lock up. To avoid
 * this, the CPU is not switched straight from L2/L3 to L0: it must first
 * spend at least 20 ms at the intermediate level L1.
 *
 * l1_expiration records when that 20 ms period ends. If the CPU has
 * already been at L1 long enough, the switch to L0 can happen
 * immediately; otherwise the load level is explicitly set to L1 and we
 * sleep for 20 ms before raising it to L0.
 */
static void clk_pm_cpu_set_rate_wa(struct clk_pm_cpu *pm_cpu,
                                   unsigned int new_level, unsigned long rate,
                                   struct regmap *base)
{
        unsigned int cur_level;

        regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
        cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;

        if (cur_level == new_level)
                return;

        /*
         * The system is moving to L1 on its own. If we are coming from
         * L0, no settling time is needed; otherwise remember when the
         * 20 ms stabilization period at L1 will have elapsed.
         */
        if (new_level == ARMADA_37XX_DVFS_LOAD_1) {
                if (cur_level == ARMADA_37XX_DVFS_LOAD_0)
                        pm_cpu->l1_expiration = jiffies;
                else
                        pm_cpu->l1_expiration = jiffies + msecs_to_jiffies(20);
                return;
        }

        /*
         * When going to L2 or L3 no sleeping is needed, just invalidate
         * the recorded L1 expiration time.
         */
        if (rate < 1000*1000*1000)
                goto invalidate_l1_exp;

        /*
         * We are going to L0 (>= 1 GHz). If the CPU has already spent
         * long enough at L1, the switch can be done right away.
         */
        if (pm_cpu->l1_expiration && time_is_before_eq_jiffies(pm_cpu->l1_expiration))
                goto invalidate_l1_exp;

        /* Otherwise stay at L1 for 20 ms before raising to L0 */
        regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
                           ARMADA_37XX_NB_CPU_LOAD_MASK,
                           ARMADA_37XX_DVFS_LOAD_1);
        msleep(20);

invalidate_l1_exp:
        pm_cpu->l1_expiration = 0;
}

static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
                               unsigned long parent_rate)
{
        struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
        struct regmap *base = pm_cpu->nb_pm_base;
        unsigned int div = parent_rate / rate;
        unsigned int load_level;

        /* Only available when DVFS is enabled */
        if (!armada_3700_pm_dvfs_is_enabled(base))
                return -EINVAL;

        for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
                unsigned int reg, mask, val,
                        offset = ARMADA_37XX_NB_TBG_DIV_OFF;

                armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

                regmap_read(base, reg, &val);
                val >>= offset;
                val &= ARMADA_37XX_NB_TBG_DIV_MASK;

                if (val == div) {
                        /*
                         * We found the load level matching the target
                         * divider: switch to this load level from the
                         * current one.
                         */
                        reg = ARMADA_37XX_NB_CPU_LOAD;
                        mask = ARMADA_37XX_NB_CPU_LOAD_MASK;

                        /* Apply the workaround for 1 GHz+ base clocks */
                        if (parent_rate >= 1000*1000*1000)
                                clk_pm_cpu_set_rate_wa(pm_cpu, load_level, rate, base);

                        regmap_update_bits(base, reg, mask, load_level);

                        return rate;
                }
        }

        /* We should not get here: round_rate() already validated the rate */
        return -EINVAL;
}

static const struct clk_ops clk_pm_cpu_ops = {
        .get_parent = clk_pm_cpu_get_parent,
        .round_rate = clk_pm_cpu_round_rate,
        .set_rate = clk_pm_cpu_set_rate,
        .recalc_rate = clk_pm_cpu_recalc_rate,
};

static const struct of_device_id armada_3700_periph_clock_of_match[] = {
        { .compatible = "marvell,armada-3700-periph-clock-nb",
          .data = data_nb, },
        { .compatible = "marvell,armada-3700-periph-clock-sb",
          .data = data_sb, },
        { }
};

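/*
 * Turn a statically defined clk_periph_data entry into a registered
 * composite clock: relocate the stored register offsets onto the
 * ioremapped base, hook up the spinlock, and, for the CPU clock, look up
 * the North Bridge PM syscon used for DVFS.
 */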
static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
                                         void __iomem *reg, spinlock_t *lock,
                                         struct device *dev, struct clk_hw **hw)
{
        const struct clk_ops *mux_ops = NULL, *gate_ops = NULL,
                *rate_ops = NULL;
        struct clk_hw *mux_hw = NULL, *gate_hw = NULL, *rate_hw = NULL;

        if (data->mux_hw) {
                struct clk_mux *mux;

                mux_hw = data->mux_hw;
                mux = to_clk_mux(mux_hw);
                mux->lock = lock;
                mux_ops = mux_hw->init->ops;
                mux->reg = reg + (u64)mux->reg;
        }

        if (data->gate_hw) {
                struct clk_gate *gate;

                gate_hw = data->gate_hw;
                gate = to_clk_gate(gate_hw);
                gate->lock = lock;
                gate_ops = gate_hw->init->ops;
                gate->reg = reg + (u64)gate->reg;
                gate->flags = CLK_GATE_SET_TO_DISABLE;
        }

        if (data->rate_hw) {
                rate_hw = data->rate_hw;
                rate_ops = rate_hw->init->ops;
                if (data->is_double_div) {
                        struct clk_double_div *rate;

                        rate = to_clk_double_div(rate_hw);
                        rate->reg1 = reg + (u64)rate->reg1;
                        rate->reg2 = reg + (u64)rate->reg2;
                } else {
                        struct clk_divider *rate = to_clk_divider(rate_hw);
                        const struct clk_div_table *clkt;
                        int table_size = 0;

                        rate->reg = reg + (u64)rate->reg;
                        for (clkt = rate->table; clkt->div; clkt++)
                                table_size++;
                        rate->width = order_base_2(table_size);
                        rate->lock = lock;
                }
        }

        if (data->muxrate_hw) {
                struct clk_pm_cpu *pmcpu_clk;
                struct clk_hw *muxrate_hw = data->muxrate_hw;
                struct regmap *map;

                pmcpu_clk = to_clk_pm_cpu(muxrate_hw);
                pmcpu_clk->reg_mux = reg + (u64)pmcpu_clk->reg_mux;
                pmcpu_clk->reg_div = reg + (u64)pmcpu_clk->reg_div;

                mux_hw = muxrate_hw;
                rate_hw = muxrate_hw;
                mux_ops = muxrate_hw->init->ops;
                rate_ops = muxrate_hw->init->ops;

                map = syscon_regmap_lookup_by_compatible(
                                "marvell,armada-3700-nb-pm");
                pmcpu_clk->nb_pm_base = map;
        }

        *hw = clk_hw_register_composite(dev, data->name, data->parent_names,
                                        data->num_parents, mux_hw,
                                        mux_ops, rate_hw, rate_ops,
                                        gate_hw, gate_ops, CLK_IGNORE_UNUSED);

        return PTR_ERR_OR_ZERO(*hw);
}

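/*
 * Save and restore the clock selection, divider and gate registers across
 * a system suspend/resume cycle.
 */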
static int __maybe_unused armada_3700_periph_clock_suspend(struct device *dev)
{
        struct clk_periph_driver_data *data = dev_get_drvdata(dev);

        data->tbg_sel = readl(data->reg + TBG_SEL);
        data->div_sel0 = readl(data->reg + DIV_SEL0);
        data->div_sel1 = readl(data->reg + DIV_SEL1);
        data->div_sel2 = readl(data->reg + DIV_SEL2);
        data->clk_sel = readl(data->reg + CLK_SEL);
        data->clk_dis = readl(data->reg + CLK_DIS);

        return 0;
}

static int __maybe_unused armada_3700_periph_clock_resume(struct device *dev)
{
        struct clk_periph_driver_data *data = dev_get_drvdata(dev);

        /* Follow the same programming order as the Marvell stock image */
        writel(data->clk_dis, data->reg + CLK_DIS);
        writel(data->div_sel0, data->reg + DIV_SEL0);
        writel(data->div_sel1, data->reg + DIV_SEL1);
        writel(data->div_sel2, data->reg + DIV_SEL2);
        writel(data->tbg_sel, data->reg + TBG_SEL);
        writel(data->clk_sel, data->reg + CLK_SEL);

        return 0;
}

static const struct dev_pm_ops armada_3700_periph_clock_pm_ops = {
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(armada_3700_periph_clock_suspend,
                                      armada_3700_periph_clock_resume)
};

static int armada_3700_periph_clock_probe(struct platform_device *pdev)
{
        struct clk_periph_driver_data *driver_data;
        struct device_node *np = pdev->dev.of_node;
        const struct clk_periph_data *data;
        struct device *dev = &pdev->dev;
        int num_periph = 0, i, ret;
        struct resource *res;

        data = of_device_get_match_data(dev);
        if (!data)
                return -ENODEV;

        while (data[num_periph].name)
                num_periph++;

        driver_data = devm_kzalloc(dev, sizeof(*driver_data), GFP_KERNEL);
        if (!driver_data)
                return -ENOMEM;

        driver_data->hw_data = devm_kzalloc(dev,
                                            struct_size(driver_data->hw_data,
                                                        hws, num_periph),
                                            GFP_KERNEL);
        if (!driver_data->hw_data)
                return -ENOMEM;
        driver_data->hw_data->num = num_periph;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        driver_data->reg = devm_ioremap_resource(dev, res);
        if (IS_ERR(driver_data->reg))
                return PTR_ERR(driver_data->reg);

        spin_lock_init(&driver_data->lock);

        for (i = 0; i < num_periph; i++) {
                struct clk_hw **hw = &driver_data->hw_data->hws[i];

                if (armada_3700_add_composite_clk(&data[i], driver_data->reg,
                                                  &driver_data->lock, dev, hw))
                        dev_err(dev, "Can't register periph clock %s\n",
                                data[i].name);
        }

        ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
                                     driver_data->hw_data);
        if (ret) {
                for (i = 0; i < num_periph; i++)
                        clk_hw_unregister(driver_data->hw_data->hws[i]);
                return ret;
        }

        platform_set_drvdata(pdev, driver_data);
        return 0;
}

static int armada_3700_periph_clock_remove(struct platform_device *pdev)
{
        struct clk_periph_driver_data *data = platform_get_drvdata(pdev);
        struct clk_hw_onecell_data *hw_data = data->hw_data;
        int i;

        of_clk_del_provider(pdev->dev.of_node);

        for (i = 0; i < hw_data->num; i++)
                clk_hw_unregister(hw_data->hws[i]);

        return 0;
}

static struct platform_driver armada_3700_periph_clock_driver = {
        .probe = armada_3700_periph_clock_probe,
        .remove = armada_3700_periph_clock_remove,
        .driver = {
                .name = "marvell-armada-3700-periph-clock",
                .of_match_table = armada_3700_periph_clock_of_match,
                .pm = &armada_3700_periph_clock_pm_ops,
        },
};

builtin_platform_driver(armada_3700_periph_clock_driver);