/*
 * Marvell Armada AP806/AP807 CPU clock driver
 *
 * Provides one clock per CPU cluster, derived from the cluster PLL
 * through the DFS divider registers in the AP system controller.
 */
#define pr_fmt(fmt) "ap-cpu-clk: " fmt

#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "armada_ap_cp_helper.h"

#define AP806_CPU_CLUSTER0		0
#define AP806_CPU_CLUSTER1		1
#define AP806_CPUS_PER_CLUSTER		2
#define APN806_CPU1_MASK		0x1

#define APN806_CLUSTER_NUM_OFFSET	8
#define APN806_CLUSTER_NUM_MASK		BIT(APN806_CLUSTER_NUM_OFFSET)

#define APN806_MAX_DIVIDER		32

/*
 * struct cpu_dfs_regs - CPU DFS register layout for one AP variant.
 * Register offsets, masks and bit positions differ between AP806 and
 * AP807; see ap806_dfs_regs and ap807_dfs_regs below.
 */
struct cpu_dfs_regs {
	unsigned int divider_reg;
	unsigned int force_reg;
	unsigned int ratio_reg;
	unsigned int ratio_state_reg;
	unsigned int divider_mask;
	unsigned int cluster_offset;
	unsigned int force_mask;
	int divider_offset;
	int divider_ratio;
	int ratio_offset;
	int ratio_state_offset;
	int ratio_state_cluster_offset;
};

/* AP806 CPU DFS register mapping */
#define AP806_CA72MP2_0_PLL_CR_0_REG_OFFSET		0x278
#define AP806_CA72MP2_0_PLL_CR_1_REG_OFFSET		0x280
#define AP806_CA72MP2_0_PLL_CR_2_REG_OFFSET		0x284
#define AP806_CA72MP2_0_PLL_SR_REG_OFFSET		0xC94

#define AP806_CA72MP2_0_PLL_CR_CLUSTER_OFFSET		0x14
#define AP806_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET		0
#define AP806_PLL_CR_CPU_CLK_DIV_RATIO			0
#define AP806_PLL_CR_0_CPU_CLK_DIV_RATIO_MASK \
		(0x3f << AP806_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET)
#define AP806_PLL_CR_0_CPU_CLK_RELOAD_FORCE_OFFSET	24
#define AP806_PLL_CR_0_CPU_CLK_RELOAD_FORCE_MASK \
		(0x1 << AP806_PLL_CR_0_CPU_CLK_RELOAD_FORCE_OFFSET)
#define AP806_PLL_CR_0_CPU_CLK_RELOAD_RATIO_OFFSET	16
#define AP806_CA72MP2_0_PLL_RATIO_STABLE_OFFSET		0
#define AP806_CA72MP2_0_PLL_RATIO_STATE			11

#define STATUS_POLL_PERIOD_US		1
#define STATUS_POLL_TIMEOUT_US		1000000

#define to_ap_cpu_clk(_hw) container_of(_hw, struct ap_cpu_clk, hw)

static const struct cpu_dfs_regs ap806_dfs_regs = {
	.divider_reg = AP806_CA72MP2_0_PLL_CR_0_REG_OFFSET,
	.force_reg = AP806_CA72MP2_0_PLL_CR_1_REG_OFFSET,
	.ratio_reg = AP806_CA72MP2_0_PLL_CR_2_REG_OFFSET,
	.ratio_state_reg = AP806_CA72MP2_0_PLL_SR_REG_OFFSET,
	.divider_mask = AP806_PLL_CR_0_CPU_CLK_DIV_RATIO_MASK,
	.cluster_offset = AP806_CA72MP2_0_PLL_CR_CLUSTER_OFFSET,
	.force_mask = AP806_PLL_CR_0_CPU_CLK_RELOAD_FORCE_MASK,
	.divider_offset = AP806_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET,
	.divider_ratio = AP806_PLL_CR_CPU_CLK_DIV_RATIO,
	.ratio_offset = AP806_PLL_CR_0_CPU_CLK_RELOAD_RATIO_OFFSET,
	.ratio_state_offset = AP806_CA72MP2_0_PLL_RATIO_STABLE_OFFSET,
	.ratio_state_cluster_offset = AP806_CA72MP2_0_PLL_RATIO_STABLE_OFFSET,
};

/* AP807 CPU DFS register mapping */
#define AP807_DEVICE_GENERAL_CONTROL_10_REG_OFFSET	0x278
#define AP807_DEVICE_GENERAL_CONTROL_11_REG_OFFSET	0x27c
#define AP807_DEVICE_GENERAL_STATUS_6_REG_OFFSET	0xc98
#define AP807_CA72MP2_0_PLL_CR_CLUSTER_OFFSET		0x8
#define AP807_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET		18
#define AP807_PLL_CR_0_CPU_CLK_DIV_RATIO_MASK \
		(0x3f << AP807_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET)
#define AP807_PLL_CR_1_CPU_CLK_DIV_RATIO_OFFSET		12
#define AP807_PLL_CR_1_CPU_CLK_DIV_RATIO_MASK \
		(0x3f << AP807_PLL_CR_1_CPU_CLK_DIV_RATIO_OFFSET)
#define AP807_PLL_CR_CPU_CLK_DIV_RATIO			3
#define AP807_PLL_CR_0_CPU_CLK_RELOAD_FORCE_OFFSET	0
#define AP807_PLL_CR_0_CPU_CLK_RELOAD_FORCE_MASK \
		(0x3 << AP807_PLL_CR_0_CPU_CLK_RELOAD_FORCE_OFFSET)
#define AP807_PLL_CR_0_CPU_CLK_RELOAD_RATIO_OFFSET	6
#define AP807_CA72MP2_0_PLL_CLKDIV_RATIO_STABLE_OFFSET	20
#define AP807_CA72MP2_0_PLL_CLKDIV_RATIO_STABLE_CLUSTER_OFFSET	3

static const struct cpu_dfs_regs ap807_dfs_regs = {
	.divider_reg = AP807_DEVICE_GENERAL_CONTROL_10_REG_OFFSET,
	.force_reg = AP807_DEVICE_GENERAL_CONTROL_11_REG_OFFSET,
	.ratio_reg = AP807_DEVICE_GENERAL_CONTROL_11_REG_OFFSET,
	.ratio_state_reg = AP807_DEVICE_GENERAL_STATUS_6_REG_OFFSET,
	.divider_mask = AP807_PLL_CR_0_CPU_CLK_DIV_RATIO_MASK,
	.cluster_offset = AP807_CA72MP2_0_PLL_CR_CLUSTER_OFFSET,
	.force_mask = AP807_PLL_CR_0_CPU_CLK_RELOAD_FORCE_MASK,
	.divider_offset = AP807_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET,
	.divider_ratio = AP807_PLL_CR_CPU_CLK_DIV_RATIO,
	.ratio_offset = AP807_PLL_CR_0_CPU_CLK_RELOAD_RATIO_OFFSET,
	.ratio_state_offset = AP807_CA72MP2_0_PLL_CLKDIV_RATIO_STABLE_OFFSET,
	.ratio_state_cluster_offset =
		AP807_CA72MP2_0_PLL_CLKDIV_RATIO_STABLE_CLUSTER_OFFSET,
};

/*
 * struct ap_cpu_clk - one clock per CPU cluster
 * @cluster:	cluster index this clock controls
 * @clk_name:	unique clock name built by ap_cp_unique_name()
 * @dev:	clock provider device
 * @hw:		clk_hw of this cluster clock
 * @pll_cr_base: regmap of the PLL control/status registers (AP syscon)
 * @pll_regs:	AP806/AP807 specific register layout
 */
struct ap_cpu_clk {
	unsigned int cluster;
	const char *clk_name;
	struct device *dev;
	struct clk_hw hw;
	struct regmap *pll_cr_base;
	const struct cpu_dfs_regs *pll_regs;
};

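/* Derive the CPU rate from the parent PLL rate and the cluster's divider */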
static unsigned long ap_cpu_clk_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct ap_cpu_clk *clk = to_ap_cpu_clk(hw);
	unsigned int cpu_clkdiv_reg;
	unsigned int cpu_clkdiv_ratio;

	cpu_clkdiv_reg = clk->pll_regs->divider_reg +
		(clk->cluster * clk->pll_regs->cluster_offset);
	regmap_read(clk->pll_cr_base, cpu_clkdiv_reg, &cpu_clkdiv_ratio);
	cpu_clkdiv_ratio &= clk->pll_regs->divider_mask;
	cpu_clkdiv_ratio >>= clk->pll_regs->divider_offset;

	return parent_rate / cpu_clkdiv_ratio;
}

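/*
 * Rate changes follow the DFS handshake: program the new divider,
 * request a forced reload, raise the reload-ratio request, wait for
 * the cluster's ratio-stable bit, then clear the request.
 */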
static int ap_cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	struct ap_cpu_clk *clk = to_ap_cpu_clk(hw);
	int ret, divider = parent_rate / rate;
	unsigned int cpu_clkdiv_reg, cpu_force_reg, cpu_ratio_reg, stable_bit;
	unsigned int reg;

	cpu_clkdiv_reg = clk->pll_regs->divider_reg +
		(clk->cluster * clk->pll_regs->cluster_offset);
	cpu_force_reg = clk->pll_regs->force_reg +
		(clk->cluster * clk->pll_regs->cluster_offset);
	cpu_ratio_reg = clk->pll_regs->ratio_reg +
		(clk->cluster * clk->pll_regs->cluster_offset);

	regmap_read(clk->pll_cr_base, cpu_clkdiv_reg, &reg);
	reg &= ~(clk->pll_regs->divider_mask);
	reg |= (divider << clk->pll_regs->divider_offset);

	/*
	 * The AP807 divider has a second ratio field that must be kept
	 * in step with the main one (scaled by divider_ratio); on AP806
	 * divider_ratio is 0 and this block is skipped.
	 */
	if (clk->pll_regs->divider_ratio) {
		reg &= ~(AP807_PLL_CR_1_CPU_CLK_DIV_RATIO_MASK);
		reg |= ((divider * clk->pll_regs->divider_ratio) <<
				AP807_PLL_CR_1_CPU_CLK_DIV_RATIO_OFFSET);
	}
	regmap_write(clk->pll_cr_base, cpu_clkdiv_reg, reg);

	/* Force the new divider to be reloaded and request a ratio switch */
	regmap_update_bits(clk->pll_cr_base, cpu_force_reg,
			   clk->pll_regs->force_mask,
			   clk->pll_regs->force_mask);

	regmap_update_bits(clk->pll_cr_base, cpu_ratio_reg,
			   BIT(clk->pll_regs->ratio_offset),
			   BIT(clk->pll_regs->ratio_offset));

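	/* Wait for this cluster's clock-divider ratio to report stable */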
	stable_bit = BIT(clk->pll_regs->ratio_state_offset +
			 clk->cluster *
			 clk->pll_regs->ratio_state_cluster_offset);
	ret = regmap_read_poll_timeout(clk->pll_cr_base,
				       clk->pll_regs->ratio_state_reg, reg,
				       reg & stable_bit, STATUS_POLL_PERIOD_US,
				       STATUS_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	/* Clear the ratio switch request now that the new ratio is stable */
	regmap_update_bits(clk->pll_cr_base, cpu_ratio_reg,
			   BIT(clk->pll_regs->ratio_offset), 0);

	return 0;
}

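/*
 * Achievable rates are integer divisions of the parent PLL, with the
 * divider capped at APN806_MAX_DIVIDER.
 */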
static long ap_cpu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long *parent_rate)
{
	int divider = *parent_rate / rate;

	divider = min(divider, APN806_MAX_DIVIDER);

	return *parent_rate / divider;
}

static const struct clk_ops ap_cpu_clk_ops = {
	.recalc_rate = ap_cpu_clk_recalc_rate,
	.round_rate = ap_cpu_clk_round_rate,
	.set_rate = ap_cpu_clk_set_rate,
};

static int ap_cpu_clock_probe(struct platform_device *pdev)
{
	int ret, nclusters = 0, cluster_index = 0;
	struct device *dev = &pdev->dev;
	struct device_node *dn, *np = dev->of_node;
	struct clk_hw_onecell_data *ap_cpu_data;
	struct ap_cpu_clk *ap_cpu_clk;
	struct regmap *regmap;

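	/* The PLL control/status registers live in the parent AP syscon node */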
	regmap = syscon_node_to_regmap(np->parent);
	if (IS_ERR(regmap)) {
		pr_err("cannot get pll_cr_base regmap\n");
		return PTR_ERR(regmap);
	}

	/*
	 * DFS is controlled per cluster of two CPUs: CPU0/CPU1 sit in
	 * cluster 0 and CPU2/CPU3 in cluster 1. CPU0 is the boot CPU,
	 * so cluster 0 always exists; bump the count to two as soon as
	 * a CPU with the cluster-1 bit set in its "reg" value is found.
	 */
	nclusters = 1;
	for_each_of_cpu_node(dn) {
		int cpu, err;

		err = of_property_read_u32(dn, "reg", &cpu);
		if (WARN_ON(err)) {
			of_node_put(dn);
			return err;
		}

		/* Any CPU in the second cluster means two clusters exist */
		if (cpu & APN806_CLUSTER_NUM_MASK) {
			nclusters = 2;
			of_node_put(dn);
			break;
		}
	}

	/*
	 * DFS is controlled per cluster, so allocate one clock (and one
	 * onecell provider slot) per cluster rather than per CPU.
	 */
	ap_cpu_clk = devm_kcalloc(dev, nclusters, sizeof(*ap_cpu_clk),
				  GFP_KERNEL);
	if (!ap_cpu_clk)
		return -ENOMEM;

	ap_cpu_data = devm_kzalloc(dev, struct_size(ap_cpu_data, hws,
						    nclusters),
				   GFP_KERNEL);
	if (!ap_cpu_data)
		return -ENOMEM;

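	/* Walk the CPUs again and register one clock per cluster */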
	for_each_of_cpu_node(dn) {
		char clk_name[] = "cpu-cluster-0";
		struct clk_init_data init = {};
		const char *parent_name;
		struct clk *parent;
		int cpu, err;

		err = of_property_read_u32(dn, "reg", &cpu);
		if (WARN_ON(err)) {
			of_node_put(dn);
			return err;
		}

		cluster_index = cpu & APN806_CLUSTER_NUM_MASK;
		cluster_index >>= APN806_CLUSTER_NUM_OFFSET;

		/* Initialize once per cluster */
		if (ap_cpu_data->hws[cluster_index])
			continue;

		parent = of_clk_get(np, cluster_index);
		if (IS_ERR(parent)) {
			dev_err(dev, "Could not get the clock parent\n");
			of_node_put(dn);
			return -EINVAL;
		}
		parent_name = __clk_get_name(parent);
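		/* clk_name[12] is the trailing '0'; bump it to the cluster index */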
		clk_name[12] += cluster_index;
		ap_cpu_clk[cluster_index].clk_name =
			ap_cp_unique_name(dev, np->parent, clk_name);
		ap_cpu_clk[cluster_index].cluster = cluster_index;
		ap_cpu_clk[cluster_index].pll_cr_base = regmap;
		ap_cpu_clk[cluster_index].hw.init = &init;
		ap_cpu_clk[cluster_index].dev = dev;
		ap_cpu_clk[cluster_index].pll_regs = of_device_get_match_data(&pdev->dev);

		init.name = ap_cpu_clk[cluster_index].clk_name;
		init.ops = &ap_cpu_clk_ops;
		init.num_parents = 1;
		init.parent_names = &parent_name;

		ret = devm_clk_hw_register(dev, &ap_cpu_clk[cluster_index].hw);
		if (ret) {
			of_node_put(dn);
			return ret;
		}
		ap_cpu_data->hws[cluster_index] = &ap_cpu_clk[cluster_index].hw;
	}

	ap_cpu_data->num = cluster_index + 1;

	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, ap_cpu_data);
	if (ret)
		dev_err(dev, "failed to register OF clock provider\n");

	return ret;
}

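/*
 * Illustrative device-tree usage as implied by this driver (node and
 * phandle names here are made up for the example, not taken from a real
 * dtsi): the clock node sits under the AP syscon, lists one parent PLL
 * clock per cluster, and exposes one cell per cluster to consumers.
 *
 *	ap_syscon: system-controller@6f4000 {
 *		cpu_clk: clock-cpu {
 *			compatible = "marvell,ap806-cpu-clock";
 *			clocks = <&ap_clk 0>, <&ap_clk 1>;
 *			#clock-cells = <1>;
 *		};
 *	};
 */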
static const struct of_device_id ap_cpu_clock_of_match[] = {
	{
		.compatible = "marvell,ap806-cpu-clock",
		.data = &ap806_dfs_regs,
	},
	{
		.compatible = "marvell,ap807-cpu-clock",
		.data = &ap807_dfs_regs,
	},
	{ }
};

static struct platform_driver ap_cpu_clock_driver = {
	.probe = ap_cpu_clock_probe,
	.driver = {
		.name = "marvell-ap-cpu-clock",
		.of_match_table = ap_cpu_clock_of_match,
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(ap_cpu_clock_driver);