0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include <linux/kernel.h>
0011 #include <linux/slab.h>
0012 #include <linux/clk.h>
0013 #include <linux/clk-provider.h>
0014 #include <linux/of_address.h>
0015 #include <linux/io.h>
0016 #include <linux/of.h>
0017 #include <linux/delay.h>
0018 #include <linux/mvebu-pmsu.h>
0019 #include <asm/smp_plat.h>
0020
/* Clock-complex system controller registers, relative to the base
 * mapped from the node's first "reg" entry in of_cpu_clk_setup(). */
#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET 0x0
#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL 0xff
#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT 8
#define SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET 0x8
#define SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT 16
#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET 0xC
/* Each CPU owns a byte-wide field in the VALUE register; only the low
 * six bits of that field carry the divider. */
#define SYS_CTRL_CLK_DIVIDER_MASK 0x3F

/* Layout of the per-CPU PMU DFS register used by clk_cpu_on_set_rate(). */
#define PMU_DFS_RATIO_SHIFT 16
#define PMU_DFS_RATIO_MASK 0x3F

#define MAX_CPU 4

/* Driver-private state for one per-CPU clock. */
struct cpu_clk {
	struct clk_hw hw;	/* handle exchanged with the clk framework */
	int cpu;		/* CPU index (the DT "reg" property) */
	const char *clk_name;	/* "cpuN", heap-allocated, owned here */
	const char *parent_name;
	void __iomem *reg_base;	/* clock-complex registers */
	void __iomem *pmu_dfs;	/* per-CPU PMU DFS register, NULL if absent */
};

/* Clocks registered by of_cpu_clk_setup(), indexed by CPU number,
 * exported through clk_data below. */
static struct clk **clks;

static struct clk_onecell_data clk_data;

#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)
0047
0048 static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
0049 unsigned long parent_rate)
0050 {
0051 struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
0052 u32 reg, div;
0053
0054 reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
0055 div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
0056 return parent_rate / div;
0057 }
0058
0059 static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
0060 unsigned long *parent_rate)
0061 {
0062
0063 u32 div;
0064
0065 div = *parent_rate / rate;
0066 if (div == 0)
0067 div = 1;
0068 else if (div > 3)
0069 div = 3;
0070
0071 return *parent_rate / div;
0072 }
0073
/*
 * Change the rate of a CPU whose clock is currently disabled by
 * programming the per-CPU divider in the clock-complex directly and
 * forcing a divider reload.  Only reached via clk_cpu_set_rate() when
 * the clock is off, so the CPU is not executing from this clock while
 * its divider changes.
 */
static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate,
				unsigned long parent_rate)

{
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
	u32 reg, div;
	u32 reload_mask;

	/* Write the new divider into this CPU's byte of the VALUE register. */
	div = parent_rate / rate;
	reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
		& (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
		| (div << (cpuclk->cpu * 8));
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);

	/* NOTE(review): bits 20+cpu look like per-CPU "reload ratio"
	 * strobes and bit 24 the global reload trigger -- confirm
	 * against the Armada XP functional specification. */
	reload_mask = 1 << (20 + cpuclk->cpu);

	/* Arm the reload for this CPU's divider... */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | reload_mask;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* ...then trigger it. */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | 1 << 24;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Give the hardware time to latch, clear the trigger and reload
	 * bits, and let the clock settle before returning. */
	udelay(1000);
	reg &= ~(reload_mask | 1 << 24);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	udelay(1000);

	return 0;
}
0107
/*
 * Change the rate of a CPU whose clock is running, using the PMU
 * dynamic frequency scaling (DFS) hardware: program the target ratio
 * into the per-CPU PMU DFS register, put the clock-complex dividers
 * in reset, and let the PMSU perform the actual switch via
 * mvebu_pmsu_dfs_request().
 *
 * NOTE(review): only a request for exactly twice the current rate
 * selects half the fabric divider; any other rate keeps the full
 * fabric divider -- so in practice this toggles between full and half
 * fabric speed only.  Confirm against the PMSU documentation.
 */
static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate,
			       unsigned long parent_rate)
{
	u32 reg;
	unsigned long fabric_div, target_div, cur_rate;
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);

	/* DFS requires the per-CPU PMU register; it is left NULL by
	 * of_cpu_clk_setup() when the DT has no pmu-dfs "reg" range. */
	if (!cpuclk->pmu_dfs)
		return -ENODEV;

	cur_rate = clk_hw_get_rate(hwclk);

	/* Current CPU-to-fabric clock ratio. */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET);
	fabric_div = (reg >> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT) &
		SYS_CTRL_CLK_DIVIDER_MASK;

	/* Doubling the rate halves the divider; anything else keeps it. */
	if (rate == 2 * cur_rate)
		target_div = fabric_div / 2;
	else
		target_div = fabric_div;

	if (target_div == 0)
		target_div = 1;

	/* Program the ratio the DFS hardware will apply on the switch. */
	reg = readl(cpuclk->pmu_dfs);
	reg &= ~(PMU_DFS_RATIO_MASK << PMU_DFS_RATIO_SHIFT);
	reg |= (target_div << PMU_DFS_RATIO_SHIFT);
	writel(reg, cpuclk->pmu_dfs);

	/* Hold all clock dividers in reset while the PMSU reclocks. */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	reg |= (SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL <<
		SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	return mvebu_pmsu_dfs_request(cpuclk->cpu);
}
0150
0151 static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
0152 unsigned long parent_rate)
0153 {
0154 if (__clk_is_enabled(hwclk->clk))
0155 return clk_cpu_on_set_rate(hwclk, rate, parent_rate);
0156 else
0157 return clk_cpu_off_set_rate(hwclk, rate, parent_rate);
0158 }
0159
/* Ops for the per-CPU clocks: rate control only, no gating. */
static const struct clk_ops cpu_ops = {
	.recalc_rate = clk_cpu_recalc_rate,
	.round_rate = clk_cpu_round_rate,
	.set_rate = clk_cpu_set_rate,
};
0165
0166 static void __init of_cpu_clk_setup(struct device_node *node)
0167 {
0168 struct cpu_clk *cpuclk;
0169 void __iomem *clock_complex_base = of_iomap(node, 0);
0170 void __iomem *pmu_dfs_base = of_iomap(node, 1);
0171 int ncpus = 0;
0172 struct device_node *dn;
0173
0174 if (clock_complex_base == NULL) {
0175 pr_err("%s: clock-complex base register not set\n",
0176 __func__);
0177 return;
0178 }
0179
0180 if (pmu_dfs_base == NULL)
0181 pr_warn("%s: pmu-dfs base register not set, dynamic frequency scaling not available\n",
0182 __func__);
0183
0184 for_each_of_cpu_node(dn)
0185 ncpus++;
0186
0187 cpuclk = kcalloc(ncpus, sizeof(*cpuclk), GFP_KERNEL);
0188 if (WARN_ON(!cpuclk))
0189 goto cpuclk_out;
0190
0191 clks = kcalloc(ncpus, sizeof(*clks), GFP_KERNEL);
0192 if (WARN_ON(!clks))
0193 goto clks_out;
0194
0195 for_each_of_cpu_node(dn) {
0196 struct clk_init_data init;
0197 struct clk *clk;
0198 char *clk_name = kzalloc(5, GFP_KERNEL);
0199 int cpu, err;
0200
0201 if (WARN_ON(!clk_name))
0202 goto bail_out;
0203
0204 err = of_property_read_u32(dn, "reg", &cpu);
0205 if (WARN_ON(err))
0206 goto bail_out;
0207
0208 sprintf(clk_name, "cpu%d", cpu);
0209
0210 cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0);
0211 cpuclk[cpu].clk_name = clk_name;
0212 cpuclk[cpu].cpu = cpu;
0213 cpuclk[cpu].reg_base = clock_complex_base;
0214 if (pmu_dfs_base)
0215 cpuclk[cpu].pmu_dfs = pmu_dfs_base + 4 * cpu;
0216 cpuclk[cpu].hw.init = &init;
0217
0218 init.name = cpuclk[cpu].clk_name;
0219 init.ops = &cpu_ops;
0220 init.flags = 0;
0221 init.parent_names = &cpuclk[cpu].parent_name;
0222 init.num_parents = 1;
0223
0224 clk = clk_register(NULL, &cpuclk[cpu].hw);
0225 if (WARN_ON(IS_ERR(clk)))
0226 goto bail_out;
0227 clks[cpu] = clk;
0228 }
0229 clk_data.clk_num = MAX_CPU;
0230 clk_data.clks = clks;
0231 of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
0232
0233 return;
0234 bail_out:
0235 kfree(clks);
0236 while(ncpus--)
0237 kfree(cpuclk[ncpus].clk_name);
0238 clks_out:
0239 kfree(cpuclk);
0240 cpuclk_out:
0241 iounmap(clock_complex_base);
0242 }
0243
0244 CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock",
0245 of_cpu_clk_setup);
0246
/*
 * The 98DX3236 has no adjustable CPU clock; register a trivial
 * provider so that device trees referencing this node still resolve.
 * NOTE(review): the provider data is NULL, so consumers receive a
 * NULL clk from of_clk_src_simple_get() -- presumably a stub kept for
 * DT compatibility; confirm against the platform's DT bindings.
 */
static void __init of_mv98dx3236_cpu_clk_setup(struct device_node *node)
{
	of_clk_add_provider(node, of_clk_src_simple_get, NULL);
}

CLK_OF_DECLARE(mv98dx3236_cpu_clock, "marvell,mv98dx3236-cpu-clock",
	       of_mv98dx3236_cpu_clk_setup);