0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include <linux/clk.h>
0012 #include <linux/compiler.h>
0013 #include <linux/slab.h>
0014 #include <linux/io.h>
0015 #include <linux/sh_clk.h>
0016
0017 #define CPG_CKSTP_BIT BIT(8)
0018
0019 static unsigned int sh_clk_read(struct clk *clk)
0020 {
0021 if (clk->flags & CLK_ENABLE_REG_8BIT)
0022 return ioread8(clk->mapped_reg);
0023 else if (clk->flags & CLK_ENABLE_REG_16BIT)
0024 return ioread16(clk->mapped_reg);
0025
0026 return ioread32(clk->mapped_reg);
0027 }
0028
0029 static void sh_clk_write(int value, struct clk *clk)
0030 {
0031 if (clk->flags & CLK_ENABLE_REG_8BIT)
0032 iowrite8(value, clk->mapped_reg);
0033 else if (clk->flags & CLK_ENABLE_REG_16BIT)
0034 iowrite16(value, clk->mapped_reg);
0035 else
0036 iowrite32(value, clk->mapped_reg);
0037 }
0038
/*
 * Enable an MSTP (module stop) gate clock: clear the stop bit, then,
 * when a status register is present, busy-wait until the hardware
 * reports the module as running.
 *
 * Returns 0 on success, -ETIMEDOUT if the status bit never clears.
 */
static int sh_clk_mstp_enable(struct clk *clk)
{
	/* MSTP bits are active-high "stop" bits: clearing enables the clock */
	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
	if (clk->status_reg) {
		unsigned int (*read)(const void __iomem *addr);
		int i;
		/*
		 * Derive the mapped status address from the already ioremapped
		 * enable register: status_reg is assumed to live at the same
		 * offset within the same mapping as enable_reg — TODO confirm
		 * against the platform data that populates these fields.
		 */
		void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
			(phys_addr_t)clk->enable_reg + clk->mapped_reg;

		/* poll with the same access width used for the enable register */
		if (clk->flags & CLK_ENABLE_REG_8BIT)
			read = ioread8;
		else if (clk->flags & CLK_ENABLE_REG_16BIT)
			read = ioread16;
		else
			read = ioread32;

		/* up to 1000 polls; the status bit mirrors enable_bit */
		for (i = 1000;
		     (read(mapped_status) & (1 << clk->enable_bit)) && i;
		     i--)
			cpu_relax();
		if (!i) {
			pr_err("cpg: failed to enable %p[%d]\n",
			       clk->enable_reg, clk->enable_bit);
			return -ETIMEDOUT;
		}
	}
	return 0;
}
0067
0068 static void sh_clk_mstp_disable(struct clk *clk)
0069 {
0070 sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
0071 }
0072
/* Operations shared by all MSTP gate clocks; rate follows the parent. */
static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};
0078
0079 int __init sh_clk_mstp_register(struct clk *clks, int nr)
0080 {
0081 struct clk *clkp;
0082 int ret = 0;
0083 int k;
0084
0085 for (k = 0; !ret && (k < nr); k++) {
0086 clkp = clks + k;
0087 clkp->ops = &sh_clk_mstp_clk_ops;
0088 ret |= clk_register(clkp);
0089 }
0090
0091 return ret;
0092 }
0093
0094
0095
0096
0097 static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
0098 {
0099 return clk->priv;
0100 }
0101
0102 static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
0103 {
0104 return clk_to_div_table(clk)->div_mult_table;
0105 }
0106
0107
0108
0109
/* Round @rate to the nearest entry in the clock's prebuilt frequency table. */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}
0114
0115 static unsigned long sh_clk_div_recalc(struct clk *clk)
0116 {
0117 struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
0118 unsigned int idx;
0119
0120 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
0121 table, clk->arch_flags ? &clk->arch_flags : NULL);
0122
0123 idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;
0124
0125 return clk->freq_table[idx].frequency;
0126 }
0127
0128 static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
0129 {
0130 struct clk_div_table *dt = clk_to_div_table(clk);
0131 unsigned long value;
0132 int idx;
0133
0134 idx = clk_rate_table_find(clk, clk->freq_table, rate);
0135 if (idx < 0)
0136 return idx;
0137
0138 value = sh_clk_read(clk);
0139 value &= ~(clk->div_mask << clk->enable_bit);
0140 value |= (idx << clk->enable_bit);
0141 sh_clk_write(value, clk);
0142
0143
0144 if (dt->kick)
0145 dt->kick(clk);
0146
0147 return 0;
0148 }
0149
/*
 * Enable a divider clock by clearing its clock-stop bit.  DIV6 clocks
 * (identified by div_mask) first reprogram the cached rate — presumably
 * because the divisor field was clobbered on disable (see
 * sh_clk_div_disable); verify against the SoC manual.
 */
static int sh_clk_div_enable(struct clk *clk)
{
	if (clk->div_mask == SH_CLK_DIV6_MSK) {
		int ret = sh_clk_div_set_rate(clk, clk->rate);
		if (ret < 0)
			return ret;
	}

	sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
	return 0;
}
0161
/*
 * Disable a divider clock by setting its clock-stop bit.
 */
static void sh_clk_div_disable(struct clk *clk)
{
	unsigned int val;

	val = sh_clk_read(clk);
	val |= CPG_CKSTP_BIT;

	/*
	 * NOTE(review): on clocks flagged CLK_MASK_DIV_ON_DISABLE the whole
	 * divisor field is forced to all-ones as well — presumably the stop
	 * bit alone does not take effect with certain divisor values on this
	 * hardware (sh_clk_div_enable restores the rate afterwards).  Confirm
	 * against the CPG hardware manual.
	 */
	if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
		val |= clk->div_mask;

	sh_clk_write(val, clk);
}
0179
/* Divider clocks that are always running: rate control only, no gating. */
static struct sh_clk_ops sh_clk_div_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};
0185
/* Divider clocks with a clock-stop bit: rate control plus gating. */
static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
};
0193
0194 static int __init sh_clk_init_parent(struct clk *clk)
0195 {
0196 u32 val;
0197
0198 if (clk->parent)
0199 return 0;
0200
0201 if (!clk->parent_table || !clk->parent_num)
0202 return 0;
0203
0204 if (!clk->src_width) {
0205 pr_err("sh_clk_init_parent: cannot select parent clock\n");
0206 return -EINVAL;
0207 }
0208
0209 val = (sh_clk_read(clk) >> clk->src_shift);
0210 val &= (1 << clk->src_width) - 1;
0211
0212 if (val >= clk->parent_num) {
0213 pr_err("sh_clk_init_parent: parent table size failed\n");
0214 return -EINVAL;
0215 }
0216
0217 clk_reparent(clk, clk->parent_table[val]);
0218 if (!clk->parent) {
0219 pr_err("sh_clk_init_parent: unable to set parent");
0220 return -EINVAL;
0221 }
0222
0223 return 0;
0224 }
0225
/*
 * Common registration helper for divider clocks: allocates one shared
 * slab of frequency tables (nr clocks x (nr_divs + 1) entries), wires
 * each clock to @ops and @table, registers it, and resolves its initial
 * parent.  Stops at the first failure.
 *
 * NOTE(review): freq_table is not freed when a registration fails
 * mid-loop; clocks registered so far keep pointing into it, so freeing
 * is not trivially safe either.  Matches the original behaviour.
 */
static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
		struct clk_div_table *table, struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	/* one extra entry per clock for the CPUFREQ_TABLE_END terminator */
	freq_table_size *= (nr_divs + 1);
	freq_table = kcalloc(nr, freq_table_size, GFP_KERNEL);
	if (!freq_table) {
		pr_err("%s: unable to alloc memory\n", __func__);
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;

		/* carve this clock's slice out of the shared allocation */
		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
		if (ret == 0)
			ret = sh_clk_init_parent(clkp);
	}

	return ret;
}
0259
0260
0261
0262
/* DIV6 clocks divide by the 6-bit field value plus one: 1..64. */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};
0269
/* Divisor table for DIV6 clocks; no multipliers. */
static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};
0274
/* DIV6 divider table: no kick callback needed. */
static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table = &div6_div_mult_table,
};
0278
/*
 * Switch a reparentable DIV6 clock to @parent: locate the parent in the
 * clock's parent table, reparent in software, program the source-select
 * field, and rebuild the frequency table for the new parent rate.
 *
 * Returns 0 on success, -EINVAL when the clock has no parent table,
 * -ENODEV when @parent is not in it, or the error from clk_reparent().
 */
static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret, i;

	if (!clk->parent_table || !clk->parent_num)
		return -EINVAL;

	/* the hardware select value is the parent's table index */
	for (i = 0; i < clk->parent_num; i++)
		if (clk->parent_table[i] == parent)
			break;

	if (i == clk->parent_num)
		return -ENODEV;

	/* reparent in software first; bail out before touching hardware */
	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	value = sh_clk_read(clk) &
		~(((1 << clk->src_width) - 1) << clk->src_shift);

	sh_clk_write(value | (i << clk->src_shift), clk);

	/* parent rate changed, so the divider rate table must be rebuilt */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	return 0;
}
0311
/* DIV6 clocks with a runtime-selectable parent. */
static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div_set_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div6_set_parent,
};
0320
/* Register @nr fixed-parent DIV6 clocks. */
int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div_enable_clk_ops);
}
0326
/* Register @nr DIV6 clocks whose parent can be reselected at runtime. */
int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div6_reparent_clk_ops);
}
0332
0333
0334
0335
/*
 * Switch a reparentable DIV4 clock between its two parents via bit 7
 * of the control register.
 *
 * NOTE(review): the parent is identified only by its CLK_ENABLE_ON_INIT
 * flag — presumably the internal parent carries that flag and the
 * external one does not; confirm against the platform clock tables.
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret;

	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	/* reparent in software before committing the register write */
	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* parent rate changed, so the divider rate table must be rebuilt */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}
0364
/* DIV4 clocks with a runtime-selectable parent. */
static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div4_set_parent,
};
0373
/* Register @nr always-on DIV4 clocks (no gating). */
int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
}
0379
/* Register @nr gateable DIV4 clocks. */
int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				       struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div_enable_clk_ops);
}
0386
/* Register @nr DIV4 clocks whose parent can be reselected at runtime. */
int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
					 struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div4_reparent_clk_ops);
}
0393
0394
0395 static unsigned long fsidiv_recalc(struct clk *clk)
0396 {
0397 u32 value;
0398
0399 value = __raw_readl(clk->mapping->base);
0400
0401 value >>= 16;
0402 if (value < 2)
0403 return clk->parent->rate;
0404
0405 return clk->parent->rate / value;
0406 }
0407
/* Round @rate to an achievable parent/divisor rate (divisor 1..0xffff). */
static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_div_range_round(clk, 1, 0xffff, rate);
}
0412
/* Disable the FSI divider: clearing the register stops the output. */
static void fsidiv_disable(struct clk *clk)
{
	__raw_writel(0, clk->mapping->base);
}
0417
0418 static int fsidiv_enable(struct clk *clk)
0419 {
0420 u32 value;
0421
0422 value = __raw_readl(clk->mapping->base) >> 16;
0423 if (value < 2)
0424 return 0;
0425
0426 __raw_writel((value << 16) | 0x3, clk->mapping->base);
0427
0428 return 0;
0429 }
0430
0431 static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
0432 {
0433 int idx;
0434
0435 idx = (clk->parent->rate / rate) & 0xffff;
0436 if (idx < 2)
0437 __raw_writel(0, clk->mapping->base);
0438 else
0439 __raw_writel(idx << 16, clk->mapping->base);
0440
0441 return 0;
0442 }
0443
/* Operations for the memory-mapped FSI divider clocks. */
static struct sh_clk_ops fsidiv_clk_ops = {
	.recalc		= fsidiv_recalc,
	.round_rate	= fsidiv_round_rate,
	.set_rate	= fsidiv_set_rate,
	.enable		= fsidiv_enable,
	.disable	= fsidiv_disable,
};
0451
/*
 * Register @nr FSI divider clocks.  Each clock gets its own 8-byte
 * clk_mapping built from the physical address stashed in enable_reg,
 * which is then cleared so the generic code does not treat it as an
 * enable register.
 *
 * Returns 0 on success, -ENOMEM if a mapping allocation fails.
 *
 * NOTE(review): on mid-loop ENOMEM the mappings already attached to
 * earlier clocks are not unwound, and the clk_register() return value
 * is ignored — matches the original behaviour.
 */
int __init sh_clk_fsidiv_register(struct clk *clks, int nr)
{
	struct clk_mapping *map;
	int i;

	for (i = 0; i < nr; i++) {

		map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL);
		if (!map) {
			pr_err("%s: unable to alloc memory\n", __func__);
			return -ENOMEM;
		}

		/* enable_reg was (ab)used to carry the physical address */
		map->phys = (phys_addr_t)clks[i].enable_reg;
		map->len = 8;

		clks[i].enable_reg = 0;	/* remapped via mapping */
		clks[i].ops = &fsidiv_clk_ops;
		clks[i].mapping = map;

		clk_register(&clks[i]);
	}

	return 0;
}