#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <linux/syscore_ops.h>
#include <soc/at91/atmel_tcb.h>

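/*
 * This driver uses one Atmel Timer Counter Block (TCB) for system
 * timekeeping:
 *
 *  - On 16-bit TC blocks, channels 0 and 1 are chained into a
 *    free-running 32-bit counter used as clocksource, sched_clock and
 *    delay timer.
 *  - On 32-bit TC blocks, channel 0 alone provides that counter.
 *  - Channel 2 provides the clockevent device, in periodic or oneshot
 *    mode.
 */
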
static void __iomem *tcaddr;
static struct
{
	u32 cmr;
	u32 imr;
	u32 rc;
	bool clken;
} tcb_cache[3];
static u32 bmr_cache;

static const u8 atmel_tcb_divisors[] = { 2, 8, 32, 128 };

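/*
 * In the 16-bit case, channel 0 holds the low half of the count and
 * channel 1 the high half.  Re-read the high half until it is stable so
 * a carry between the two reads cannot return a torn value.
 */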
static u64 tc_get_cycles(struct clocksource *cs)
{
	unsigned long flags;
	u32 lower, upper;

	raw_local_irq_save(flags);
	do {
		upper = readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV));
		lower = readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
	} while (upper != readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV)));

	raw_local_irq_restore(flags);
	return (upper << 16) | lower;
}

static u64 tc_get_cycles32(struct clocksource *cs)
{
	return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
}

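/*
 * Across suspend/resume, save and restore each channel's mode, interrupt
 * mask, RC value and clock-enable state, plus the block mode register.
 */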
static void tc_clksrc_suspend(struct clocksource *cs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
		tcb_cache[i].cmr = readl(tcaddr + ATMEL_TC_REG(i, CMR));
		tcb_cache[i].imr = readl(tcaddr + ATMEL_TC_REG(i, IMR));
		tcb_cache[i].rc = readl(tcaddr + ATMEL_TC_REG(i, RC));
		tcb_cache[i].clken = !!(readl(tcaddr + ATMEL_TC_REG(i, SR)) &
					ATMEL_TC_CLKSTA);
	}

	bmr_cache = readl(tcaddr + ATMEL_TC_BMR);
}

static void tc_clksrc_resume(struct clocksource *cs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
		/* restore the channel mode and RC; RA and RB are not used */
		writel(tcb_cache[i].cmr, tcaddr + ATMEL_TC_REG(i, CMR));
		writel(tcb_cache[i].rc, tcaddr + ATMEL_TC_REG(i, RC));
		writel(0, tcaddr + ATMEL_TC_REG(i, RA));
		writel(0, tcaddr + ATMEL_TC_REG(i, RB));
		/* disable all the interrupts */
		writel(0xff, tcaddr + ATMEL_TC_REG(i, IDR));
		/* reenable the interrupts that were set before suspending */
		writel(tcb_cache[i].imr, tcaddr + ATMEL_TC_REG(i, IER));
		/* start the clock if it was used */
		if (tcb_cache[i].clken)
			writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(i, CCR));
	}

	/* restore the block mode register, then trigger all the channels */
	writel(bmr_cache, tcaddr + ATMEL_TC_BMR);
	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

static struct clocksource clksrc = {
	.rating = 200,
	.read = tc_get_cycles,
	.mask = CLOCKSOURCE_MASK(32),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
	.suspend = tc_clksrc_suspend,
	.resume = tc_clksrc_resume,
};

static u64 notrace tc_sched_clock_read(void)
{
	return tc_get_cycles(&clksrc);
}

static u64 notrace tc_sched_clock_read32(void)
{
	return tc_get_cycles32(&clksrc);
}

static struct delay_timer tc_delay_timer;

static unsigned long tc_delay_timer_read(void)
{
	return tc_get_cycles(&clksrc);
}

static unsigned long notrace tc_delay_timer_read32(void)
{
	return tc_get_cycles32(&clksrc);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS

struct tc_clkevt_device {
	struct clock_event_device clkevt;
	struct clk *clk;
	u32 rate;
	void __iomem *regs;
};

static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
{
	return container_of(clkevt, struct tc_clkevt_device, clkevt);
}

/* TCCLKS bits OR'ed into channel 2's CMR to select its input clock */
static u32 timer_clock;

static int tc_shutdown(struct clock_event_device *d)
{
	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
	void __iomem *regs = tcd->regs;

	writel(0xff, regs + ATMEL_TC_REG(2, IDR));
	writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
	if (!clockevent_state_detached(d))
		clk_disable(tcd->clk);

	return 0;
}

static int tc_set_oneshot(struct clock_event_device *d)
{
	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
	void __iomem *regs = tcd->regs;

	if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
		tc_shutdown(d);

	clk_enable(tcd->clk);

	/* count up to RC, then irq and stop */
	writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
	       ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
	writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

	/* set_next_event() programs RC and starts the counter */
	return 0;
}

static int tc_set_periodic(struct clock_event_device *d)
{
	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
	void __iomem *regs = tcd->regs;

	if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
		tc_shutdown(d);

	clk_enable(tcd->clk);

	/* count up to RC, then irq and restart */
	writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
	       regs + ATMEL_TC_REG(2, CMR));
	writel((tcd->rate + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));

	/* enable the interrupt on RC compare */
	writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

	/* start the clock and trigger the counter */
	writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, regs +
	       ATMEL_TC_REG(2, CCR));
	return 0;
}

static int tc_next_event(unsigned long delta, struct clock_event_device *d)
{
	/* program the new timeout, then (re)start the channel */
	writel_relaxed(delta, tcaddr + ATMEL_TC_REG(2, RC));
	writel_relaxed(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
		       tcaddr + ATMEL_TC_REG(2, CCR));
	return 0;
}

static struct tc_clkevt_device clkevt = {
	.clkevt = {
		.features = CLOCK_EVT_FEAT_PERIODIC |
			    CLOCK_EVT_FEAT_ONESHOT,
		/* should rate lower than the SoC's system timer */
		.rating = 125,
		.set_next_event = tc_next_event,
		.set_state_shutdown = tc_shutdown,
		.set_state_periodic = tc_set_periodic,
		.set_state_oneshot = tc_set_oneshot,
	},
};

static irqreturn_t ch2_irq(int irq, void *handle)
{
	struct tc_clkevt_device *dev = handle;
	unsigned int sr;

	/* reading SR acknowledges the channel's pending interrupts */
	sr = readl_relaxed(dev->regs + ATMEL_TC_REG(2, SR));
	if (sr & ATMEL_TC_CPCS) {
		dev->clkevt.event_handler(&dev->clkevt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

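/*
 * Register channel 2 as the clockevent device.  On 32-bit TC blocks it
 * runs from the selected peripheral-clock divisor; on 16-bit blocks it
 * runs from the 32 kHz slow clock so that long periodic delays still fit
 * in the counter.
 */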
static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
{
	int ret;
	struct clk *t2_clk = tc->clk[2];
	int irq = tc->irq[2];
	int bits = tc->tcb_config->counter_width;

	/* try to enable t2 clk now to avoid later errors in mode changes */
	ret = clk_prepare_enable(t2_clk);
	if (ret)
		return ret;

	clkevt.regs = tc->regs;
	clkevt.clk = t2_clk;

	if (bits == 32) {
		timer_clock = divisor_idx;
		clkevt.rate = clk_get_rate(t2_clk) / atmel_tcb_divisors[divisor_idx];
	} else {
		ret = clk_prepare_enable(tc->slow_clk);
		if (ret) {
			clk_disable_unprepare(t2_clk);
			return ret;
		}

		clkevt.rate = clk_get_rate(tc->slow_clk);
		timer_clock = ATMEL_TC_TIMER_CLOCK5;
	}

	/* leave t2 prepared but disabled until a mode is set */
	clk_disable(t2_clk);

	clkevt.clkevt.cpumask = cpumask_of(0);

	ret = request_irq(irq, ch2_irq, IRQF_TIMER, "tc_clkevt", &clkevt);
	if (ret) {
		clk_unprepare(t2_clk);
		if (bits != 32)
			clk_disable_unprepare(tc->slow_clk);
		return ret;
	}

	clockevents_config_and_register(&clkevt.clkevt, clkevt.rate, 1, BIT(bits) - 1);

	return ret;
}

#else /* !CONFIG_GENERIC_CLOCKEVENTS */

static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
{
	/* nothing to do without clockevent support */
	return 0;
}

#endif

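/*
 * 16-bit counters: channel 0 outputs a square wave on TIOA0 (set at
 * RA = 0, cleared at RC = 0x8000), and channel 1 counts that signal via
 * XC1.  Together they behave as one free-running 32-bit counter.
 */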
static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
	/* channel 0: waveform mode, TIOA0 set at RA, cleared at RC */
	writel(mck_divisor_idx
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP		/* free-run */
			| ATMEL_TC_ACPA_SET		/* TIOA0 rises at 0 */
			| ATMEL_TC_ACPC_CLEAR,		/* 50% duty cycle */
			tcaddr + ATMEL_TC_REG(0, CMR));
	writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
	writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
	writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));	/* no irqs */
	writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));

	/* channel 1: waveform mode, clocked from XC1 */
	writel(ATMEL_TC_XC1
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP,
			tcaddr + ATMEL_TC_REG(1, CMR));
	writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR));	/* no irqs */
	writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));

	/* chain channel 0 to channel 1 */
	writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
	/* then reset all the timers */
	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

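/* 32-bit counters: one free-running channel is enough for the clocksource */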
static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
	/* channel 0: waveform mode, free-running */
	writel(mck_divisor_idx
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP,
			tcaddr + ATMEL_TC_REG(0, CMR));
	writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));	/* no irqs */
	writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));

	/* then reset all the timers */
	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

static struct atmel_tcb_config tcb_rm9200_config = {
	.counter_width = 16,
};

static struct atmel_tcb_config tcb_sam9x5_config = {
	.counter_width = 32,
};

static struct atmel_tcb_config tcb_sama5d2_config = {
	.counter_width = 32,
	.has_gclk = 1,
};

static const struct of_device_id atmel_tcb_of_match[] = {
	{ .compatible = "atmel,at91rm9200-tcb", .data = &tcb_rm9200_config, },
	{ .compatible = "atmel,at91sam9x5-tcb", .data = &tcb_sam9x5_config, },
	{ .compatible = "atmel,sama5d2-tcb", .data = &tcb_sama5d2_config, },
	{ }
};

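/*
 * Probe routine, wired up through TIMER_OF_DECLARE below: map the TCB,
 * pick a divisor, then register the clocksource, sched_clock, delay
 * timer and clockevent.
 */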
static int __init tcb_clksrc_init(struct device_node *node)
{
	struct atmel_tc tc;
	struct clk *t0_clk;
	const struct of_device_id *match;
	u64 (*tc_sched_clock)(void);
	u32 rate, divided_rate = 0;
	int best_divisor_idx = -1;
	int bits;
	int i;
	int ret;

	/* protect against multiple calls */
	if (tcaddr)
		return 0;

	tc.regs = of_iomap(node->parent, 0);
	if (!tc.regs)
		return -ENXIO;

	t0_clk = of_clk_get_by_name(node->parent, "t0_clk");
	if (IS_ERR(t0_clk))
		return PTR_ERR(t0_clk);

	tc.slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
	if (IS_ERR(tc.slow_clk))
		return PTR_ERR(tc.slow_clk);

	tc.clk[0] = t0_clk;
	tc.clk[1] = of_clk_get_by_name(node->parent, "t1_clk");
	if (IS_ERR(tc.clk[1]))
		tc.clk[1] = t0_clk;
	tc.clk[2] = of_clk_get_by_name(node->parent, "t2_clk");
	if (IS_ERR(tc.clk[2]))
		tc.clk[2] = t0_clk;

	/* use channel 2's own irq, or fall back to the block's first irq */
	tc.irq[2] = of_irq_get(node->parent, 2);
	if (tc.irq[2] <= 0) {
		tc.irq[2] = of_irq_get(node->parent, 0);
		if (tc.irq[2] <= 0)
			return -EINVAL;
	}

	match = of_match_node(atmel_tcb_of_match, node->parent);
	if (!match)
		return -ENODEV;

	tc.tcb_config = match->data;
	bits = tc.tcb_config->counter_width;

	/* mask all channel interrupts before touching the hardware */
	for (i = 0; i < ARRAY_SIZE(tc.irq); i++)
		writel(ATMEL_TC_ALL_IRQ, tc.regs + ATMEL_TC_REG(i, IDR));

	ret = clk_prepare_enable(t0_clk);
	if (ret) {
		pr_debug("can't enable T0 clk\n");
		return ret;
	}

	/* how fast will we be counting? pick something over 5 MHz if we can */
	rate = (u32) clk_get_rate(t0_clk);
	i = 0;
	if (tc.tcb_config->has_gclk)
		i = 1;
	for (; i < ARRAY_SIZE(atmel_tcb_divisors); i++) {
		unsigned divisor = atmel_tcb_divisors[i];
		unsigned tmp;

		tmp = rate / divisor;
		pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
		if ((best_divisor_idx >= 0) && (tmp < 5 * 1000 * 1000))
			break;
		divided_rate = tmp;
		best_divisor_idx = i;
	}

	clksrc.name = kbasename(node->parent->full_name);
	clkevt.clkevt.name = kbasename(node->parent->full_name);
	pr_debug("%s at %d.%03d MHz\n", clksrc.name, divided_rate / 1000000,
		 ((divided_rate % 1000000) + 500) / 1000);

	tcaddr = tc.regs;

	if (bits == 32) {
		/* use the 32-bit read and set up only channel 0 */
		clksrc.read = tc_get_cycles32;
		tcb_setup_single_chan(&tc, best_divisor_idx);
		tc_sched_clock = tc_sched_clock_read32;
		tc_delay_timer.read_current_timer = tc_delay_timer_read32;
	} else {
		/* 16-bit counters: chain channels 0 and 1 */
		ret = clk_prepare_enable(tc.clk[1]);
		if (ret) {
			pr_debug("can't enable T1 clk\n");
			goto err_disable_t0;
		}

		tcb_setup_dual_chan(&tc, best_divisor_idx);
		tc_sched_clock = tc_sched_clock_read;
		tc_delay_timer.read_current_timer = tc_delay_timer_read;
	}

	/* register the free-running counter as the clocksource */
	ret = clocksource_register_hz(&clksrc, divided_rate);
	if (ret)
		goto err_disable_t1;

	/* channel 2: periodic and oneshot timer support */
	ret = setup_clkevents(&tc, best_divisor_idx);
	if (ret)
		goto err_unregister_clksrc;

	sched_clock_register(tc_sched_clock, 32, divided_rate);

	tc_delay_timer.freq = divided_rate;
	register_current_timer_delay(&tc_delay_timer);

	return 0;

err_unregister_clksrc:
	clocksource_unregister(&clksrc);

err_disable_t1:
	if (bits != 32)
		clk_disable_unprepare(tc.clk[1]);

err_disable_t0:
	clk_disable_unprepare(t0_clk);

	tcaddr = NULL;

	return ret;
}
TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init);