// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH Timer Support - TMU
 *
 *  Copyright (C) 2009 Magnus Damm
 */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#ifdef CONFIG_SUPERH
#include <asm/platform_early.h>
#endif

enum sh_tmu_model {
	SH_TMU,
	SH_TMU_SH3,
};

struct sh_tmu_device;

struct sh_tmu_channel {
	struct sh_tmu_device *tmu;
	unsigned int index;

	void __iomem *base;
	int irq;

	unsigned long periodic;
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;
	unsigned int enable_count;
};

struct sh_tmu_device {
	struct platform_device *pdev;

	void __iomem *mapbase;
	struct clk *clk;
	unsigned long rate;

	enum sh_tmu_model model;

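	/* Protects the start/stop register shared by all channels. */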
	raw_spinlock_t lock;

	struct sh_tmu_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;
	bool has_clocksource;
};

#define TSTR -1
#define TCOR 0
#define TCNT 1
#define TCR 2

#define TCR_UNF (1 << 8)
#define TCR_UNIE (1 << 5)
#define TCR_TPSC_CLK4 (0 << 0)
#define TCR_TPSC_CLK16 (1 << 0)
#define TCR_TPSC_CLK64 (2 << 0)
#define TCR_TPSC_CLK256 (3 << 0)
#define TCR_TPSC_CLK1024 (4 << 0)
#define TCR_TPSC_MASK (7 << 0)

static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
	unsigned long offs;

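	/* the shared start/stop register lives at a model-specific offset */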
	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return ioread8(ch->tmu->mapbase + 2);
		case SH_TMU:
			return ioread8(ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		return ioread16(ch->base + offs);
	else
		return ioread32(ch->base + offs);
}

static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
				unsigned long value)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return iowrite8(value, ch->tmu->mapbase + 2);
		case SH_TMU:
			return iowrite8(value, ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		iowrite16(value, ch->base + offs);
	else
		iowrite32(value, ch->base + offs);
}

static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
	unsigned long flags, value;

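	/* start stop register shared by multiple timer channels */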
	raw_spin_lock_irqsave(&ch->tmu->lock, flags);
	value = sh_tmu_read(ch, TSTR);

	if (start)
		value |= 1 << ch->index;
	else
		value &= ~(1 << ch->index);

	sh_tmu_write(ch, TSTR, value);
	raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
}

static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
	int ret;

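	/* enable clock */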
	ret = clk_enable(ch->tmu->clk);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

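	/* make sure channel is disabled */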
	sh_tmu_start_stop_ch(ch, 0);

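	/* maximum timeout */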
	sh_tmu_write(ch, TCOR, 0xffffffff);
	sh_tmu_write(ch, TCNT, 0xffffffff);

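	/* configure channel to parent clock / 4, irq off */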
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

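	/* enable channel */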
	sh_tmu_start_stop_ch(ch, 1);

	return 0;
}

static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
	if (ch->enable_count++ > 0)
		return 0;

	pm_runtime_get_sync(&ch->tmu->pdev->dev);
	dev_pm_syscore_device(&ch->tmu->pdev->dev, true);

	return __sh_tmu_enable(ch);
}

static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
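	/* disable channel */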
	sh_tmu_start_stop_ch(ch, 0);

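	/* disable interrupts in TMU block */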
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

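	/* stop clock */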
	clk_disable(ch->tmu->clk);
}

static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
	if (WARN_ON(ch->enable_count == 0))
		return;

	if (--ch->enable_count > 0)
		return;

	__sh_tmu_disable(ch);

	dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
	pm_runtime_put(&ch->tmu->pdev->dev);
}

static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
			    int periodic)
{
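	/* stop timer */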
	sh_tmu_start_stop_ch(ch, 0);

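	/* acknowledge interrupt */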
	sh_tmu_read(ch, TCR);

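	/* enable interrupt */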
	sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

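	/* reload delta value in case of periodic timer */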
	if (periodic)
		sh_tmu_write(ch, TCOR, delta);
	else
		sh_tmu_write(ch, TCOR, 0xffffffff);

	sh_tmu_write(ch, TCNT, delta);

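	/* start timer */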
	sh_tmu_start_stop_ch(ch, 1);
}

static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
	struct sh_tmu_channel *ch = dev_id;

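	/* disable or acknowledge interrupt */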
	if (clockevent_state_oneshot(&ch->ced))
		sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
	else
		sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

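	/* notify clockevent layer */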
	ch->ced.event_handler(&ch->ced);
	return IRQ_HANDLED;
}

static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_channel, cs);
}

static u64 sh_tmu_clocksource_read(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

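	/* TCNT counts down; invert it so the clocksource counts up */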
	return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}

static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
	int ret;

	if (WARN_ON(ch->cs_enabled))
		return 0;

	ret = sh_tmu_enable(ch);
	if (!ret)
		ch->cs_enabled = true;

	return ret;
}

static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (WARN_ON(!ch->cs_enabled))
		return;

	sh_tmu_disable(ch);
	ch->cs_enabled = false;
}

static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (--ch->enable_count == 0) {
		__sh_tmu_disable(ch);
		dev_pm_genpd_suspend(&ch->tmu->pdev->dev);
	}
}

static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (ch->enable_count++ == 0) {
		dev_pm_genpd_resume(&ch->tmu->pdev->dev);
		__sh_tmu_enable(ch);
	}
}

static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 200;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	clocksource_register_hz(cs, ch->tmu->rate);
	return 0;
}

static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_channel, ced);
}

static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
	sh_tmu_enable(ch);

	if (periodic) {
		ch->periodic = (ch->tmu->rate + HZ/2) / HZ;
		sh_tmu_set_next(ch, ch->periodic, 1);
	}
}

static int sh_tmu_clock_event_shutdown(struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_tmu_disable(ch);
	return 0;
}

static int sh_tmu_clock_event_set_state(struct clock_event_device *ced,
					int periodic)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

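	/* deal with old setting first */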
	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_tmu_disable(ch);

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for %s clock events\n",
		 ch->index, periodic ? "periodic" : "oneshot");
	sh_tmu_clock_event_start(ch, periodic);
	return 0;
}

static int sh_tmu_clock_event_set_oneshot(struct clock_event_device *ced)
{
	return sh_tmu_clock_event_set_state(ced, 0);
}

static int sh_tmu_clock_event_set_periodic(struct clock_event_device *ced)
{
	return sh_tmu_clock_event_set_state(ced, 1);
}

static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	BUG_ON(!clockevent_state_oneshot(ced));

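	/* program new delta value */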
	sh_tmu_set_next(ch, delta, 0);
	return 0;
}

static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
	dev_pm_genpd_suspend(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
	dev_pm_genpd_resume(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 200;
	ced->cpumask = cpu_possible_mask;
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_state_shutdown = sh_tmu_clock_event_shutdown;
	ced->set_state_periodic = sh_tmu_clock_event_set_periodic;
	ced->set_state_oneshot = sh_tmu_clock_event_set_oneshot;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

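	/* 32-bit down-counter; require a minimum delta of 0x300 cycles */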
	clockevents_config_and_register(ced, ch->tmu->rate, 0x300, 0xffffffff);

	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}

static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
			   bool clockevent, bool clocksource)
{
	if (clockevent) {
		ch->tmu->has_clockevent = true;
		sh_tmu_register_clockevent(ch, name);
	} else if (clocksource) {
		ch->tmu->has_clocksource = true;
		sh_tmu_register_clocksource(ch, name);
	}

	return 0;
}

static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
				bool clockevent, bool clocksource,
				struct sh_tmu_device *tmu)
{
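	/* Skip unused channels. */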
	if (!clockevent && !clocksource)
		return 0;

	ch->tmu = tmu;
	ch->index = index;

	if (tmu->model == SH_TMU_SH3)
		ch->base = tmu->mapbase + 4 + ch->index * 12;
	else
		ch->base = tmu->mapbase + 8 + ch->index * 12;

	ch->irq = platform_get_irq(tmu->pdev, index);
	if (ch->irq < 0)
		return ch->irq;

	ch->cs_enabled = false;
	ch->enable_count = 0;

	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
			       clockevent, clocksource);
}

static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
{
	struct resource *res;

	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	tmu->mapbase = ioremap(res->start, resource_size(res));
	if (tmu->mapbase == NULL)
		return -ENXIO;

	return 0;
}

static int sh_tmu_parse_dt(struct sh_tmu_device *tmu)
{
	struct device_node *np = tmu->pdev->dev.of_node;

	tmu->model = SH_TMU;
	tmu->num_channels = 3;

	of_property_read_u32(np, "#renesas,channels", &tmu->num_channels);

	if (tmu->num_channels != 2 && tmu->num_channels != 3) {
		dev_err(&tmu->pdev->dev, "invalid number of channels %u\n",
			tmu->num_channels);
		return -EINVAL;
	}

	return 0;
}

static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	unsigned int i;
	int ret;

	tmu->pdev = pdev;

	raw_spin_lock_init(&tmu->lock);

	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		ret = sh_tmu_parse_dt(tmu);
		if (ret < 0)
			return ret;
	} else if (pdev->dev.platform_data) {
		const struct platform_device_id *id = pdev->id_entry;
		struct sh_timer_config *cfg = pdev->dev.platform_data;

		tmu->model = id->driver_data;
		tmu->num_channels = hweight8(cfg->channels_mask);
	} else {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

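	/* Get hold of clock. */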
	tmu->clk = clk_get(&tmu->pdev->dev, "fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(tmu->clk);
	}

	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err_clk_put;

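	/* Determine clock rate. */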
	ret = clk_enable(tmu->clk);
	if (ret < 0)
		goto err_clk_unprepare;

	tmu->rate = clk_get_rate(tmu->clk) / 4;
	clk_disable(tmu->clk);

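	/* Map the memory resource. */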
	ret = sh_tmu_map_memory(tmu);
	if (ret < 0) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

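	/* Allocate and setup the channels. */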
	tmu->channels = kcalloc(tmu->num_channels, sizeof(*tmu->channels),
				GFP_KERNEL);
	if (tmu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

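	/*
	 * Use the first channel as a clock event device and the second channel
	 * as a clock source.
	 */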
	for (i = 0; i < tmu->num_channels; ++i) {
		ret = sh_tmu_channel_setup(&tmu->channels[i], i,
					   i == 0, i == 1, tmu);
		if (ret < 0)
			goto err_unmap;
	}

	platform_set_drvdata(pdev, tmu);

	return 0;

err_unmap:
	kfree(tmu->channels);
	iounmap(tmu->mapbase);
err_clk_unprepare:
	clk_unprepare(tmu->clk);
err_clk_put:
	clk_put(tmu->clk);
	return ret;
}

static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	int ret;

	if (!is_sh_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (tmu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL)
		return -ENOMEM;

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}

	if (is_sh_early_platform_device(pdev))
		return 0;

out:
	if (tmu->has_clockevent || tmu->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_tmu_remove(struct platform_device *pdev)
{
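	/* cannot unregister clockevent and clocksource */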
	return -EBUSY;
}

static const struct platform_device_id sh_tmu_id_table[] = {
	{ "sh-tmu", SH_TMU },
	{ "sh-tmu-sh3", SH_TMU_SH3 },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);

static const struct of_device_id sh_tmu_of_table[] __maybe_unused = {
	{ .compatible = "renesas,tmu" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_tmu_of_table);

static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.remove		= sh_tmu_remove,
	.driver		= {
		.name	= "sh_tmu",
		.of_match_table = of_match_ptr(sh_tmu_of_table),
	},
	.id_table	= sh_tmu_id_table,
};

static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}

#ifdef CONFIG_SUPERH
sh_early_platform_init("earlytimer", &sh_tmu_device_driver);
#endif

subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");