/* Linux kernel source: drivers/clocksource/sh_mtu2.c (LXR web-view navigation header removed) */

0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * SuperH Timer Support - MTU2
0004  *
0005  *  Copyright (C) 2009 Magnus Damm
0006  */
0007 
0008 #include <linux/clk.h>
0009 #include <linux/clockchips.h>
0010 #include <linux/delay.h>
0011 #include <linux/err.h>
0012 #include <linux/init.h>
0013 #include <linux/interrupt.h>
0014 #include <linux/io.h>
0015 #include <linux/ioport.h>
0016 #include <linux/irq.h>
0017 #include <linux/module.h>
0018 #include <linux/of.h>
0019 #include <linux/platform_device.h>
0020 #include <linux/pm_domain.h>
0021 #include <linux/pm_runtime.h>
0022 #include <linux/sh_timer.h>
0023 #include <linux/slab.h>
0024 #include <linux/spinlock.h>
0025 
0026 #ifdef CONFIG_SUPERH
0027 #include <asm/platform_early.h>
0028 #endif
0029 
struct sh_mtu2_device;

/*
 * Per-channel state: one instance per hardware MTU2 timer channel that
 * is wired up as a clock event source.
 */
struct sh_mtu2_channel {
	struct sh_mtu2_device *mtu;	/* owning device */
	unsigned int index;		/* hardware channel number */

	void __iomem *base;		/* this channel's register window */

	struct clock_event_device ced;	/* embedded clockevent device */
};
0040 
/*
 * Per-device state covering the whole MTU2 register block and all of
 * its channels.
 */
struct sh_mtu2_device {
	struct platform_device *pdev;	/* backing platform device */

	void __iomem *mapbase;		/* whole MTU2 register block */
	struct clk *clk;		/* "fck" functional clock */

	raw_spinlock_t lock; /* Protect the shared registers */

	struct sh_mtu2_channel *channels;	/* kcalloc'ed array */
	unsigned int num_channels;

	bool has_clockevent;	/* set once any channel registers a clockevent */
};
0054 
/*
 * Register indices used with sh_mtu2_read()/sh_mtu2_write(). TSTR is
 * the single start/stop register shared by all channels; the others
 * are per-channel and looked up in mtu2_reg_offs[].
 */
#define TSTR -1 /* shared register */
#define TCR  0 /* channel register */
#define TMDR 1 /* channel register */
#define TIOR 2 /* channel register */
#define TIER 3 /* channel register */
#define TSR  4 /* channel register */
#define TCNT 5 /* channel register */
#define TGR  6 /* channel register */

/* TCR (timer control): counter clear source, clock edge, prescaler. */
#define TCR_CCLR_NONE		(0 << 5)
#define TCR_CCLR_TGRA		(1 << 5)
#define TCR_CCLR_TGRB		(2 << 5)
#define TCR_CCLR_SYNC		(3 << 5)
#define TCR_CCLR_TGRC		(5 << 5)
#define TCR_CCLR_TGRD		(6 << 5)
#define TCR_CCLR_MASK		(7 << 5)
#define TCR_CKEG_RISING		(0 << 3)
#define TCR_CKEG_FALLING	(1 << 3)
#define TCR_CKEG_BOTH		(2 << 3)
#define TCR_CKEG_MASK		(3 << 3)
/* Values 4 to 7 are channel-dependent */
#define TCR_TPSC_P1		(0 << 0)
#define TCR_TPSC_P4		(1 << 0)
#define TCR_TPSC_P16		(2 << 0)
#define TCR_TPSC_P64		(3 << 0)
#define TCR_TPSC_CH0_TCLKA	(4 << 0)
#define TCR_TPSC_CH0_TCLKB	(5 << 0)
#define TCR_TPSC_CH0_TCLKC	(6 << 0)
#define TCR_TPSC_CH0_TCLKD	(7 << 0)
#define TCR_TPSC_CH1_TCLKA	(4 << 0)
#define TCR_TPSC_CH1_TCLKB	(5 << 0)
#define TCR_TPSC_CH1_P256	(6 << 0)
#define TCR_TPSC_CH1_TCNT2	(7 << 0)
#define TCR_TPSC_CH2_TCLKA	(4 << 0)
#define TCR_TPSC_CH2_TCLKB	(5 << 0)
#define TCR_TPSC_CH2_TCLKC	(6 << 0)
#define TCR_TPSC_CH2_P1024	(7 << 0)
#define TCR_TPSC_CH34_P256	(4 << 0)
#define TCR_TPSC_CH34_P1024	(5 << 0)
#define TCR_TPSC_CH34_TCLKA	(6 << 0)
#define TCR_TPSC_CH34_TCLKB	(7 << 0)
#define TCR_TPSC_MASK		(7 << 0)

/* TMDR (timer mode): buffer operation and counting mode. */
#define TMDR_BFE		(1 << 6)
#define TMDR_BFB		(1 << 5)
#define TMDR_BFA		(1 << 4)
#define TMDR_MD_NORMAL		(0 << 0)
#define TMDR_MD_PWM_1		(2 << 0)
#define TMDR_MD_PWM_2		(3 << 0)
#define TMDR_MD_PHASE_1		(4 << 0)
#define TMDR_MD_PHASE_2		(5 << 0)
#define TMDR_MD_PHASE_3		(6 << 0)
#define TMDR_MD_PHASE_4		(7 << 0)
#define TMDR_MD_PWM_SYNC	(8 << 0)
#define TMDR_MD_PWM_COMP_CREST	(13 << 0)
#define TMDR_MD_PWM_COMP_TROUGH	(14 << 0)
#define TMDR_MD_PWM_COMP_BOTH	(15 << 0)
#define TMDR_MD_MASK		(15 << 0)

/* TIOR (timer I/O control): TIOC pin function, one nibble per pin. */
#define TIOC_IOCH(n)		((n) << 4)
#define TIOC_IOCL(n)		((n) << 0)
#define TIOR_OC_RETAIN		(0 << 0)
#define TIOR_OC_0_CLEAR		(1 << 0)
#define TIOR_OC_0_SET		(2 << 0)
#define TIOR_OC_0_TOGGLE	(3 << 0)
#define TIOR_OC_1_CLEAR		(5 << 0)
#define TIOR_OC_1_SET		(6 << 0)
#define TIOR_OC_1_TOGGLE	(7 << 0)
#define TIOR_IC_RISING		(8 << 0)
#define TIOR_IC_FALLING		(9 << 0)
#define TIOR_IC_BOTH		(10 << 0)
#define TIOR_IC_TCNT		(12 << 0)
#define TIOR_MASK		(15 << 0)

/* TIER (timer interrupt enable): one bit per interrupt source. */
#define TIER_TTGE		(1 << 7)
#define TIER_TTGE2		(1 << 6)
#define TIER_TCIEU		(1 << 5)
#define TIER_TCIEV		(1 << 4)
#define TIER_TGIED		(1 << 3)
#define TIER_TGIEC		(1 << 2)
#define TIER_TGIEB		(1 << 1)
#define TIER_TGIEA		(1 << 0)

/* TSR (timer status): flag bits, cleared by writing 0 after a read. */
#define TSR_TCFD		(1 << 7)
#define TSR_TCFU		(1 << 5)
#define TSR_TCFV		(1 << 4)
#define TSR_TGFD		(1 << 3)
#define TSR_TGFC		(1 << 2)
#define TSR_TGFB		(1 << 1)
#define TSR_TGFA		(1 << 0)
0145 
/*
 * Byte offsets of the per-channel registers, indexed by the TCR..TGR
 * constants above. TSTR is handled separately (shared, fixed offset).
 */
static unsigned long mtu2_reg_offs[] = {
	[TCR] = 0,
	[TMDR] = 1,
	[TIOR] = 2,
	[TIER] = 4,
	[TSR] = 5,
	[TCNT] = 6,
	[TGR] = 8,
};
0155 
0156 static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr)
0157 {
0158     unsigned long offs;
0159 
0160     if (reg_nr == TSTR)
0161         return ioread8(ch->mtu->mapbase + 0x280);
0162 
0163     offs = mtu2_reg_offs[reg_nr];
0164 
0165     if ((reg_nr == TCNT) || (reg_nr == TGR))
0166         return ioread16(ch->base + offs);
0167     else
0168         return ioread8(ch->base + offs);
0169 }
0170 
0171 static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr,
0172                 unsigned long value)
0173 {
0174     unsigned long offs;
0175 
0176     if (reg_nr == TSTR)
0177         return iowrite8(value, ch->mtu->mapbase + 0x280);
0178 
0179     offs = mtu2_reg_offs[reg_nr];
0180 
0181     if ((reg_nr == TCNT) || (reg_nr == TGR))
0182         iowrite16(value, ch->base + offs);
0183     else
0184         iowrite8(value, ch->base + offs);
0185 }
0186 
0187 static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start)
0188 {
0189     unsigned long flags, value;
0190 
0191     /* start stop register shared by multiple timer channels */
0192     raw_spin_lock_irqsave(&ch->mtu->lock, flags);
0193     value = sh_mtu2_read(ch, TSTR);
0194 
0195     if (start)
0196         value |= 1 << ch->index;
0197     else
0198         value &= ~(1 << ch->index);
0199 
0200     sh_mtu2_write(ch, TSTR, value);
0201     raw_spin_unlock_irqrestore(&ch->mtu->lock, flags);
0202 }
0203 
0204 static int sh_mtu2_enable(struct sh_mtu2_channel *ch)
0205 {
0206     unsigned long periodic;
0207     unsigned long rate;
0208     int ret;
0209 
0210     pm_runtime_get_sync(&ch->mtu->pdev->dev);
0211     dev_pm_syscore_device(&ch->mtu->pdev->dev, true);
0212 
0213     /* enable clock */
0214     ret = clk_enable(ch->mtu->clk);
0215     if (ret) {
0216         dev_err(&ch->mtu->pdev->dev, "ch%u: cannot enable clock\n",
0217             ch->index);
0218         return ret;
0219     }
0220 
0221     /* make sure channel is disabled */
0222     sh_mtu2_start_stop_ch(ch, 0);
0223 
0224     rate = clk_get_rate(ch->mtu->clk) / 64;
0225     periodic = (rate + HZ/2) / HZ;
0226 
0227     /*
0228      * "Periodic Counter Operation"
0229      * Clear on TGRA compare match, divide clock by 64.
0230      */
0231     sh_mtu2_write(ch, TCR, TCR_CCLR_TGRA | TCR_TPSC_P64);
0232     sh_mtu2_write(ch, TIOR, TIOC_IOCH(TIOR_OC_0_CLEAR) |
0233               TIOC_IOCL(TIOR_OC_0_CLEAR));
0234     sh_mtu2_write(ch, TGR, periodic);
0235     sh_mtu2_write(ch, TCNT, 0);
0236     sh_mtu2_write(ch, TMDR, TMDR_MD_NORMAL);
0237     sh_mtu2_write(ch, TIER, TIER_TGIEA);
0238 
0239     /* enable channel */
0240     sh_mtu2_start_stop_ch(ch, 1);
0241 
0242     return 0;
0243 }
0244 
0245 static void sh_mtu2_disable(struct sh_mtu2_channel *ch)
0246 {
0247     /* disable channel */
0248     sh_mtu2_start_stop_ch(ch, 0);
0249 
0250     /* stop clock */
0251     clk_disable(ch->mtu->clk);
0252 
0253     dev_pm_syscore_device(&ch->mtu->pdev->dev, false);
0254     pm_runtime_put(&ch->mtu->pdev->dev);
0255 }
0256 
0257 static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
0258 {
0259     struct sh_mtu2_channel *ch = dev_id;
0260 
0261     /* acknowledge interrupt */
0262     sh_mtu2_read(ch, TSR);
0263     sh_mtu2_write(ch, TSR, ~TSR_TGFA);
0264 
0265     /* notify clockevent layer */
0266     ch->ced.event_handler(&ch->ced);
0267     return IRQ_HANDLED;
0268 }
0269 
/* Map a clock_event_device back to its embedding MTU2 channel. */
static struct sh_mtu2_channel *ced_to_sh_mtu2(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_mtu2_channel, ced);
}
0274 
/*
 * clockevents shutdown callback: power the channel down, but only if it
 * was actually running in periodic mode.
 */
static int sh_mtu2_clock_event_shutdown(struct clock_event_device *ced)
{
	if (clockevent_state_periodic(ced))
		sh_mtu2_disable(ced_to_sh_mtu2(ced));

	return 0;
}
0284 
0285 static int sh_mtu2_clock_event_set_periodic(struct clock_event_device *ced)
0286 {
0287     struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);
0288 
0289     if (clockevent_state_periodic(ced))
0290         sh_mtu2_disable(ch);
0291 
0292     dev_info(&ch->mtu->pdev->dev, "ch%u: used for periodic clock events\n",
0293          ch->index);
0294     sh_mtu2_enable(ch);
0295     return 0;
0296 }
0297 
0298 static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)
0299 {
0300     dev_pm_genpd_suspend(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
0301 }
0302 
0303 static void sh_mtu2_clock_event_resume(struct clock_event_device *ced)
0304 {
0305     dev_pm_genpd_resume(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
0306 }
0307 
0308 static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch,
0309                     const char *name)
0310 {
0311     struct clock_event_device *ced = &ch->ced;
0312 
0313     ced->name = name;
0314     ced->features = CLOCK_EVT_FEAT_PERIODIC;
0315     ced->rating = 200;
0316     ced->cpumask = cpu_possible_mask;
0317     ced->set_state_shutdown = sh_mtu2_clock_event_shutdown;
0318     ced->set_state_periodic = sh_mtu2_clock_event_set_periodic;
0319     ced->suspend = sh_mtu2_clock_event_suspend;
0320     ced->resume = sh_mtu2_clock_event_resume;
0321 
0322     dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n",
0323          ch->index);
0324     clockevents_register_device(ced);
0325 }
0326 
0327 static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name)
0328 {
0329     ch->mtu->has_clockevent = true;
0330     sh_mtu2_register_clockevent(ch, name);
0331 
0332     return 0;
0333 }
0334 
/* Base offset of each channel's register window within the MTU2 block. */
static const unsigned int sh_mtu2_channel_offsets[] = {
	0x300, 0x380, 0x000,
};
0338 
0339 static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
0340                  struct sh_mtu2_device *mtu)
0341 {
0342     char name[6];
0343     int irq;
0344     int ret;
0345 
0346     ch->mtu = mtu;
0347 
0348     sprintf(name, "tgi%ua", index);
0349     irq = platform_get_irq_byname(mtu->pdev, name);
0350     if (irq < 0) {
0351         /* Skip channels with no declared interrupt. */
0352         return 0;
0353     }
0354 
0355     ret = request_irq(irq, sh_mtu2_interrupt,
0356               IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
0357               dev_name(&ch->mtu->pdev->dev), ch);
0358     if (ret) {
0359         dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n",
0360             index, irq);
0361         return ret;
0362     }
0363 
0364     ch->base = mtu->mapbase + sh_mtu2_channel_offsets[index];
0365     ch->index = index;
0366 
0367     return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev));
0368 }
0369 
0370 static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
0371 {
0372     struct resource *res;
0373 
0374     res = platform_get_resource(mtu->pdev, IORESOURCE_MEM, 0);
0375     if (!res) {
0376         dev_err(&mtu->pdev->dev, "failed to get I/O memory\n");
0377         return -ENXIO;
0378     }
0379 
0380     mtu->mapbase = ioremap(res->start, resource_size(res));
0381     if (mtu->mapbase == NULL)
0382         return -ENXIO;
0383 
0384     return 0;
0385 }
0386 
/*
 * One-time device initialisation: take the "fck" functional clock, map
 * the register window and set up one sh_mtu2_channel per declared IRQ,
 * capped at the number of known channel offsets.
 *
 * On failure, resources acquired so far are released in reverse order
 * through the goto ladder at the bottom.
 */
static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
			 struct platform_device *pdev)
{
	unsigned int i;
	int ret;

	mtu->pdev = pdev;

	raw_spin_lock_init(&mtu->lock);

	/* Get hold of clock. */
	mtu->clk = clk_get(&mtu->pdev->dev, "fck");
	if (IS_ERR(mtu->clk)) {
		dev_err(&mtu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(mtu->clk);
	}

	ret = clk_prepare(mtu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Map the memory resource. */
	ret = sh_mtu2_map_memory(mtu);
	if (ret < 0) {
		dev_err(&mtu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	ret = platform_irq_count(pdev);
	if (ret < 0)
		goto err_unmap;

	/* One channel per IRQ, but never more than we have offsets for. */
	mtu->num_channels = min_t(unsigned int, ret,
				  ARRAY_SIZE(sh_mtu2_channel_offsets));

	mtu->channels = kcalloc(mtu->num_channels, sizeof(*mtu->channels),
				GFP_KERNEL);
	if (mtu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * NOTE(review): if a later iteration fails, IRQs requested by
	 * earlier successful iterations are not freed on this error
	 * path — TODO confirm this is acceptable (the driver never
	 * unregisters anyway, see sh_mtu2_remove()).
	 */
	for (i = 0; i < mtu->num_channels; ++i) {
		ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu);
		if (ret < 0)
			goto err_unmap;
	}

	platform_set_drvdata(pdev, mtu);

	return 0;

err_unmap:
	kfree(mtu->channels);
	iounmap(mtu->mapbase);
err_clk_unprepare:
	clk_unprepare(mtu->clk);
err_clk_put:
	clk_put(mtu->clk);
	return ret;
}
0449 
/*
 * Probe entry point. On SuperH the device may first be probed as an
 * early platform device ("earlytimer") before the driver model is up;
 * in that pass runtime PM is skipped and the allocated state is kept,
 * so the later regular probe finds it via platform_get_drvdata() and
 * only finishes the runtime PM setup.
 */
static int sh_mtu2_probe(struct platform_device *pdev)
{
	struct sh_mtu2_device *mtu = platform_get_drvdata(pdev);
	int ret;

	if (!is_sh_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (mtu) {
		/* Already initialised during the early platform pass. */
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	mtu = kzalloc(sizeof(*mtu), GFP_KERNEL);
	if (mtu == NULL)
		return -ENOMEM;

	ret = sh_mtu2_setup(mtu, pdev);
	if (ret) {
		kfree(mtu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	/* Early pass: leave runtime PM untouched, finish on re-probe. */
	if (is_sh_early_platform_device(pdev))
		return 0;

 out:
	/*
	 * Clock events fire from hard IRQ context, so runtime PM calls
	 * must be IRQ-safe; otherwise let the device idle.
	 */
	if (mtu->has_clockevent)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
0486 
/*
 * Removal is refused: clockevent devices cannot be unregistered once
 * handed to the clockevents framework.
 */
static int sh_mtu2_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent */
}
0491 
/* Legacy platform bus binding. */
static const struct platform_device_id sh_mtu2_id_table[] = {
	{ "sh-mtu2", 0 },
	{ },
};
MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table);

/* Device tree binding. */
static const struct of_device_id sh_mtu2_of_table[] __maybe_unused = {
	{ .compatible = "renesas,mtu2" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_mtu2_of_table);
0503 
/* Platform driver glue tying probe/remove to both bus bindings. */
static struct platform_driver sh_mtu2_device_driver = {
	.probe		= sh_mtu2_probe,
	.remove		= sh_mtu2_remove,
	.driver		= {
		.name	= "sh_mtu2",
		.of_match_table = of_match_ptr(sh_mtu2_of_table),
	},
	.id_table	= sh_mtu2_id_table,
};
0513 
/* Module init: register the platform driver. */
static int __init sh_mtu2_init(void)
{
	return platform_driver_register(&sh_mtu2_device_driver);
}
0518 
/* Module exit: unregister the platform driver. */
static void __exit sh_mtu2_exit(void)
{
	platform_driver_unregister(&sh_mtu2_device_driver);
}
0523 
#ifdef CONFIG_SUPERH
/* Allow use as an "earlytimer" before the regular device model is up. */
sh_early_platform_init("earlytimer", &sh_mtu2_device_driver);
#endif

/* subsys_initcall: timers must be up before most other subsystems. */
subsys_initcall(sh_mtu2_init);
module_exit(sh_mtu2_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH MTU2 Timer Driver");
MODULE_LICENSE("GPL v2");