// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH Timer Support - CMT
 */
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#ifdef CONFIG_SUPERH
#include <asm/platform_early.h>
#endif

struct sh_cmt_device;
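/*
 * The driver supports several CMT variants, distinguished by counter width,
 * register size and channel layout:
 *
 * - SH_CMT_16BIT: 16-bit counter, 16-bit control registers
 * - SH_CMT_32BIT: 32-bit counter, 16-bit control registers
 * - SH_CMT_48BIT: 32 usable counter bits per channel, 32-bit registers,
 *   up to six channels
 * - SH_CMT0_RCAR_GEN2 / SH_CMT1_RCAR_GEN2: R-Car Gen2 and later CMT0/CMT1,
 *   with a per-channel register block and per-channel clock enable (CMCLKE)
 *
 * Each model is described by a struct sh_cmt_info entry below, which supplies
 * the channel mask, counter width, overflow bit and register accessors used
 * by the rest of the driver.
 */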
enum sh_cmt_model {
	SH_CMT_16BIT,
	SH_CMT_32BIT,
	SH_CMT_48BIT,
	SH_CMT0_RCAR_GEN2,
	SH_CMT1_RCAR_GEN2,
};

struct sh_cmt_info {
	enum sh_cmt_model model;

	unsigned int channels_mask;

	unsigned long width; /* 16 or 32 bit version of hardware block */
	u32 overflow_bit;
	u32 clear_bits;

	/* callbacks for CMSTR and CMCSR access */
	u32 (*read_control)(void __iomem *base, unsigned long offs);
	void (*write_control)(void __iomem *base, unsigned long offs,
			      u32 value);

	/* callbacks for CMCNT and CMCOR access */
	u32 (*read_count)(void __iomem *base, unsigned long offs);
	void (*write_count)(void __iomem *base, unsigned long offs, u32 value);
};
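/*
 * Per-channel state: the I/O addresses of the (optional) per-channel
 * start/stop block and of the control/counter registers, the bookkeeping
 * needed to program compare matches, and the embedded clock event and
 * clock source devices.
 */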
struct sh_cmt_channel {
	struct sh_cmt_device *cmt;

	unsigned int index;
	unsigned int hwidx;

	void __iomem *iostart;
	void __iomem *ioctrl;

	unsigned int timer_bit;
	unsigned long flags;
	u32 match_value;
	u32 next_match_value;
	u32 max_match_value;
	raw_spinlock_t lock;
	struct clock_event_device ced;
	struct clocksource cs;
	u64 total_cycles;
	bool cs_enabled;
};
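/*
 * Per-device state: one sh_cmt_device covers a whole CMT unit and owns the
 * mapped register window, the functional clock and the array of channels
 * carved out of the hardware channel mask.
 */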
struct sh_cmt_device {
	struct platform_device *pdev;

	const struct sh_cmt_info *info;

	void __iomem *mapbase;
	struct clk *clk;
	unsigned long rate;

	raw_spinlock_t lock; /* protects the shared start/stop register */

	struct sh_cmt_channel *channels;
	unsigned int num_channels;
	unsigned int hw_channels;

	bool has_clockevent;
	bool has_clocksource;
};

/* CMCSR bits for the 16-bit timer variant */
#define SH_CMT16_CMCSR_CMF (1 << 7)
#define SH_CMT16_CMCSR_CMIE (1 << 6)
#define SH_CMT16_CMCSR_CKS8 (0 << 0)
#define SH_CMT16_CMCSR_CKS32 (1 << 0)
#define SH_CMT16_CMCSR_CKS128 (2 << 0)
#define SH_CMT16_CMCSR_CKS512 (3 << 0)
#define SH_CMT16_CMCSR_CKS_MASK (3 << 0)

/* CMCSR bits for the 32-bit and wider timer variants */
#define SH_CMT32_CMCSR_CMF (1 << 15)
#define SH_CMT32_CMCSR_OVF (1 << 14)
#define SH_CMT32_CMCSR_WRFLG (1 << 13)
#define SH_CMT32_CMCSR_STTF (1 << 12)
#define SH_CMT32_CMCSR_STPF (1 << 11)
#define SH_CMT32_CMCSR_SSIE (1 << 10)
#define SH_CMT32_CMCSR_CMS (1 << 9)
#define SH_CMT32_CMCSR_CMM (1 << 8)
#define SH_CMT32_CMCSR_CMTOUT_IE (1 << 7)
#define SH_CMT32_CMCSR_CMR_NONE (0 << 4)
#define SH_CMT32_CMCSR_CMR_DMA (1 << 4)
#define SH_CMT32_CMCSR_CMR_IRQ (2 << 4)
#define SH_CMT32_CMCSR_CMR_MASK (3 << 4)
#define SH_CMT32_CMCSR_DBGIVD (1 << 3)
#define SH_CMT32_CMCSR_CKS_RCLK8 (4 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK32 (5 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK128 (6 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK1 (7 << 0)
#define SH_CMT32_CMCSR_CKS_MASK (7 << 0)
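/*
 * Raw register accessors. The 16-bit layouts space registers 2 bytes apart
 * and the 32-bit layouts 4 bytes apart, so the register index is scaled by
 * the access width before being added to the base address.
 */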
static u32 sh_cmt_read16(void __iomem *base, unsigned long offs)
{
	return ioread16(base + (offs << 1));
}

static u32 sh_cmt_read32(void __iomem *base, unsigned long offs)
{
	return ioread32(base + (offs << 2));
}

static void sh_cmt_write16(void __iomem *base, unsigned long offs, u32 value)
{
	iowrite16(value, base + (offs << 1));
}

static void sh_cmt_write32(void __iomem *base, unsigned long offs, u32 value)
{
	iowrite32(value, base + (offs << 2));
}

static const struct sh_cmt_info sh_cmt_info[] = {
	[SH_CMT_16BIT] = {
		.model = SH_CMT_16BIT,
		.width = 16,
		.overflow_bit = SH_CMT16_CMCSR_CMF,
		.clear_bits = ~SH_CMT16_CMCSR_CMF,
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read16,
		.write_count = sh_cmt_write16,
	},
	[SH_CMT_32BIT] = {
		.model = SH_CMT_32BIT,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT_48BIT] = {
		.model = SH_CMT_48BIT,
		.channels_mask = 0x3f,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT0_RCAR_GEN2] = {
		.model = SH_CMT0_RCAR_GEN2,
		.channels_mask = 0x60,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT1_RCAR_GEN2] = {
		.model = SH_CMT1_RCAR_GEN2,
		.channels_mask = 0xff,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
};

/* channel register indices, scaled by the accessors above */
#define CMCSR 0 /* channel register */
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */

#define CMCLKE 0x1000 /* CLK enable register (R-Car Gen2 and later) */
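/*
 * The start/stop register (CMSTR) is shared by all channels of a timer unit
 * on the classic layouts, while the R-Car Gen2 and later layouts provide a
 * per-channel copy at the start of each channel block (ch->iostart).
 */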
static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
{
	if (ch->iostart)
		return ch->cmt->info->read_control(ch->iostart, 0);
	else
		return ch->cmt->info->read_control(ch->cmt->mapbase, 0);
}

static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value)
{
	if (ch->iostart)
		ch->cmt->info->write_control(ch->iostart, 0, value);
	else
		ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
}

static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
{
	return ch->cmt->info->read_control(ch->ioctrl, CMCSR);
}

static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value)
{
	ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
}

static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
{
	return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
}

static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value)
{
	ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
}

static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value)
{
	ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
}
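/*
 * Read the counter together with the overflow flag. The counter is sampled
 * three times and the overflow flag twice so that a value consistent with
 * the returned flag can be picked even if the counter wraps or is updated
 * while it is being read.
 */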
static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped)
{
	u32 v1, v2, v3;
	u32 o1, o2;

	o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;

	/* Make sure the timer value is stable. */
	do {
		o2 = o1;
		v1 = sh_cmt_read_cmcnt(ch);
		v2 = sh_cmt_read_cmcnt(ch);
		v3 = sh_cmt_read_cmcnt(ch);
		o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
	} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
			  || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));

	*has_wrapped = o1;
	return v2;
}
static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
{
	unsigned long flags;
	u32 value;

	/* the start/stop register may be shared by multiple channels */
	raw_spin_lock_irqsave(&ch->cmt->lock, flags);
	value = sh_cmt_read_cmstr(ch);

	if (start)
		value |= 1 << ch->timer_bit;
	else
		value &= ~(1 << ch->timer_bit);

	sh_cmt_write_cmstr(ch, value);
	raw_spin_unlock_irqrestore(&ch->cmt->lock, flags);
}
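/*
 * Bring a channel up: enable the functional clock, configure CMCSR for
 * compare-match interrupts, reset CMCOR/CMCNT and finally start the counter.
 * sh_cmt_disable() undoes these steps in reverse order.
 */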
static int sh_cmt_enable(struct sh_cmt_channel *ch)
{
	int k, ret;

	dev_pm_syscore_device(&ch->cmt->pdev->dev, true);

	/* enable clock */
	ret = clk_enable(ch->cmt->clk);
	if (ret) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		goto err0;
	}

	/* make sure channel is disabled */
	sh_cmt_start_stop_ch(ch, 0);

	/* configure channel, periodic mode and maximum timeout */
	if (ch->cmt->info->width == 16) {
		sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE |
				   SH_CMT16_CMCSR_CKS512);
	} else {
		u32 cmtout = ch->cmt->info->model <= SH_CMT_48BIT ?
			      SH_CMT32_CMCSR_CMTOUT_IE : 0;
		sh_cmt_write_cmcsr(ch, cmtout | SH_CMT32_CMCSR_CMM |
				   SH_CMT32_CMCSR_CMR_IRQ |
				   SH_CMT32_CMCSR_CKS_RCLK8);
	}

	sh_cmt_write_cmcor(ch, 0xffffffff);
	sh_cmt_write_cmcnt(ch, 0);

	/*
	 * A write to CMCNT only takes effect after a few cycles of the
	 * counter input clock, so poll until the register reads back as
	 * zero before starting the channel.
	 */
	for (k = 0; k < 100; k++) {
		if (!sh_cmt_read_cmcnt(ch))
			break;
		udelay(1);
	}

	if (sh_cmt_read_cmcnt(ch)) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
			ch->index);
		ret = -ETIMEDOUT;
		goto err1;
	}

	/* enable channel */
	sh_cmt_start_stop_ch(ch, 1);
	return 0;

err1:
	/* stop clock */
	clk_disable(ch->cmt->clk);

err0:
	return ret;
}

static void sh_cmt_disable(struct sh_cmt_channel *ch)
{
	/* disable channel */
	sh_cmt_start_stop_ch(ch, 0);

	/* disable interrupts in CMCSR register */
	sh_cmt_write_cmcsr(ch, 0);

	/* stop clock */
	clk_disable(ch->cmt->clk);

	dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
}
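/*
 * Channel state flags: which users (clock event device and/or clock source)
 * are active, plus transient bits used to coordinate reprogramming between
 * the interrupt handler and sh_cmt_clock_event_program_verify().
 */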
#define FLAG_CLOCKEVENT (1 << 0)
#define FLAG_CLOCKSOURCE (1 << 1)
#define FLAG_REPROGRAM (1 << 2)
#define FLAG_SKIPEVENT (1 << 3)
#define FLAG_IRQCONTEXT (1 << 4)
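/*
 * Program the next compare match and verify that it still lies in the
 * future. If the counter has already passed the written match value, the
 * write is retried with an exponentially growing safety margin until a
 * match ahead of the counter has been programmed.
 */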
static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,
					      int absolute)
{
	u32 value = ch->next_match_value;
	u32 new_match;
	u32 delay = 0;
	u32 now = 0;
	u32 has_wrapped;

	now = sh_cmt_get_counter(ch, &has_wrapped);
	ch->flags |= FLAG_REPROGRAM; /* force reprogram */

	if (has_wrapped) {
		/*
		 * We're competing with the interrupt handler: let it
		 * reprogram the timer and handle the event.
		 */
		ch->flags |= FLAG_SKIPEVENT;
		return;
	}

	if (absolute)
		now = 0;

	do {
		/*
		 * Reprogram the timer hardware, but don't save the new
		 * match value yet.
		 */
		new_match = now + value + delay;
		if (new_match > ch->max_match_value)
			new_match = ch->max_match_value;

		sh_cmt_write_cmcor(ch, new_match);

		now = sh_cmt_get_counter(ch, &has_wrapped);
		if (has_wrapped && (new_match > ch->match_value)) {
			/*
			 * We are changing to a greater match value, so the
			 * wrap must have been caused by the counter matching
			 * the old value. The first interrupt reprograms the
			 * timer and the second one handles the event.
			 */
			ch->flags |= FLAG_SKIPEVENT;
			break;
		}

		if (has_wrapped) {
			/*
			 * We are changing to a smaller match value, so the
			 * wrap must have been caused by the counter matching
			 * the new value. Save it and let the interrupt
			 * handler deal with the event.
			 */
			ch->match_value = new_match;
			break;
		}

		/* be safe: verify the match value */
		if (now < new_match) {
			/*
			 * The counter is still below the match value, so no
			 * match event can have been missed. Save the
			 * programmed value and let the interrupt handler
			 * deal with the event.
			 */
			ch->match_value = new_match;
			break;
		}

		/*
		 * The counter has already passed the new match value without
		 * the wrap flag being set, so the programmed event was too
		 * close. Increase the delay and retry.
		 */
		if (delay)
			delay <<= 1;
		else
			delay = 1;

		if (!delay)
			dev_warn(&ch->cmt->pdev->dev, "ch%u: too long delay\n",
				 ch->index);

	} while (delay);
}

static void __sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
	if (delta > ch->max_match_value)
		dev_warn(&ch->cmt->pdev->dev, "ch%u: delta out of range\n",
			 ch->index);

	ch->next_match_value = delta;
	sh_cmt_clock_event_program_verify(ch, 0);
}

static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ch->lock, flags);
	__sh_cmt_set_next(ch, delta);
	raw_spin_unlock_irqrestore(&ch->lock, flags);
}
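/*
 * Compare-match interrupt handler: clear the interrupt flags, account the
 * elapsed cycles for the clock source, deliver the clock event and, when
 * needed, reprogram the next match value.
 */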
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
	struct sh_cmt_channel *ch = dev_id;

	/* clear flags */
	sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &
			   ch->cmt->info->clear_bits);

	/*
	 * Account a full match period for the clock source; the overflow
	 * flag has already been cleared above.
	 */
	if (ch->flags & FLAG_CLOCKSOURCE)
		ch->total_cycles += ch->match_value + 1;

	if (!(ch->flags & FLAG_REPROGRAM))
		ch->next_match_value = ch->max_match_value;

	ch->flags |= FLAG_IRQCONTEXT;

	if (ch->flags & FLAG_CLOCKEVENT) {
		if (!(ch->flags & FLAG_SKIPEVENT)) {
			if (clockevent_state_oneshot(&ch->ced)) {
				ch->next_match_value = ch->max_match_value;
				ch->flags |= FLAG_REPROGRAM;
			}

			ch->ced.event_handler(&ch->ced);
		}
	}

	ch->flags &= ~FLAG_SKIPEVENT;

	if (ch->flags & FLAG_REPROGRAM) {
		ch->flags &= ~FLAG_REPROGRAM;
		sh_cmt_clock_event_program_verify(ch, 1);

		if (ch->flags & FLAG_CLOCKEVENT)
			if ((clockevent_state_shutdown(&ch->ced))
			    || (ch->match_value == ch->next_match_value))
				ch->flags &= ~FLAG_REPROGRAM;
	}

	ch->flags &= ~FLAG_IRQCONTEXT;

	return IRQ_HANDLED;
}
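/*
 * sh_cmt_start()/sh_cmt_stop() track clock event and clock source usage via
 * the FLAG_* bits; the hardware channel is only enabled for the first user
 * and disabled again when the last user goes away, with runtime PM usage
 * counts managed alongside.
 */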
static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
{
	int ret = 0;
	unsigned long flags;

	if (flag & FLAG_CLOCKSOURCE)
		pm_runtime_get_sync(&ch->cmt->pdev->dev);

	raw_spin_lock_irqsave(&ch->lock, flags);

	if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
		if (flag & FLAG_CLOCKEVENT)
			pm_runtime_get_sync(&ch->cmt->pdev->dev);
		ret = sh_cmt_enable(ch);
	}

	if (ret)
		goto out;
	ch->flags |= flag;

	/* setup timeout if no clockevent */
	if (ch->cmt->num_channels == 1 &&
	    flag == FLAG_CLOCKSOURCE && (!(ch->flags & FLAG_CLOCKEVENT)))
		__sh_cmt_set_next(ch, ch->max_match_value);
out:
	raw_spin_unlock_irqrestore(&ch->lock, flags);

	return ret;
}

static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
{
	unsigned long flags;
	unsigned long f;

	raw_spin_lock_irqsave(&ch->lock, flags);

	f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
	ch->flags &= ~flag;

	if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
		sh_cmt_disable(ch);
		if (flag & FLAG_CLOCKEVENT)
			pm_runtime_put(&ch->cmt->pdev->dev);
	}

	/* adjust the timeout to maximum if only the clocksource is left */
	if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
		__sh_cmt_set_next(ch, ch->max_match_value);

	raw_spin_unlock_irqrestore(&ch->lock, flags);

	if (flag & FLAG_CLOCKSOURCE)
		pm_runtime_put(&ch->cmt->pdev->dev);
}
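/*
 * Clocksource interface. When the channel doubles as a clock event device
 * (single-channel devices), the raw counter is combined with the
 * total_cycles accumulator maintained by the interrupt handler; otherwise
 * the free-running counter is returned directly.
 */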
static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
{
	return container_of(cs, struct sh_cmt_channel, cs);
}

static u64 sh_cmt_clocksource_read(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
	u32 has_wrapped;

	if (ch->cmt->num_channels == 1) {
		unsigned long flags;
		u64 value;
		u32 raw;

		raw_spin_lock_irqsave(&ch->lock, flags);
		value = ch->total_cycles;
		raw = sh_cmt_get_counter(ch, &has_wrapped);

		if (unlikely(has_wrapped))
			raw += ch->match_value + 1;
		raw_spin_unlock_irqrestore(&ch->lock, flags);

		return value + raw;
	}

	return sh_cmt_get_counter(ch, &has_wrapped);
}

static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
	int ret;
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	WARN_ON(ch->cs_enabled);

	ch->total_cycles = 0;

	ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
	if (!ret)
		ch->cs_enabled = true;

	return ret;
}

static void sh_cmt_clocksource_disable(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	WARN_ON(!ch->cs_enabled);

	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
	ch->cs_enabled = false;
}

static void sh_cmt_clocksource_suspend(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	if (!ch->cs_enabled)
		return;

	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
	dev_pm_genpd_suspend(&ch->cmt->pdev->dev);
}

static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	if (!ch->cs_enabled)
		return;

	dev_pm_genpd_resume(&ch->cmt->pdev->dev);
	sh_cmt_start(ch, FLAG_CLOCKSOURCE);
}

static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 125;
	cs->read = sh_cmt_clocksource_read;
	cs->enable = sh_cmt_clocksource_enable;
	cs->disable = sh_cmt_clocksource_disable;
	cs->suspend = sh_cmt_clocksource_suspend;
	cs->resume = sh_cmt_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(ch->cmt->info->width);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	clocksource_register_hz(cs, ch->cmt->rate);
	return 0;
}
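/*
 * Clock event device interface: the channel is (re)armed through
 * sh_cmt_set_next() for one-shot operation or with a HZ-derived period for
 * periodic operation.
 */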
static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_cmt_channel, ced);
}

static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
{
	sh_cmt_start(ch, FLAG_CLOCKEVENT);

	if (periodic)
		sh_cmt_set_next(ch, ((ch->cmt->rate + HZ/2) / HZ) - 1);
	else
		sh_cmt_set_next(ch, ch->max_match_value);
}

static int sh_cmt_clock_event_shutdown(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	sh_cmt_stop(ch, FLAG_CLOCKEVENT);
	return 0;
}

static int sh_cmt_clock_event_set_state(struct clock_event_device *ced,
					int periodic)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	/* deal with the old setting first */
	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_cmt_stop(ch, FLAG_CLOCKEVENT);

	dev_info(&ch->cmt->pdev->dev, "ch%u: used for %s clock events\n",
		 ch->index, periodic ? "periodic" : "oneshot");
	sh_cmt_clock_event_start(ch, periodic);
	return 0;
}

static int sh_cmt_clock_event_set_oneshot(struct clock_event_device *ced)
{
	return sh_cmt_clock_event_set_state(ced, 0);
}

static int sh_cmt_clock_event_set_periodic(struct clock_event_device *ced)
{
	return sh_cmt_clock_event_set_state(ced, 1);
}

static int sh_cmt_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	BUG_ON(!clockevent_state_oneshot(ced));
	if (likely(ch->flags & FLAG_IRQCONTEXT))
		ch->next_match_value = delta - 1;
	else
		sh_cmt_set_next(ch, delta - 1);

	return 0;
}

static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	dev_pm_genpd_suspend(&ch->cmt->pdev->dev);
	clk_unprepare(ch->cmt->clk);
}

static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	clk_prepare(ch->cmt->clk);
	dev_pm_genpd_resume(&ch->cmt->pdev->dev);
}
static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
				      const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int irq;
	int ret;

	irq = platform_get_irq(ch->cmt->pdev, ch->index);
	if (irq < 0)
		return irq;

	ret = request_irq(irq, sh_cmt_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->cmt->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, irq);
		return ret;
	}

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 125;
	ced->cpumask = cpu_possible_mask;
	ced->set_next_event = sh_cmt_clock_event_next;
	ced->set_state_shutdown = sh_cmt_clock_event_shutdown;
	ced->set_state_periodic = sh_cmt_clock_event_set_periodic;
	ced->set_state_oneshot = sh_cmt_clock_event_set_oneshot;
	ced->suspend = sh_cmt_clock_event_suspend;
	ced->resume = sh_cmt_clock_event_resume;

	/* convert between nanoseconds and counter ticks with a fixed shift */
	ced->shift = 32;
	ced->mult = div_sc(ch->cmt->rate, NSEC_PER_SEC, ced->shift);
	ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced);
	ced->max_delta_ticks = ch->max_match_value;
	ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);
	ced->min_delta_ticks = 0x1f;

	dev_info(&ch->cmt->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);
	clockevents_register_device(ced);

	return 0;
}

static int sh_cmt_register(struct sh_cmt_channel *ch, const char *name,
			   bool clockevent, bool clocksource)
{
	int ret;

	if (clockevent) {
		ch->cmt->has_clockevent = true;
		ret = sh_cmt_register_clockevent(ch, name);
		if (ret < 0)
			return ret;
	}

	if (clocksource) {
		ch->cmt->has_clocksource = true;
		sh_cmt_register_clocksource(ch, name);
	}

	return 0;
}
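/*
 * Initialize one channel: compute the control register addresses for the
 * selected hardware layout, derive the maximum match value from the counter
 * width and register the requested clock event device and/or clock source.
 */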
static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
				unsigned int hwidx, bool clockevent,
				bool clocksource, struct sh_cmt_device *cmt)
{
	u32 value;
	int ret;

	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->cmt = cmt;
	ch->index = index;
	ch->hwidx = hwidx;
	ch->timer_bit = hwidx;

	/*
	 * Compute the address of the channel control register block. For the
	 * timers with a per-channel start/stop register, compute its address
	 * as well.
	 */
	switch (cmt->info->model) {
	case SH_CMT_16BIT:
		ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6;
		break;
	case SH_CMT_32BIT:
	case SH_CMT_48BIT:
		ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10;
		break;
	case SH_CMT0_RCAR_GEN2:
	case SH_CMT1_RCAR_GEN2:
		ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
		ch->ioctrl = ch->iostart + 0x10;
		ch->timer_bit = 0;

		/* Enable the clock supply to the timer channel. */
		value = ioread32(cmt->mapbase + CMCLKE);
		value |= BIT(hwidx);
		iowrite32(value, cmt->mapbase + CMCLKE);
		break;
	}

	if (cmt->info->width == (sizeof(ch->max_match_value) * 8))
		ch->max_match_value = ~0;
	else
		ch->max_match_value = (1 << cmt->info->width) - 1;

	ch->match_value = ch->max_match_value;
	raw_spin_lock_init(&ch->lock);

	ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev),
			      clockevent, clocksource);
	if (ret) {
		dev_err(&cmt->pdev->dev, "ch%u: registration failed\n",
			ch->index);
		return ret;
	}
	ch->cs_enabled = false;

	return 0;
}
static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
{
	struct resource *mem;

	mem = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&cmt->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	cmt->mapbase = ioremap(mem->start, resource_size(mem));
	if (cmt->mapbase == NULL) {
		dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
		return -ENXIO;
	}

	return 0;
}

static const struct platform_device_id sh_cmt_id_table[] = {
	{ "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
	{ "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);

static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
	{
		.compatible = "renesas,cmt-48",
		.data = &sh_cmt_info[SH_CMT_48BIT]
	},
	{
		.compatible = "renesas,cmt-48-gen2",
		.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
	},
	{
		.compatible = "renesas,r8a7740-cmt1",
		.data = &sh_cmt_info[SH_CMT_48BIT]
	},
	{
		.compatible = "renesas,sh73a0-cmt1",
		.data = &sh_cmt_info[SH_CMT_48BIT]
	},
	{
		.compatible = "renesas,rcar-gen2-cmt0",
		.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
	},
	{
		.compatible = "renesas,rcar-gen2-cmt1",
		.data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
	},
	{
		.compatible = "renesas,rcar-gen3-cmt0",
		.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
	},
	{
		.compatible = "renesas,rcar-gen3-cmt1",
		.data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
	},
	{
		.compatible = "renesas,rcar-gen4-cmt0",
		.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
	},
	{
		.compatible = "renesas,rcar-gen4-cmt1",
		.data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
	},
	{ }
};
MODULE_DEVICE_TABLE(of, sh_cmt_of_table);
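/*
 * Device setup: pick the model description (from OF match data or platform
 * data), acquire and prepare the functional clock, derive the counter rate,
 * map the register window and initialize one driver channel per bit set in
 * the hardware channel mask.
 */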
static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
{
	unsigned int mask;
	unsigned int i;
	int ret;

	cmt->pdev = pdev;
	raw_spin_lock_init(&cmt->lock);

	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		cmt->info = of_device_get_match_data(&pdev->dev);
		cmt->hw_channels = cmt->info->channels_mask;
	} else if (pdev->dev.platform_data) {
		struct sh_timer_config *cfg = pdev->dev.platform_data;
		const struct platform_device_id *id = pdev->id_entry;

		cmt->info = (const struct sh_cmt_info *)id->driver_data;
		cmt->hw_channels = cfg->channels_mask;
	} else {
		dev_err(&cmt->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	/* Get hold of the functional clock. */
	cmt->clk = clk_get(&cmt->pdev->dev, "fck");
	if (IS_ERR(cmt->clk)) {
		dev_err(&cmt->pdev->dev, "cannot get clock\n");
		return PTR_ERR(cmt->clk);
	}

	ret = clk_prepare(cmt->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Determine the counter rate. */
	ret = clk_enable(cmt->clk);
	if (ret < 0)
		goto err_clk_unprepare;

	if (cmt->info->width == 16)
		cmt->rate = clk_get_rate(cmt->clk) / 512;
	else
		cmt->rate = clk_get_rate(cmt->clk) / 8;

	/* Map the memory resource. */
	ret = sh_cmt_map_memory(cmt);
	if (ret < 0)
		goto err_clk_disable;

	/* Allocate and setup the channels. */
	cmt->num_channels = hweight8(cmt->hw_channels);
	cmt->channels = kcalloc(cmt->num_channels, sizeof(*cmt->channels),
				GFP_KERNEL);
	if (cmt->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * Use the first channel as a clock event device and the second
	 * channel as a clock source. If only one channel is available use it
	 * for both.
	 */
	for (i = 0, mask = cmt->hw_channels; i < cmt->num_channels; ++i) {
		unsigned int hwidx = ffs(mask) - 1;
		bool clocksource = i == 1 || cmt->num_channels == 1;
		bool clockevent = i == 0;

		ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx,
					   clockevent, clocksource, cmt);
		if (ret < 0)
			goto err_unmap;

		mask &= ~(1 << hwidx);
	}

	clk_disable(cmt->clk);

	platform_set_drvdata(pdev, cmt);

	return 0;

err_unmap:
	kfree(cmt->channels);
	iounmap(cmt->mapbase);
err_clk_disable:
	clk_disable(cmt->clk);
err_clk_unprepare:
	clk_unprepare(cmt->clk);
err_clk_put:
	clk_put(cmt->clk);
	return ret;
}
static int sh_cmt_probe(struct platform_device *pdev)
{
	struct sh_cmt_device *cmt = platform_get_drvdata(pdev);
	int ret;

	if (!is_sh_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (cmt) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	cmt = kzalloc(sizeof(*cmt), GFP_KERNEL);
	if (cmt == NULL)
		return -ENOMEM;

	ret = sh_cmt_setup(cmt, pdev);
	if (ret) {
		kfree(cmt);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}

	if (is_sh_early_platform_device(pdev))
		return 0;

out:
	if (cmt->has_clockevent || cmt->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_cmt_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static struct platform_driver sh_cmt_device_driver = {
	.probe = sh_cmt_probe,
	.remove = sh_cmt_remove,
	.driver = {
		.name = "sh_cmt",
		.of_match_table = of_match_ptr(sh_cmt_of_table),
	},
	.id_table = sh_cmt_id_table,
};

static int __init sh_cmt_init(void)
{
	return platform_driver_register(&sh_cmt_device_driver);
}

static void __exit sh_cmt_exit(void)
{
	platform_driver_unregister(&sh_cmt_device_driver);
}

#ifdef CONFIG_SUPERH
sh_early_platform_init("earlytimer", &sh_cmt_device_driver);
#endif

subsys_initcall(sh_cmt_init);
module_exit(sh_cmt_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH CMT Timer Driver");
MODULE_LICENSE("GPL v2");