// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-mmp/time.c
 *
 * Support for clocksource and clockevents
 *
 * Copyright (C) 2008 Marvell International Ltd.
 *
 * The timer block provides several counters, each with up to three match
 * comparators.  Timer #1 runs in free-running mode and backs the
 * clocksource and sched_clock, while match comparator #0 of timer #0
 * provides the one-shot clock event device.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <linux/clk.h>

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <asm/mach/time.h>

#include "addr-map.h"
#include "regs-timers.h"
#include "regs-apbc.h"
#include "irqs.h"
#include <linux/soc/mmp/cputype.h>

#define TIMERS_VIRT_BASE	TIMERS1_VIRT_BASE

#define MAX_DELTA		(0xfffffffe)
#define MIN_DELTA		(16)

static void __iomem *mmp_timer_base = TIMERS_VIRT_BASE;

/*
 * Latching the free-running count into the capture register (TMR_CVWR)
 * takes a short while to stabilize, so busy-wait before reading it back.
 */
static inline uint32_t timer_read(void)
{
	int delay = 100;

	__raw_writel(1, mmp_timer_base + TMR_CVWR(1));

	while (delay--)
		cpu_relax();

	return __raw_readl(mmp_timer_base + TMR_CVWR(1));
}

static u64 notrace mmp_read_sched_clock(void)
{
	return timer_read();
}

static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *c = dev_id;

	/*
	 * Clear the pending match 0 interrupt status of timer 0.
	 */
	__raw_writel(0x01, mmp_timer_base + TMR_ICR(0));

	/*
	 * Disable timer 0 (the one-shot event timer); timer 1 keeps
	 * running as the clocksource.
	 */
	__raw_writel(0x02, mmp_timer_base + TMR_CER);

	c->event_handler(c);

	return IRQ_HANDLED;
}

static int timer_set_next_event(unsigned long delta,
				struct clock_event_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * Disable timer 0 before reprogramming it; timer 1 stays enabled.
	 */
	__raw_writel(0x02, mmp_timer_base + TMR_CER);

	/*
	 * Clear and enable the match 0 interrupt of timer 0.
	 */
	__raw_writel(0x01, mmp_timer_base + TMR_ICR(0));
	__raw_writel(0x01, mmp_timer_base + TMR_IER(0));

	/*
	 * Set up the new clockevent match value.
	 */
	__raw_writel(delta - 1, mmp_timer_base + TMR_TN_MM(0, 0));

	/*
	 * Enable both timer 0 and timer 1.
	 */
	__raw_writel(0x03, mmp_timer_base + TMR_CER);

	local_irq_restore(flags);

	return 0;
}

static int timer_set_shutdown(struct clock_event_device *evt)
{
	unsigned long flags;

	local_irq_save(flags);
	/* disable the match interrupt of timer 0 */
	__raw_writel(0x00, mmp_timer_base + TMR_IER(0));
	local_irq_restore(flags);

	return 0;
}

static struct clock_event_device ckevt = {
	.name			= "clockevent",
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 200,
	.set_next_event		= timer_set_next_event,
	.set_state_shutdown	= timer_set_shutdown,
	.set_state_oneshot	= timer_set_shutdown,
};

static u64 clksrc_read(struct clocksource *cs)
{
	return timer_read();
}

static struct clocksource cksrc = {
	.name		= "clocksource",
	.rating		= 200,
	.read		= clksrc_read,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static void __init timer_config(void)
{
	uint32_t ccr = __raw_readl(mmp_timer_base + TMR_CCR);

	/* disable all counters while configuring */
	__raw_writel(0x0, mmp_timer_base + TMR_CER);

	/* select the clock source for timers 0 and 1 */
	ccr &= (cpu_is_mmp2() || cpu_is_mmp3()) ?
		(TMR_CCR_CS_0(0) | TMR_CCR_CS_1(0)) :
		(TMR_CCR_CS_0(3) | TMR_CCR_CS_1(3));
	__raw_writel(ccr, mmp_timer_base + TMR_CCR);

	/* set timer 0 to periodic mode, and timer 1 to free-running mode */
	__raw_writel(0x2, mmp_timer_base + TMR_CMR);

	__raw_writel(0x1, mmp_timer_base + TMR_PLCR(0)); /* periodic */
	__raw_writel(0x7, mmp_timer_base + TMR_ICR(0));  /* clear status */
	__raw_writel(0x0, mmp_timer_base + TMR_IER(0));  /* mask interrupts */

	__raw_writel(0x0, mmp_timer_base + TMR_PLCR(1)); /* free-running */
	__raw_writel(0x7, mmp_timer_base + TMR_ICR(1));  /* clear status */
	__raw_writel(0x0, mmp_timer_base + TMR_IER(1));  /* mask interrupts */

	/* enable timer 1 counter */
	__raw_writel(0x2, mmp_timer_base + TMR_CER);
}

void __init mmp_timer_init(int irq, unsigned long rate)
{
	timer_config();

	sched_clock_register(mmp_read_sched_clock, 32, rate);

	ckevt.cpumask = cpumask_of(0);

	if (request_irq(irq, timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
			"timer", &ckevt))
		pr_err("Failed to request irq %d (timer)\n", irq);

	clocksource_register_hz(&cksrc, rate);
	clockevents_config_and_register(&ckevt, rate, MIN_DELTA, MAX_DELTA);
}

static int __init mmp_dt_init_timer(struct device_node *np)
{
	struct clk *clk;
	int irq, ret;
	unsigned long rate;

	clk = of_clk_get(np, 0);
	if (!IS_ERR(clk)) {
		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;
		rate = clk_get_rate(clk);
	} else if (cpu_is_pj4()) {
		rate = 6500000;
	} else {
		rate = 3250000;
	}

	irq = irq_of_parse_and_map(np, 0);
	if (!irq)
		return -EINVAL;

	mmp_timer_base = of_iomap(np, 0);
	if (!mmp_timer_base)
		return -ENOMEM;

	mmp_timer_init(irq, rate);
	return 0;
}

TIMER_OF_DECLARE(mmp_timer, "mrvl,mmp-timer", mmp_dt_init_timer);
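
/*
 * A minimal sketch of a device-tree node this probe would match, assuming
 * the generic "mrvl,mmp-timer" binding; the unit address, register size,
 * interrupt number and clock phandle below are illustrative only.  The
 * code above needs "reg" for of_iomap(), "interrupts" for
 * irq_of_parse_and_map(), and optionally "clocks" for of_clk_get():
 *
 *	timer0: timer@d4014000 {
 *		compatible = "mrvl,mmp-timer";
 *		reg = <0xd4014000 0x100>;
 *		interrupts = <13>;
 *		clocks = <&soc_clocks 0>;
 *	};
 */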