/*
 * i.MX GPT (General Purpose Timer) driver: registers the GPT as a
 * clocksource, sched_clock and clock event device on Freescale/NXP i.MX SoCs.
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clockchips.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <soc/imx/timer.h>

/*
 * Several GPT variants exist across the i.MX family; this driver handles
 * four register layouts (see the imx_gpt_data tables below):
 *  - i.MX1/i.MXL
 *  - i.MX21, i.MX27
 *  - i.MX25, i.MX31, i.MX35, i.MX50, i.MX51, i.MX53, i.MX6Q
 *  - i.MX6DL, i.MX6SL, i.MX6SX
 */

/* Register offsets and bits common to all i.MX GPT versions */
#define MXC_TCTL		0x00
#define MXC_TCTL_TEN		(1 << 0)
#define MXC_TPRER		0x04

/* i.MX1, i.MX21, i.MX27 */
#define MX1_2_TCTL_CLK_PCLK1	(1 << 1)
#define MX1_2_TCTL_IRQEN	(1 << 4)
#define MX1_2_TCTL_FRR		(1 << 8)
#define MX1_2_TCMP		0x08
#define MX1_2_TCN		0x10
#define MX1_2_TSTAT		0x14

/* i.MX21, i.MX27 */
#define MX2_TSTAT_CAPT		(1 << 1)
#define MX2_TSTAT_COMP		(1 << 0)

/* i.MX25, i.MX31, i.MX35, i.MX5x, i.MX6 */
#define V2_TCTL_WAITEN		(1 << 3)
#define V2_TCTL_CLK_IPG		(1 << 6)
#define V2_TCTL_CLK_PER		(2 << 6)
#define V2_TCTL_CLK_OSC_DIV8	(5 << 6)
#define V2_TCTL_FRR		(1 << 9)
#define V2_TCTL_24MEN		(1 << 10)
#define V2_TPRER_PRE24M		12
#define V2_IR			0x0c
#define V2_TSTAT		0x08
#define V2_TSTAT_OF1		(1 << 0)
#define V2_TCN			0x24
#define V2_TCMP			0x10

#define V2_TIMER_RATE_OSC_DIV8	3000000	/* 24 MHz oscillator / 8 */

struct imx_timer {
        enum imx_gpt_type type;
        void __iomem *base;
        int irq;
        struct clk *clk_per;
        struct clk *clk_ipg;
        const struct imx_gpt_data *gpt;
        struct clock_event_device ced;
};

struct imx_gpt_data {
        int reg_tstat;
        int reg_tcn;
        int reg_tcmp;
        void (*gpt_setup_tctl)(struct imx_timer *imxtm);
        void (*gpt_irq_enable)(struct imx_timer *imxtm);
        void (*gpt_irq_disable)(struct imx_timer *imxtm);
        void (*gpt_irq_acknowledge)(struct imx_timer *imxtm);
        int (*set_next_event)(unsigned long evt,
                              struct clock_event_device *ced);
};

static inline struct imx_timer *to_imx_timer(struct clock_event_device *ced)
{
        return container_of(ced, struct imx_timer, ced);
}

static void imx1_gpt_irq_disable(struct imx_timer *imxtm)
{
        unsigned int tmp;

        tmp = readl_relaxed(imxtm->base + MXC_TCTL);
        writel_relaxed(tmp & ~MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}
#define imx21_gpt_irq_disable imx1_gpt_irq_disable

static void imx31_gpt_irq_disable(struct imx_timer *imxtm)
{
        writel_relaxed(0, imxtm->base + V2_IR);
}
#define imx6dl_gpt_irq_disable imx31_gpt_irq_disable

static void imx1_gpt_irq_enable(struct imx_timer *imxtm)
{
        unsigned int tmp;

        tmp = readl_relaxed(imxtm->base + MXC_TCTL);
        writel_relaxed(tmp | MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}
#define imx21_gpt_irq_enable imx1_gpt_irq_enable

static void imx31_gpt_irq_enable(struct imx_timer *imxtm)
{
        writel_relaxed(1 << 0, imxtm->base + V2_IR);
}
#define imx6dl_gpt_irq_enable imx31_gpt_irq_enable

static void imx1_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
        writel_relaxed(0, imxtm->base + MX1_2_TSTAT);
}

static void imx21_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
        writel_relaxed(MX2_TSTAT_CAPT | MX2_TSTAT_COMP,
                       imxtm->base + MX1_2_TSTAT);
}

static void imx31_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
        writel_relaxed(V2_TSTAT_OF1, imxtm->base + V2_TSTAT);
}
#define imx6dl_gpt_irq_acknowledge imx31_gpt_irq_acknowledge

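/*
 * The counter register of the probed GPT is cached here so that the
 * notrace sched_clock callback (and, on ARM, the delay timer) can read
 * the free-running counter without going through struct imx_timer.
 */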
static void __iomem *sched_clock_reg;

static u64 notrace mxc_read_sched_clock(void)
{
        return sched_clock_reg ? readl_relaxed(sched_clock_reg) : 0;
}

#if defined(CONFIG_ARM)
static struct delay_timer imx_delay_timer;

static unsigned long imx_read_current_timer(void)
{
        return readl_relaxed(sched_clock_reg);
}
#endif

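/*
 * Register the free-running GPT counter as an MMIO clocksource (rating
 * 200), as the sched_clock source, and, on ARM, as the timer-based
 * udelay() implementation.
 */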
static int __init mxc_clocksource_init(struct imx_timer *imxtm)
{
        unsigned int c = clk_get_rate(imxtm->clk_per);
        void __iomem *reg = imxtm->base + imxtm->gpt->reg_tcn;

#if defined(CONFIG_ARM)
        imx_delay_timer.read_current_timer = &imx_read_current_timer;
        imx_delay_timer.freq = c;
        register_current_timer_delay(&imx_delay_timer);
#endif

        sched_clock_reg = reg;

        sched_clock_register(mxc_read_sched_clock, 32, c);
        return clocksource_mmio_init(reg, "mxc_timer1", c, 200, 32,
                                     clocksource_mmio_readl_up);
}
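/*
 * Clock event programming: the GPT runs in free-running mode, so a one-shot
 * event is scheduled by writing "current counter + delta" into the compare
 * register.  Re-reading the counter afterwards detects the case where the
 * deadline already passed while it was being programmed, which is reported
 * as -ETIME so the core can retry.  The v2 variant skips that report for
 * deltas of 0x7fffffff and above, where the signed wrap-around check would
 * trigger spuriously.
 */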
static int mx1_2_set_next_event(unsigned long evt,
                                struct clock_event_device *ced)
{
        struct imx_timer *imxtm = to_imx_timer(ced);
        unsigned long tcmp;

        tcmp = readl_relaxed(imxtm->base + MX1_2_TCN) + evt;

        writel_relaxed(tcmp, imxtm->base + MX1_2_TCMP);

        return (int)(tcmp - readl_relaxed(imxtm->base + MX1_2_TCN)) < 0 ?
                        -ETIME : 0;
}

static int v2_set_next_event(unsigned long evt,
                             struct clock_event_device *ced)
{
        struct imx_timer *imxtm = to_imx_timer(ced);
        unsigned long tcmp;

        tcmp = readl_relaxed(imxtm->base + V2_TCN) + evt;

        writel_relaxed(tcmp, imxtm->base + V2_TCMP);

        return evt < 0x7fffffff &&
                (int)(tcmp - readl_relaxed(imxtm->base + V2_TCN)) < 0 ?
                        -ETIME : 0;
}

static int mxc_shutdown(struct clock_event_device *ced)
{
        struct imx_timer *imxtm = to_imx_timer(ced);
        u32 tcn;

        /* Disable interrupt in GPT module */
        imxtm->gpt->gpt_irq_disable(imxtm);

        tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);

        /* Set event time into the far future */
        writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);

        /* Clear pending interrupt */
        imxtm->gpt->gpt_irq_acknowledge(imxtm);

#ifdef DEBUG
        printk(KERN_INFO "%s: changing mode\n", __func__);
#endif

        return 0;
}

static int mxc_set_oneshot(struct clock_event_device *ced)
{
        struct imx_timer *imxtm = to_imx_timer(ced);

        /* Disable interrupt in GPT module */
        imxtm->gpt->gpt_irq_disable(imxtm);

        if (!clockevent_state_oneshot(ced)) {
                u32 tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);

                /* Set event time into the far future */
                writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);

                /* Clear pending interrupt */
                imxtm->gpt->gpt_irq_acknowledge(imxtm);
        }

#ifdef DEBUG
        printk(KERN_INFO "%s: changing mode\n", __func__);
#endif

        /*
         * Enable the interrupt here rather than in set_next_event(), so the
         * enable/disable overhead is not paid on every event; the core is
         * expected to program an event or shut the device down well before
         * the free-running counter wraps.
         */
        imxtm->gpt->gpt_irq_enable(imxtm);

        return 0;
}

/*
 * IRQ handler for the timer
 */
static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id)
{
        struct clock_event_device *ced = dev_id;
        struct imx_timer *imxtm = to_imx_timer(ced);
        uint32_t tstat;

        tstat = readl_relaxed(imxtm->base + imxtm->gpt->reg_tstat);

        imxtm->gpt->gpt_irq_acknowledge(imxtm);

        ced->event_handler(ced);

        return IRQ_HANDLED;
}

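/*
 * Register the clock event device: one-shot only, rating 200, bound to
 * CPU0.  The minimum programmable delta is 0xff timer ticks, the maximum
 * 0xfffffffe, and the interrupt is requested with IRQF_TIMER | IRQF_IRQPOLL
 * as the system tick source.
 */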
static int __init mxc_clockevent_init(struct imx_timer *imxtm)
{
        struct clock_event_device *ced = &imxtm->ced;

        ced->name = "mxc_timer1";
        ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
        ced->set_state_shutdown = mxc_shutdown;
        ced->set_state_oneshot = mxc_set_oneshot;
        ced->tick_resume = mxc_shutdown;
        ced->set_next_event = imxtm->gpt->set_next_event;
        ced->rating = 200;
        ced->cpumask = cpumask_of(0);
        ced->irq = imxtm->irq;
        clockevents_config_and_register(ced, clk_get_rate(imxtm->clk_per),
                                        0xff, 0xfffffffe);

        return request_irq(imxtm->irq, mxc_timer_interrupt,
                           IRQF_TIMER | IRQF_IRQPOLL, "i.MX Timer Tick", ced);
}

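/*
 * Per-variant TCTL setup: all variants enable the timer (TEN) in
 * free-running mode (FRR); the v2-style GPTs also keep counting in WAIT
 * mode (WAITEN) and select the clock source - the 24 MHz oscillator
 * divided by 8 when the "per" clock reports 3 MHz, the peripheral clock
 * otherwise.
 */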
static void imx1_gpt_setup_tctl(struct imx_timer *imxtm)
{
        u32 tctl_val;

        tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
        writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}
#define imx21_gpt_setup_tctl imx1_gpt_setup_tctl

static void imx31_gpt_setup_tctl(struct imx_timer *imxtm)
{
        u32 tctl_val;

        tctl_val = V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
        if (clk_get_rate(imxtm->clk_per) == V2_TIMER_RATE_OSC_DIV8)
                tctl_val |= V2_TCTL_CLK_OSC_DIV8;
        else
                tctl_val |= V2_TCTL_CLK_PER;

        writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}

static void imx6dl_gpt_setup_tctl(struct imx_timer *imxtm)
{
        u32 tctl_val;

        tctl_val = V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
        if (clk_get_rate(imxtm->clk_per) == V2_TIMER_RATE_OSC_DIV8) {
                tctl_val |= V2_TCTL_CLK_OSC_DIV8;
                /* Prescale the 24 MHz oscillator by 8: 24 / 8 = 3 MHz */
                writel_relaxed(7 << V2_TPRER_PRE24M, imxtm->base + MXC_TPRER);
                tctl_val |= V2_TCTL_24MEN;
        } else {
                tctl_val |= V2_TCTL_CLK_PER;
        }

        writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}

static const struct imx_gpt_data imx1_gpt_data = {
        .reg_tstat = MX1_2_TSTAT,
        .reg_tcn = MX1_2_TCN,
        .reg_tcmp = MX1_2_TCMP,
        .gpt_irq_enable = imx1_gpt_irq_enable,
        .gpt_irq_disable = imx1_gpt_irq_disable,
        .gpt_irq_acknowledge = imx1_gpt_irq_acknowledge,
        .gpt_setup_tctl = imx1_gpt_setup_tctl,
        .set_next_event = mx1_2_set_next_event,
};

static const struct imx_gpt_data imx21_gpt_data = {
        .reg_tstat = MX1_2_TSTAT,
        .reg_tcn = MX1_2_TCN,
        .reg_tcmp = MX1_2_TCMP,
        .gpt_irq_enable = imx21_gpt_irq_enable,
        .gpt_irq_disable = imx21_gpt_irq_disable,
        .gpt_irq_acknowledge = imx21_gpt_irq_acknowledge,
        .gpt_setup_tctl = imx21_gpt_setup_tctl,
        .set_next_event = mx1_2_set_next_event,
};

static const struct imx_gpt_data imx31_gpt_data = {
        .reg_tstat = V2_TSTAT,
        .reg_tcn = V2_TCN,
        .reg_tcmp = V2_TCMP,
        .gpt_irq_enable = imx31_gpt_irq_enable,
        .gpt_irq_disable = imx31_gpt_irq_disable,
        .gpt_irq_acknowledge = imx31_gpt_irq_acknowledge,
        .gpt_setup_tctl = imx31_gpt_setup_tctl,
        .set_next_event = v2_set_next_event,
};

static const struct imx_gpt_data imx6dl_gpt_data = {
        .reg_tstat = V2_TSTAT,
        .reg_tcn = V2_TCN,
        .reg_tcmp = V2_TCMP,
        .gpt_irq_enable = imx6dl_gpt_irq_enable,
        .gpt_irq_disable = imx6dl_gpt_irq_disable,
        .gpt_irq_acknowledge = imx6dl_gpt_irq_acknowledge,
        .gpt_setup_tctl = imx6dl_gpt_setup_tctl,
        .set_next_event = v2_set_next_event,
};

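/*
 * Common init path: pick the ops table for the GPT variant, enable the
 * ipg/per clocks, reset TCTL/TPRER to a known state, program TCTL via the
 * variant hook, then register the clocksource and the clock event device.
 */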
static int __init _mxc_timer_init(struct imx_timer *imxtm)
{
        int ret;

        switch (imxtm->type) {
        case GPT_TYPE_IMX1:
                imxtm->gpt = &imx1_gpt_data;
                break;
        case GPT_TYPE_IMX21:
                imxtm->gpt = &imx21_gpt_data;
                break;
        case GPT_TYPE_IMX31:
                imxtm->gpt = &imx31_gpt_data;
                break;
        case GPT_TYPE_IMX6DL:
                imxtm->gpt = &imx6dl_gpt_data;
                break;
        default:
                return -EINVAL;
        }

        if (IS_ERR(imxtm->clk_per)) {
                pr_err("i.MX timer: unable to get clk\n");
                return PTR_ERR(imxtm->clk_per);
        }

        if (!IS_ERR(imxtm->clk_ipg))
                clk_prepare_enable(imxtm->clk_ipg);

        clk_prepare_enable(imxtm->clk_per);

        /*
         * Initialise to a known state (timer off, prescaler reset)
         */
        writel_relaxed(0, imxtm->base + MXC_TCTL);
        writel_relaxed(0, imxtm->base + MXC_TPRER);

        imxtm->gpt->gpt_setup_tctl(imxtm);

        /* init and register the timer to the framework */
        ret = mxc_clocksource_init(imxtm);
        if (ret)
                return ret;

        return mxc_clockevent_init(imxtm);
}

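/*
 * Legacy (non-DT) entry point, intended for board code that passes the
 * physical base address and IRQ number directly; the clocks come from the
 * "imx-gpt.0" clkdev entries.
 */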
void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
{
        struct imx_timer *imxtm;

        imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
        BUG_ON(!imxtm);

        imxtm->clk_per = clk_get_sys("imx-gpt.0", "per");
        imxtm->clk_ipg = clk_get_sys("imx-gpt.0", "ipg");

        imxtm->base = ioremap(pbase, SZ_4K);
        BUG_ON(!imxtm->base);

        imxtm->type = type;
        imxtm->irq = irq;

        _mxc_timer_init(imxtm);
}

static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type)
{
        struct imx_timer *imxtm;
        static int initialized;
        int ret;

        /* Support one instance only */
        if (initialized)
                return 0;

        imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
        if (!imxtm)
                return -ENOMEM;

        imxtm->base = of_iomap(np, 0);
        if (!imxtm->base)
                return -ENXIO;

        imxtm->irq = irq_of_parse_and_map(np, 0);
        if (imxtm->irq <= 0)
                return -EINVAL;

        imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");

        /* Try osc_per first, and fall back to per otherwise */
        imxtm->clk_per = of_clk_get_by_name(np, "osc_per");
        if (IS_ERR(imxtm->clk_per))
                imxtm->clk_per = of_clk_get_by_name(np, "per");

        imxtm->type = type;

        ret = _mxc_timer_init(imxtm);
        if (ret)
                return ret;

        initialized = 1;

        return 0;
}

static int __init imx1_timer_init_dt(struct device_node *np)
{
        return mxc_timer_init_dt(np, GPT_TYPE_IMX1);
}

static int __init imx21_timer_init_dt(struct device_node *np)
{
        return mxc_timer_init_dt(np, GPT_TYPE_IMX21);
}

static int __init imx31_timer_init_dt(struct device_node *np)
{
        enum imx_gpt_type type = GPT_TYPE_IMX31;

        /*
         * The i.MX6DL GPT was originally described in device trees with the
         * i.MX31-compatible string even though its programming model
         * differs, so check the machine compatible here to select the
         * correct GPT type for such legacy device trees.
         */
        if (of_machine_is_compatible("fsl,imx6dl"))
                type = GPT_TYPE_IMX6DL;

        return mxc_timer_init_dt(np, type);
}

static int __init imx6dl_timer_init_dt(struct device_node *np)
{
        return mxc_timer_init_dt(np, GPT_TYPE_IMX6DL);
}

TIMER_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
TIMER_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt);
TIMER_OF_DECLARE(imx27_timer, "fsl,imx27-gpt", imx21_timer_init_dt);
TIMER_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx51_timer, "fsl,imx51-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx53_timer, "fsl,imx53-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx6q_timer, "fsl,imx6q-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx6dl_timer, "fsl,imx6dl-gpt", imx6dl_timer_init_dt);
TIMER_OF_DECLARE(imx6sl_timer, "fsl,imx6sl-gpt", imx6dl_timer_init_dt);
TIMER_OF_DECLARE(imx6sx_timer, "fsl,imx6sx-gpt", imx6dl_timer_init_dt);
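
/*
 * The TIMER_OF_DECLARE() entries above bind the init functions to the GPT
 * compatible strings, so the timer is set up early from the matching
 * device-tree node.  A minimal, purely illustrative node (unit address,
 * interrupt and clock specifiers are hypothetical and board specific)
 * could look like:
 *
 *	gpt: timer@2098000 {
 *		compatible = "fsl,imx6dl-gpt";
 *		reg = <0x02098000 0x4000>;
 *		interrupts = <0 55 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&clks 119>, <&clks 120>;
 *		clock-names = "ipg", "per";
 *	};
 *
 * The clock-names "ipg" and "per" (or "osc_per") are the ones looked up in
 * mxc_timer_init_dt() above.
 */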