// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Exynos4 MCT (Multi-Core Timer) support
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/clocksource.h>
#include <linux/sched_clock.h>

#define EXYNOS4_MCTREG(x) (x)
#define EXYNOS4_MCT_G_CNT_L EXYNOS4_MCTREG(0x100)
#define EXYNOS4_MCT_G_CNT_U EXYNOS4_MCTREG(0x104)
#define EXYNOS4_MCT_G_CNT_WSTAT EXYNOS4_MCTREG(0x110)
#define EXYNOS4_MCT_G_COMP0_L EXYNOS4_MCTREG(0x200)
#define EXYNOS4_MCT_G_COMP0_U EXYNOS4_MCTREG(0x204)
#define EXYNOS4_MCT_G_COMP0_ADD_INCR EXYNOS4_MCTREG(0x208)
#define EXYNOS4_MCT_G_TCON EXYNOS4_MCTREG(0x240)
#define EXYNOS4_MCT_G_INT_CSTAT EXYNOS4_MCTREG(0x244)
#define EXYNOS4_MCT_G_INT_ENB EXYNOS4_MCTREG(0x248)
#define EXYNOS4_MCT_G_WSTAT EXYNOS4_MCTREG(0x24C)
#define _EXYNOS4_MCT_L_BASE EXYNOS4_MCTREG(0x300)
#define EXYNOS4_MCT_L_BASE(x) (_EXYNOS4_MCT_L_BASE + (0x100 * x))
#define EXYNOS4_MCT_L_MASK (0xffffff00)

#define MCT_L_TCNTB_OFFSET (0x00)
#define MCT_L_ICNTB_OFFSET (0x08)
#define MCT_L_TCON_OFFSET (0x20)
#define MCT_L_INT_CSTAT_OFFSET (0x30)
#define MCT_L_INT_ENB_OFFSET (0x34)
#define MCT_L_WSTAT_OFFSET (0x40)
#define MCT_G_TCON_START (1 << 8)
#define MCT_G_TCON_COMP0_AUTO_INC (1 << 1)
#define MCT_G_TCON_COMP0_ENABLE (1 << 0)
#define MCT_L_TCON_INTERVAL_MODE (1 << 2)
#define MCT_L_TCON_INT_START (1 << 1)
#define MCT_L_TCON_TIMER_START (1 << 0)

#define TICK_BASE_CNT 1
#ifdef CONFIG_ARM
/* Use values higher than the ARM arch timer's ratings */
#define MCT_CLKSOURCE_RATING 450
#define MCT_CLKEVENTS_RATING 500
#else
#define MCT_CLKSOURCE_RATING 350
#define MCT_CLKEVENTS_RATING 350
#endif

/* There are four Global timers starting with zero interrupt */
#define MCT_G0_IRQ 0
/* Local timer interrupts starting from 4 */
#define MCT_L0_IRQ 4
/* Max number of IRQs as per the DT binding document */
#define MCT_NR_IRQS 20

enum {
	MCT_INT_SPI,
	MCT_INT_PPI
};

static void __iomem *reg_base;
static unsigned long clk_rate;
static unsigned int mct_int_type;
static int mct_irqs[MCT_NR_IRQS];

struct mct_clock_event_device {
	struct clock_event_device evt;
	unsigned long base;

	/*
	 * The length of the name must be adjusted if the number of
	 * local timer interrupts grows beyond two digits.
	 */
	char name[11];
};
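
/*
 * MCT register writes only take effect once the hardware acknowledges them
 * in the matching write-status (WSTAT) register. Poll for the status bit,
 * clear it, and panic if the write never completes.
 */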
static void exynos4_mct_write(unsigned int value, unsigned long offset)
{
	unsigned long stat_addr;
	u32 mask;
	u32 i;

	writel_relaxed(value, reg_base + offset);

	if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
		stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
		switch (offset & ~EXYNOS4_MCT_L_MASK) {
		case MCT_L_TCON_OFFSET:
			mask = 1 << 3;		/* L_TCON write status */
			break;
		case MCT_L_ICNTB_OFFSET:
			mask = 1 << 1;		/* L_ICNTB write status */
			break;
		case MCT_L_TCNTB_OFFSET:
			mask = 1 << 0;		/* L_TCNTB write status */
			break;
		default:
			return;
		}
	} else {
		switch (offset) {
		case EXYNOS4_MCT_G_TCON:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 16;		/* G_TCON write status */
			break;
		case EXYNOS4_MCT_G_COMP0_L:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 0;		/* G_COMP0_L write status */
			break;
		case EXYNOS4_MCT_G_COMP0_U:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 1;		/* G_COMP0_U write status */
			break;
		case EXYNOS4_MCT_G_COMP0_ADD_INCR:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 2;		/* G_COMP0_ADD_INCR write status */
			break;
		case EXYNOS4_MCT_G_CNT_L:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 0;		/* G_CNT_L write status */
			break;
		case EXYNOS4_MCT_G_CNT_U:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 1;		/* G_CNT_U write status */
			break;
		default:
			return;
		}
	}

	/* Wait a maximum of 1 ms until the written value is applied */
	for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
		if (readl_relaxed(reg_base + stat_addr) & mask) {
			writel_relaxed(mask, reg_base + stat_addr);
			return;
		}

	panic("MCT hangs after writing %d (offset:0x%lx)\n", value, offset);
}

/* Clocksource handling */
static void exynos4_mct_frc_start(void)
{
	u32 reg;

	reg = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
	reg |= MCT_G_TCON_START;
	exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
}

/**
 * exynos4_read_count_64 - Read all 64 bits of the global counter
 *
 * This reads all 64 bits of the global counter, taking care that the upper
 * and lower halves match. Note that reading the MCT can be quite slow
 * (hundreds of nanoseconds), so use the 32-bit (lower half only) version
 * when possible.
 *
 * Returns the number of cycles in the global counter.
 */
static u64 exynos4_read_count_64(void)
{
	unsigned int lo, hi;
	u32 hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);

	do {
		hi = hi2;
		lo = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
		hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
	} while (hi != hi2);

	return ((u64)hi << 32) | lo;
}

/**
 * exynos4_read_count_32 - Read the lower 32 bits of the global counter
 *
 * This reads just the lower 32 bits of the global counter. It is marked
 * notrace so it can be used by the scheduler clock.
 *
 * Returns the lower 32 bits of the global counter.
 */
static u32 notrace exynos4_read_count_32(void)
{
	return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
}

static u64 exynos4_frc_read(struct clocksource *cs)
{
	return exynos4_read_count_32();
}

static void exynos4_frc_resume(struct clocksource *cs)
{
	exynos4_mct_frc_start();
}

static struct clocksource mct_frc = {
	.name = "mct-frc",
	.rating = MCT_CLKSOURCE_RATING,
	.read = exynos4_frc_read,
	.mask = CLOCKSOURCE_MASK(32),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
	.resume = exynos4_frc_resume,
};

static u64 notrace exynos4_read_sched_clock(void)
{
	return exynos4_read_count_32();
}

#if defined(CONFIG_ARM)
static struct delay_timer exynos4_delay_timer;

static cycles_t exynos4_read_current_timer(void)
{
	BUILD_BUG_ON_MSG(sizeof(cycles_t) != sizeof(u32),
			 "cycles_t needs to move to 32-bit for ARM64 usage");
	return exynos4_read_count_32();
}
#endif

static int __init exynos4_clocksource_init(void)
{
	exynos4_mct_frc_start();

#if defined(CONFIG_ARM)
	exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer;
	exynos4_delay_timer.freq = clk_rate;
	register_current_timer_delay(&exynos4_delay_timer);
#endif

	if (clocksource_register_hz(&mct_frc, clk_rate))
		panic("%s: can't register clocksource\n", mct_frc.name);

	sched_clock_register(exynos4_read_sched_clock, 32, clk_rate);

	return 0;
}
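
/*
 * Global comparator 0 is used as a system-wide tick: a clock_event_device
 * bound to CPU0 (see exynos4_clockevent_init() below).
 */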
static void exynos4_mct_comp0_stop(void)
{
	unsigned int tcon;

	tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
	tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);

	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
	exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
}

static void exynos4_mct_comp0_start(bool periodic, unsigned long cycles)
{
	unsigned int tcon;
	u64 comp_cycle;

	tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);

	if (periodic) {
		tcon |= MCT_G_TCON_COMP0_AUTO_INC;
		exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
	}

	comp_cycle = exynos4_read_count_64() + cycles;
	exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
	exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB);

	tcon |= MCT_G_TCON_COMP0_ENABLE;
	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
}

static int exynos4_comp_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	exynos4_mct_comp0_start(false, cycles);

	return 0;
}

static int mct_set_state_shutdown(struct clock_event_device *evt)
{
	exynos4_mct_comp0_stop();
	return 0;
}

static int mct_set_state_periodic(struct clock_event_device *evt)
{
	unsigned long cycles_per_jiffy;

	cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult)
			    >> evt->shift);
	exynos4_mct_comp0_stop();
	exynos4_mct_comp0_start(true, cycles_per_jiffy);
	return 0;
}

static struct clock_event_device mct_comp_device = {
	.name = "mct-comp",
	.features = CLOCK_EVT_FEAT_PERIODIC |
		    CLOCK_EVT_FEAT_ONESHOT,
	.rating = 250,
	.set_next_event = exynos4_comp_set_next_event,
	.set_state_periodic = mct_set_state_periodic,
	.set_state_shutdown = mct_set_state_shutdown,
	.set_state_oneshot = mct_set_state_shutdown,
	.set_state_oneshot_stopped = mct_set_state_shutdown,
	.tick_resume = mct_set_state_shutdown,
};

static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static int exynos4_clockevent_init(void)
{
	mct_comp_device.cpumask = cpumask_of(0);
	clockevents_config_and_register(&mct_comp_device, clk_rate,
					0xf, 0xffffffff);
	if (request_irq(mct_irqs[MCT_G0_IRQ], exynos4_mct_comp_isr,
			IRQF_TIMER | IRQF_IRQPOLL, "mct_comp_irq",
			&mct_comp_device))
		pr_err("%s: request_irq() failed\n", "mct_comp_irq");

	return 0;
}

static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);

/* Clock event handling for the per-CPU local timers */
static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
{
	unsigned long tmp;
	unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
	unsigned long offset = mevt->base + MCT_L_TCON_OFFSET;

	tmp = readl_relaxed(reg_base + offset);
	if (tmp & mask) {
		tmp &= ~mask;
		exynos4_mct_write(tmp, offset);
	}
}

static void exynos4_mct_tick_start(unsigned long cycles,
				   struct mct_clock_event_device *mevt)
{
	unsigned long tmp;

	exynos4_mct_tick_stop(mevt);

	tmp = (1 << 31) | cycles;	/* MCT_L_UPDATE_ICNTB */

	/* update the interrupt count buffer */
	exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET);

	/* enable the MCT tick interrupt */
	exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);

	tmp = readl_relaxed(reg_base + mevt->base + MCT_L_TCON_OFFSET);
	tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
	       MCT_L_TCON_INTERVAL_MODE;
	exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
}

static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
{
	/* Clear the MCT tick interrupt */
	if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
}

static int exynos4_tick_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt;

	mevt = container_of(evt, struct mct_clock_event_device, evt);
	exynos4_mct_tick_start(cycles, mevt);
	return 0;
}

static int set_state_shutdown(struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt;

	mevt = container_of(evt, struct mct_clock_event_device, evt);
	exynos4_mct_tick_stop(mevt);
	exynos4_mct_tick_clear(mevt);
	return 0;
}

static int set_state_periodic(struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt;
	unsigned long cycles_per_jiffy;

	mevt = container_of(evt, struct mct_clock_event_device, evt);
	cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult)
			    >> evt->shift);
	exynos4_mct_tick_stop(mevt);
	exynos4_mct_tick_start(cycles_per_jiffy, mevt);
	return 0;
}

static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
	struct mct_clock_event_device *mevt = dev_id;
	struct clock_event_device *evt = &mevt->evt;

	/*
	 * This supports oneshot mode: the local timer runs in interval
	 * mode and would keep generating interrupts periodically unless
	 * it is stopped explicitly here.
	 */
	if (!clockevent_state_periodic(&mevt->evt))
		exynos4_mct_tick_stop(mevt);

	exynos4_mct_tick_clear(mevt);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}
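
/*
 * CPU hotplug callbacks: program the per-CPU local timer and its interrupt
 * when a CPU comes online, and shut them down again when it goes offline.
 */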
static int exynos4_mct_starting_cpu(unsigned int cpu)
{
	struct mct_clock_event_device *mevt =
		per_cpu_ptr(&percpu_mct_tick, cpu);
	struct clock_event_device *evt = &mevt->evt;

	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
	snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);

	evt->name = mevt->name;
	evt->cpumask = cpumask_of(cpu);
	evt->set_next_event = exynos4_tick_set_next_event;
	evt->set_state_periodic = set_state_periodic;
	evt->set_state_shutdown = set_state_shutdown;
	evt->set_state_oneshot = set_state_shutdown;
	evt->set_state_oneshot_stopped = set_state_shutdown;
	evt->tick_resume = set_state_shutdown;
	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
			CLOCK_EVT_FEAT_PERCPU;
	evt->rating = MCT_CLKEVENTS_RATING;

	exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);

	if (mct_int_type == MCT_INT_SPI) {

		if (evt->irq == -1)
			return -EIO;

		irq_force_affinity(evt->irq, cpumask_of(cpu));
		enable_irq(evt->irq);
	} else {
		enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
	}
	clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
					0xf, 0x7fffffff);

	return 0;
}

static int exynos4_mct_dying_cpu(unsigned int cpu)
{
	struct mct_clock_event_device *mevt =
		per_cpu_ptr(&percpu_mct_tick, cpu);
	struct clock_event_device *evt = &mevt->evt;

	evt->set_state_shutdown(evt);
	if (mct_int_type == MCT_INT_SPI) {
		if (evt->irq != -1)
			disable_irq_nosync(evt->irq);
		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
	} else {
		disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
	}
	return 0;
}

static int __init exynos4_timer_resources(struct device_node *np)
{
	struct clk *mct_clk, *tick_clk;

	reg_base = of_iomap(np, 0);
	if (!reg_base)
		panic("%s: unable to ioremap mct address space\n", __func__);

	tick_clk = of_clk_get_by_name(np, "fin_pll");
	if (IS_ERR(tick_clk))
		panic("%s: unable to determine tick clock rate\n", __func__);
	clk_rate = clk_get_rate(tick_clk);

	mct_clk = of_clk_get_by_name(np, "mct");
	if (IS_ERR(mct_clk))
		panic("%s: unable to retrieve mct clock instance\n", __func__);
	clk_prepare_enable(mct_clk);

	return 0;
}

static int __init exynos4_timer_interrupts(struct device_node *np,
					   unsigned int int_type)
{
	int nr_irqs, i, err, cpu;

	mct_int_type = int_type;

	/* This driver uses only one global timer interrupt */
	mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ);

	/*
	 * Find out the number of local irqs specified. The local
	 * timer irqs are specified after the four global timer
	 * irqs.
	 */
	nr_irqs = of_irq_count(np);
	if (nr_irqs > ARRAY_SIZE(mct_irqs)) {
		pr_err("exynos-mct: too many (%d) interrupts configured in DT\n",
		       nr_irqs);
		nr_irqs = ARRAY_SIZE(mct_irqs);
	}
	for (i = MCT_L0_IRQ; i < nr_irqs; i++)
		mct_irqs[i] = irq_of_parse_and_map(np, i);

	if (mct_int_type == MCT_INT_PPI) {

		err = request_percpu_irq(mct_irqs[MCT_L0_IRQ],
					 exynos4_mct_tick_isr, "MCT",
					 &percpu_mct_tick);
		WARN(err, "MCT: can't request IRQ %d (%d)\n",
		     mct_irqs[MCT_L0_IRQ], err);
	} else {
		for_each_possible_cpu(cpu) {
			int mct_irq;
			struct mct_clock_event_device *pcpu_mevt =
				per_cpu_ptr(&percpu_mct_tick, cpu);

			pcpu_mevt->evt.irq = -1;
			if (MCT_L0_IRQ + cpu >= ARRAY_SIZE(mct_irqs))
				break;
			mct_irq = mct_irqs[MCT_L0_IRQ + cpu];

			irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
			if (request_irq(mct_irq,
					exynos4_mct_tick_isr,
					IRQF_TIMER | IRQF_NOBALANCING,
					pcpu_mevt->name, pcpu_mevt)) {
				pr_err("exynos-mct: cannot register IRQ (cpu%d)\n",
				       cpu);

				continue;
			}
			pcpu_mevt->evt.irq = mct_irq;
		}
	}

	/* Install hotplug callbacks which configure the timer on this CPU */
	err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
				"clockevents/exynos4/mct_timer:starting",
				exynos4_mct_starting_cpu,
				exynos4_mct_dying_cpu);
	if (err)
		goto out_irq;

	return 0;

out_irq:
	if (mct_int_type == MCT_INT_PPI) {
		free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
	} else {
		for_each_possible_cpu(cpu) {
			struct mct_clock_event_device *pcpu_mevt =
				per_cpu_ptr(&percpu_mct_tick, cpu);

			if (pcpu_mevt->evt.irq != -1) {
				free_irq(pcpu_mevt->evt.irq, pcpu_mevt);
				pcpu_mevt->evt.irq = -1;
			}
		}
	}
	return err;
}
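
/*
 * Probe path: map the registers and clocks, set up the interrupts, then
 * register the free-running counter as a clocksource and the comparator
 * and local timers as clockevent devices.
 */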
static int __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
	int ret;

	ret = exynos4_timer_resources(np);
	if (ret)
		return ret;

	ret = exynos4_timer_interrupts(np, int_type);
	if (ret)
		return ret;

	ret = exynos4_clocksource_init();
	if (ret)
		return ret;

	return exynos4_clockevent_init();
}

static int __init mct_init_spi(struct device_node *np)
{
	return mct_init_dt(np, MCT_INT_SPI);
}

static int __init mct_init_ppi(struct device_node *np)
{
	return mct_init_dt(np, MCT_INT_PPI);
}
TIMER_OF_DECLARE(exynos4210, "samsung,exynos4210-mct", mct_init_spi);
TIMER_OF_DECLARE(exynos4412, "samsung,exynos4412-mct", mct_init_ppi);