// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/sched_clock.h>
#include <linux/cpu.h>
#include <linux/of_irq.h>
#include <asm/reg_ops.h>

#include "timer-of.h"

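/*
 * The timer is programmed through C-SKY CPU control registers
 * (selector 14), accessed with the mfcr/mtcr instructions from
 * <asm/reg_ops.h>. As used below:
 *
 *   CCVR - current counter value, read as the clocksource
 *   CTLR - control: writing 1 enables the timer, 0 disables it
 *   LVR  - load value for the next one-shot event
 *   TSR  - status: written with 0 to acknowledge the interrupt
 */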
#define PTIM_CCVR	"cr<3, 14>"
#define PTIM_CTLR	"cr<0, 14>"
#define PTIM_LVR	"cr<6, 14>"
#define PTIM_TSR	"cr<1, 14>"

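/*
 * Every core's private timer raises the same irq number, so a single
 * percpu irq serves all CPUs.
 */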
static int csky_mptimer_irq;

static int csky_mptimer_set_next_event(unsigned long delta,
				       struct clock_event_device *ce)
{
	mtcr(PTIM_LVR, delta);

	return 0;
}

static int csky_mptimer_shutdown(struct clock_event_device *ce)
{
	mtcr(PTIM_CTLR, 0);

	return 0;
}

static int csky_mptimer_oneshot(struct clock_event_device *ce)
{
	mtcr(PTIM_CTLR, 1);

	return 0;
}

static int csky_mptimer_oneshot_stopped(struct clock_event_device *ce)
{
	mtcr(PTIM_CTLR, 0);

	return 0;
}

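/*
 * One clockevent instance per core, driven by that core's private
 * timer; the rate comes from the DT clock that timer_of_init()
 * parses (TIMER_OF_CLOCK).
 */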
static DEFINE_PER_CPU(struct timer_of, csky_to) = {
	.flags = TIMER_OF_CLOCK,
	.clkevt = {
		.rating = 300,
		.features = CLOCK_EVT_FEAT_PERCPU |
			    CLOCK_EVT_FEAT_ONESHOT,
		.set_state_shutdown = csky_mptimer_shutdown,
		.set_state_oneshot = csky_mptimer_oneshot,
		.set_state_oneshot_stopped = csky_mptimer_oneshot_stopped,
		.set_next_event = csky_mptimer_set_next_event,
	},
};

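/*
 * Percpu interrupt handler: acknowledge the timer by clearing the
 * status register, then run this core's clockevent handler.
 */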
static irqreturn_t csky_timer_interrupt(int irq, void *dev)
{
	struct timer_of *to = this_cpu_ptr(&csky_to);

	mtcr(PTIM_TSR, 0);

	to->clkevt.event_handler(&to->clkevt);

	return IRQ_HANDLED;
}

/*
 * CPU hotplug callbacks: bring each core's clockevent and percpu irq
 * up when the core starts, and mask the irq again when it dies.
 */
static int csky_mptimer_starting_cpu(unsigned int cpu)
{
	struct timer_of *to = per_cpu_ptr(&csky_to, cpu);

	to->clkevt.cpumask = cpumask_of(cpu);

	enable_percpu_irq(csky_mptimer_irq, 0);

	clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
					2, ULONG_MAX);

	return 0;
}

static int csky_mptimer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(csky_mptimer_irq);

	return 0;
}

/*
 * Clocksource: sched_clock and the clocksource both read the
 * free-running CCVR counter.
 */
static u64 notrace sched_clock_read(void)
{
	return (u64)mfcr(PTIM_CCVR);
}

static u64 clksrc_read(struct clocksource *c)
{
	return (u64)mfcr(PTIM_CCVR);
}

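/* The counter is 32 bits wide; CLOCKSOURCE_MASK(32) tells the core where it wraps. */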
static struct clocksource csky_clocksource = {
	.name	= "csky",
	.rating	= 400,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
	.read	= clksrc_read,
};

static int __init csky_mptimer_init(struct device_node *np)
{
	int ret, cpu, cpu_rollback;
	struct timer_of *to = NULL;

	/*
	 * The mptimer is designed for C-SKY SMP: every core has its own
	 * private irq and registers for the clockevent and clocksource.
	 *
	 * The registers are accessed through the mfcr/mtcr instructions
	 * rather than an mmio mapping, so the DT node needs no reg
	 * property, only the clock and the irq number.
	 *
	 * The irq is private to each core but shares one number across
	 * all of them, hence the single request_percpu_irq() below.
	 */
	csky_mptimer_irq = irq_of_parse_and_map(np, 0);
	if (csky_mptimer_irq <= 0)
		return -EINVAL;

	ret = request_percpu_irq(csky_mptimer_irq, csky_timer_interrupt,
				 "csky_mp_timer", &csky_to);
	if (ret)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		to = per_cpu_ptr(&csky_to, cpu);
		ret = timer_of_init(np, to);
		if (ret)
			goto rollback;
	}

	clocksource_register_hz(&csky_clocksource, timer_of_rate(to));
	sched_clock_register(sched_clock_read, 32, timer_of_rate(to));

	ret = cpuhp_setup_state(CPUHP_AP_CSKY_TIMER_STARTING,
				"clockevents/csky/timer:starting",
				csky_mptimer_starting_cpu,
				csky_mptimer_dying_cpu);
	if (ret)
		return -EINVAL;

	return 0;

rollback:
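	/* Undo timer_of_init() on the CPUs that were already initialised. */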
	for_each_possible_cpu(cpu_rollback) {
		if (cpu_rollback == cpu)
			break;

		to = per_cpu_ptr(&csky_to, cpu_rollback);
		timer_of_cleanup(to);
	}
	return -EINVAL;
}
TIMER_OF_DECLARE(csky_mptimer, "csky,mptimer", csky_mptimer_init);