// SPDX-License-Identifier: GPL-2.0

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>

#define TIMER0_FREQ 1000000
#define GXP_TIMER_CNT_OFS 0x00
#define GXP_TIMESTAMP_OFS 0x08
#define GXP_TIMER_CTRL_OFS 0x14
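/* TCS: Timer Control/Status register bits */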
#define MASK_TCS_ENABLE 0x01
#define MASK_TCS_PERIOD 0x02
#define MASK_TCS_RELOAD 0x04
#define MASK_TCS_TC 0x80

struct gxp_timer {
	void __iomem *counter;
	void __iomem *control;
	struct clock_event_device evt;
};

static struct gxp_timer *gxp_timer;

static void __iomem *system_clock __ro_after_init;

static inline struct gxp_timer *to_gxp_timer(struct clock_event_device *evt_dev)
{
	return container_of(evt_dev, struct gxp_timer, evt);
}

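/* sched_clock() read callback: the TIMESTAMP register is a 32-bit up-counter */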
static u64 notrace gxp_sched_read(void)
{
	return readl_relaxed(system_clock);
}

static int gxp_time_set_next_event(unsigned long event, struct clock_event_device *evt_dev)
{
	struct gxp_timer *timer = to_gxp_timer(evt_dev);
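	/* Stop the timer and ack any pending terminal count, load the new count, then re-enable */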
	writeb_relaxed(MASK_TCS_TC, timer->control);
	writel_relaxed(event, timer->counter);
	writeb_relaxed(MASK_TCS_TC | MASK_TCS_ENABLE, timer->control);

	return 0;
}

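/*
 * The interrupt line is requested with IRQF_SHARED, so return IRQ_NONE
 * unless the terminal-count (TC) bit is actually set for this timer.
 */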
static irqreturn_t gxp_timer_interrupt(int irq, void *dev_id)
{
	struct gxp_timer *timer = dev_id;

	if (!(readb_relaxed(timer->control) & MASK_TCS_TC))
		return IRQ_NONE;

	writeb_relaxed(MASK_TCS_TC, timer->control);

	timer->evt.event_handler(&timer->evt);

	return IRQ_HANDLED;
}

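/*
 * Early init, run via TIMER_OF_DECLARE(): map the register block, register
 * the TIMESTAMP counter as clocksource and sched_clock source, and set up
 * TIMER0 as a oneshot clockevent driven by its interrupt.
 */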
static int __init gxp_timer_init(struct device_node *node)
{
	void __iomem *base;
	struct clk *clk;
	u32 freq;
	int ret, irq;

	gxp_timer = kzalloc(sizeof(*gxp_timer), GFP_KERNEL);
	if (!gxp_timer) {
		ret = -ENOMEM;
		pr_err("Can't allocate gxp_timer\n");
		return ret;
	}

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		ret = (int)PTR_ERR(clk);
		pr_err("%pOFn clock not found: %d\n", node, ret);
		goto err_free;
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		pr_err("%pOFn clock enable failed: %d\n", node, ret);
		goto err_clk_enable;
	}

	base = of_iomap(node, 0);
	if (!base) {
		ret = -ENXIO;
		pr_err("Can't map timer base registers\n");
		goto err_iomap;
	}
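	/* Split the shared register block into counter, control and timestamp */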
	gxp_timer->counter = base + GXP_TIMER_CNT_OFS;
	gxp_timer->control = base + GXP_TIMER_CTRL_OFS;
	system_clock = base + GXP_TIMESTAMP_OFS;

	gxp_timer->evt.name = node->name;
	gxp_timer->evt.rating = 300;
	gxp_timer->evt.features = CLOCK_EVT_FEAT_ONESHOT;
	gxp_timer->evt.set_next_event = gxp_time_set_next_event;
	gxp_timer->evt.cpumask = cpumask_of(0);

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0) {
		ret = -EINVAL;
		pr_err("%pOFn can't parse IRQ %d\n", node, irq);
		goto err_exit;
	}

	freq = clk_get_rate(clk);

	ret = clocksource_mmio_init(system_clock, node->name, freq,
				    300, 32, clocksource_mmio_readl_up);
	if (ret) {
		pr_err("%pOFn init clocksource failed: %d\n", node, ret);
		goto err_exit;
	}

	sched_clock_register(gxp_sched_read, 32, freq);
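	/*
	 * The clockevent ticks at the fixed TIMER0_FREQ (1 MHz); the TIMESTAMP
	 * clocksource above runs at the rate reported by the clock instead.
	 */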
	clockevents_config_and_register(&gxp_timer->evt, TIMER0_FREQ,
					0xf, 0xffffffff);

	ret = request_irq(irq, gxp_timer_interrupt, IRQF_TIMER | IRQF_SHARED,
			  node->name, gxp_timer);
	if (ret) {
		pr_err("%pOFn request_irq() failed: %d\n", node, ret);
		goto err_exit;
	}

	pr_debug("gxp: system timer (irq = %d)\n", irq);
	return 0;

err_exit:
	iounmap(base);
err_iomap:
	clk_disable_unprepare(clk);
err_clk_enable:
	clk_put(clk);
err_free:
	kfree(gxp_timer);
	return ret;
}
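/*
 * The timer itself is brought up early through TIMER_OF_DECLARE(); this probe
 * only creates the "gxp-wdt" child device, which reuses the timer's counter
 * register.
 */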
static int gxp_timer_probe(struct platform_device *pdev)
{
	struct platform_device *gxp_watchdog_device;
	struct device *dev = &pdev->dev;
	int ret;

	if (!gxp_timer) {
		pr_err("GXP timer not initialized, cannot create watchdog\n");
		return -ENOMEM;
	}

	gxp_watchdog_device = platform_device_alloc("gxp-wdt", -1);
	if (!gxp_watchdog_device) {
		pr_err("Timer failed to allocate gxp-wdt\n");
		return -ENOMEM;
	}

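	/* Hand the timer's counter base to the watchdog via platform_data */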
	gxp_watchdog_device->dev.platform_data = gxp_timer->counter;
	gxp_watchdog_device->dev.parent = dev;

	ret = platform_device_add(gxp_watchdog_device);
	if (ret)
		platform_device_put(gxp_watchdog_device);

	return ret;
}

static const struct of_device_id gxp_timer_of_match[] = {
	{ .compatible = "hpe,gxp-timer", },
	{},
};

static struct platform_driver gxp_timer_driver = {
	.probe = gxp_timer_probe,
	.driver = {
		.name = "gxp-timer",
		.of_match_table = gxp_timer_of_match,
		.suppress_bind_attrs = true,
	},
};

builtin_platform_driver(gxp_timer_driver);

TIMER_OF_DECLARE(gxp, "hpe,gxp-timer", gxp_timer_init);