0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/clk.h>
0009 #include <linux/clk-provider.h>
0010 #include <linux/err.h>
0011 #include <linux/if_vlan.h>
0012 #include <linux/interrupt.h>
0013 #include <linux/module.h>
0014 #include <linux/netdevice.h>
0015 #include <linux/net_tstamp.h>
0016 #include <linux/of.h>
0017 #include <linux/of_irq.h>
0018 #include <linux/platform_device.h>
0019 #include <linux/pm_runtime.h>
0020 #include <linux/ptp_classify.h>
0021 #include <linux/ptp_clock_kernel.h>
0022
0023 #include "am65-cpts.h"
0024
/* Register layout of a single GENF (periodic signal generator) block.
 * The ESTF blocks share the same layout (see am65_cpts_regs below).
 */
struct am65_genf_regs {
	u32 comp_lo;	/* Comparison Low Value 0:31 */
	u32 comp_hi;	/* Comparison High Value 32:63 */
	u32 control;	/* Control */
	u32 length;	/* Length */
	u32 ppm_low;	/* PPM Load Low Value 0:31 */
	u32 ppm_hi;	/* PPM Load High Value 32:63 */
	u32 ts_nudge;	/* Nudge value */
} __aligned(32) __packed;

#define AM65_CPTS_GENF_MAX_NUM 9
#define AM65_CPTS_ESTF_MAX_NUM 8
0037
/* CPTS register map. Field order mirrors the hardware layout — do not
 * reorder. The GENF/ESTF blocks follow a reserved gap.
 */
struct am65_cpts_regs {
	u32 idver;		/* Identification and Version */
	u32 control;		/* Time Sync Control */
	u32 rftclk_sel;		/* Reference Clock Select */
	u32 ts_push;		/* Time Stamp Event Push */
	u32 ts_load_val_lo;	/* Time Stamp Load Value 0:31 */
	u32 ts_load_en;		/* Time Stamp Load Enable */
	u32 ts_comp_lo;		/* Time Stamp Comparison Low */
	u32 ts_comp_length;	/* Time Stamp Comparison Length */
	u32 intstat_raw;	/* Interrupt Status Raw */
	u32 intstat_masked;	/* Interrupt Status Masked */
	u32 int_enable;		/* Interrupt Enable */
	u32 ts_comp_nudge;	/* Time Stamp Comparison Nudge */
	u32 event_pop;		/* Event Pop */
	u32 event_0;		/* Event word 0 (timestamp 0:31) */
	u32 event_1;		/* Event word 1 (type/msgtype/port/seqid) */
	u32 event_2;		/* Event word 2 (domain) */
	u32 event_3;		/* Event word 3 (timestamp 32:63) */
	u32 ts_load_val_hi;	/* Time Stamp Load Value 32:63 */
	u32 ts_comp_hi;		/* Time Stamp Comparison High */
	u32 ts_add_val;		/* Time Stamp Add Value */
	u32 ts_ppm_low;		/* Time Stamp PPM Load Low Value 0:31 */
	u32 ts_ppm_hi;		/* Time Stamp PPM Load High Value 32:63 */
	u32 ts_nudge;		/* Time Stamp Nudge Value */
	u32 reserv[33];		/* reserved gap before GENF/ESTF blocks */
	struct am65_genf_regs genf[AM65_CPTS_GENF_MAX_NUM];
	struct am65_genf_regs estf[AM65_CPTS_ESTF_MAX_NUM];
};
0066
0067
/* CONTROL_REG */
#define AM65_CPTS_CONTROL_EN			BIT(0)
#define AM65_CPTS_CONTROL_INT_TEST		BIT(1)
#define AM65_CPTS_CONTROL_TS_COMP_POLARITY	BIT(2)
#define AM65_CPTS_CONTROL_TSTAMP_EN		BIT(3)
#define AM65_CPTS_CONTROL_SEQUENCE_EN		BIT(4)
#define AM65_CPTS_CONTROL_64MODE		BIT(5)
#define AM65_CPTS_CONTROL_TS_COMP_TOG		BIT(6)
#define AM65_CPTS_CONTROL_TS_PPM_DIR		BIT(7)
#define AM65_CPTS_CONTROL_HW1_TS_PUSH_EN	BIT(8)
#define AM65_CPTS_CONTROL_HW2_TS_PUSH_EN	BIT(9)
#define AM65_CPTS_CONTROL_HW3_TS_PUSH_EN	BIT(10)
#define AM65_CPTS_CONTROL_HW4_TS_PUSH_EN	BIT(11)
#define AM65_CPTS_CONTROL_HW5_TS_PUSH_EN	BIT(12)
#define AM65_CPTS_CONTROL_HW6_TS_PUSH_EN	BIT(13)
#define AM65_CPTS_CONTROL_HW7_TS_PUSH_EN	BIT(14)
#define AM65_CPTS_CONTROL_HW8_TS_PUSH_EN	BIT(15)
/* first HWx_TS_PUSH_EN bit; extts input N is BIT(OFFSET + N) */
#define AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET	(8)

#define AM65_CPTS_CONTROL_TX_GENF_CLR_EN	BIT(17)

#define AM65_CPTS_CONTROL_TS_SYNC_SEL_MASK	(0xF)
#define AM65_CPTS_CONTROL_TS_SYNC_SEL_SHIFT	(28)

/* RFTCLK_SEL_REG */
#define AM65_CPTS_RFTCLK_SEL_MASK		(0x1F)

/* TS_PUSH_REG */
#define AM65_CPTS_TS_PUSH			BIT(0)

/* TS_LOAD_EN_REG */
#define AM65_CPTS_TS_LOAD_EN			BIT(0)

/* INTSTAT_RAW_REG */
#define AM65_CPTS_INTSTAT_RAW_TS_PEND		BIT(0)

/* INTSTAT_MASKED_REG */
#define AM65_CPTS_INTSTAT_MASKED_TS_PEND	BIT(0)

/* INT_ENABLE_REG */
#define AM65_CPTS_INT_ENABLE_TS_PEND_EN		BIT(0)

/* TS_COMP_NUDGE_REG */
#define AM65_CPTS_TS_COMP_NUDGE_MASK		(0xFF)

/* EVENT_POP_REG */
#define AM65_CPTS_EVENT_POP			BIT(0)

/* EVENT_1_REG field layout */
#define AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK	GENMASK(15, 0)

#define AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK	GENMASK(19, 16)
#define AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT	(16)

#define AM65_CPTS_EVENT_1_EVENT_TYPE_MASK	GENMASK(23, 20)
#define AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT	(20)

#define AM65_CPTS_EVENT_1_PORT_NUMBER_MASK	GENMASK(28, 24)
#define AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT	(24)

/* EVENT_2_REG */
#define AM65_CPTS_EVENT_2_REG_DOMAIN_MASK	(0xFF)
#define AM65_CPTS_EVENT_2_REG_DOMAIN_SHIFT	(0)
0130
/* Event types as encoded in the EVENT_1 EVENT_TYPE field. Enum order
 * matches the hardware encoding (the values are also packed into
 * skb_mtype_seqid for TX matching, see am65_cpts_prep_tx_timestamp()).
 */
enum {
	AM65_CPTS_EV_PUSH,	/* Time Stamp Push Event */
	AM65_CPTS_EV_ROLL,	/* Time Stamp Rollover Event */
	AM65_CPTS_EV_HALF,	/* Time Stamp Half Rollover Event */
	AM65_CPTS_EV_HW,	/* Hardware Time Stamp Push Event */
	AM65_CPTS_EV_RX,	/* Ethernet Receive Event */
	AM65_CPTS_EV_TX,	/* Ethernet Transmit Event */
	AM65_CPTS_EV_TS_COMP,	/* Time Stamp Compare Event */
	AM65_CPTS_EV_HOST,	/* Host Transmit Event */
};
0141
/* One decoded entry from the CPTS event FIFO. */
struct am65_cpts_event {
	struct list_head list;	/* linkage on cpts->pool or cpts->events */
	unsigned long tmo;	/* jiffies deadline before the event expires */
	u32 event1;		/* EVENT_1 register snapshot */
	u32 event2;		/* EVENT_2 register snapshot */
	u64 timestamp;		/* 64-bit timestamp (EVENT_0 | EVENT_3 << 32) */
};

#define AM65_CPTS_FIFO_DEPTH		(16)
#define AM65_CPTS_MAX_EVENTS		(32)
#define AM65_CPTS_EVENT_RX_TX_TIMEOUT	(20) /* ms */
#define AM65_CPTS_SKB_TX_WORK_TIMEOUT	1 /* jiffies */
#define AM65_CPTS_MIN_PPM		0x400
0155
/* Per-instance CPTS state. */
struct am65_cpts {
	struct device *dev;
	struct am65_cpts_regs __iomem *reg;	/* mapped register block */
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	int phc_index;			/* PTP clock index from the PTP core */
	struct clk_hw *clk_mux_hw;	/* optional "refclk-mux" clock */
	struct device_node *clk_mux_np;
	struct clk *refclk;
	u32 refclk_freq;		/* reference clock rate in Hz */
	struct list_head events;	/* pending RX/TX events awaiting match */
	struct list_head pool;		/* free event slots */
	struct am65_cpts_event pool_data[AM65_CPTS_MAX_EVENTS];
	spinlock_t lock;		/* protects events/pool and FIFO pops */
	u32 ext_ts_inputs;		/* from "ti,cpts-ext-ts-inputs" */
	u32 genf_num;			/* from "ti,cpts-periodic-outputs" */
	u32 ts_add_val;			/* programmed TS_ADD_VAL */
	int irq;
	struct mutex ptp_clk_lock;	/* serializes TS push/load sequences */
	u64 timestamp;			/* last counter value from EV_PUSH */
	u32 genf_enable;		/* bitmask of enabled GENF outputs */
	u32 hw_ts_enable;		/* bitmask of enabled HW TS push inputs */
	struct sk_buff_head txq;	/* skbs waiting for a TX timestamp */
};

/* Per-skb scratch data stored in skb->cb while awaiting a timestamp. */
struct am65_cpts_skb_cb_data {
	unsigned long tmo;	/* expiry deadline for this skb */
	u32 skb_mtype_seqid;	/* packed msgtype/evtype/seqid for matching */
};

/* Accessors for the memory-mapped CPTS registers */
#define am65_cpts_write32(c, v, r) writel(v, &(c)->reg->r)
#define am65_cpts_read32(c, r) readl(&(c)->reg->r)
0188
/* Load an absolute time (in ns) into the CPTS 64-bit counter. The
 * value is staged via TS_LOAD_VAL_HI/LO and committed by writing
 * TS_LOAD_EN.
 */
static void am65_cpts_settime(struct am65_cpts *cpts, u64 start_tstamp)
{
	u32 val;

	val = upper_32_bits(start_tstamp);
	am65_cpts_write32(cpts, val, ts_load_val_hi);
	val = lower_32_bits(start_tstamp);
	am65_cpts_write32(cpts, val, ts_load_val_lo);

	am65_cpts_write32(cpts, AM65_CPTS_TS_LOAD_EN, ts_load_en);
}
0200
/* Program the per-refclk-cycle counter increment (ns per tick - 1). */
static void am65_cpts_set_add_val(struct am65_cpts *cpts)
{
	/* TS_ADD_VAL field is 3 bits wide */
	cpts->ts_add_val = (NSEC_PER_SEC / cpts->refclk_freq - 1) & 0x7;

	am65_cpts_write32(cpts, cpts->ts_add_val, ts_add_val);
}
0208
/* Stop the CPTS counter and mask its interrupt. */
static void am65_cpts_disable(struct am65_cpts *cpts)
{
	am65_cpts_write32(cpts, 0, control);
	am65_cpts_write32(cpts, 0, int_enable);
}
0214
0215 static int am65_cpts_event_get_port(struct am65_cpts_event *event)
0216 {
0217 return (event->event1 & AM65_CPTS_EVENT_1_PORT_NUMBER_MASK) >>
0218 AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT;
0219 }
0220
0221 static int am65_cpts_event_get_type(struct am65_cpts_event *event)
0222 {
0223 return (event->event1 & AM65_CPTS_EVENT_1_EVENT_TYPE_MASK) >>
0224 AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT;
0225 }
0226
0227 static int am65_cpts_cpts_purge_events(struct am65_cpts *cpts)
0228 {
0229 struct list_head *this, *next;
0230 struct am65_cpts_event *event;
0231 int removed = 0;
0232
0233 list_for_each_safe(this, next, &cpts->events) {
0234 event = list_entry(this, struct am65_cpts_event, list);
0235 if (time_after(jiffies, event->tmo)) {
0236 list_del_init(&event->list);
0237 list_add(&event->list, &cpts->pool);
0238 ++removed;
0239 }
0240 }
0241
0242 if (removed)
0243 dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed);
0244 return removed ? 0 : -1;
0245 }
0246
/* Pop one event from the CPTS event FIFO into @event.
 *
 * Note the inverted return convention: returns false when an event was
 * popped successfully, true when the FIFO is empty (no TS_PEND).
 */
static bool am65_cpts_fifo_pop_event(struct am65_cpts *cpts,
				     struct am65_cpts_event *event)
{
	u32 r = am65_cpts_read32(cpts, intstat_raw);

	if (r & AM65_CPTS_INTSTAT_RAW_TS_PEND) {
		/* 64-bit timestamp is split across EVENT_0 (low word)
		 * and EVENT_3 (high word).
		 */
		event->timestamp = am65_cpts_read32(cpts, event_0);
		event->event1 = am65_cpts_read32(cpts, event_1);
		event->event2 = am65_cpts_read32(cpts, event_2);
		event->timestamp |= (u64)am65_cpts_read32(cpts, event_3) << 32;
		/* advance the FIFO to the next event */
		am65_cpts_write32(cpts, AM65_CPTS_EVENT_POP, event_pop);
		return false;
	}
	return true;
}
0262
/* Drain up to AM65_CPTS_FIFO_DEPTH events from the hardware FIFO and
 * dispatch them by type: PUSH updates the cached counter value, RX/TX
 * timestamps are queued for skb matching, HW events are reported to
 * the PTP core as external timestamps.
 *
 * Returns 0 on success, -1 if the event pool ran dry or an unknown
 * event type was seen.
 */
static int am65_cpts_fifo_read(struct am65_cpts *cpts)
{
	struct ptp_clock_event pevent;
	struct am65_cpts_event *event;
	bool schedule = false;
	int i, type, ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&cpts->lock, flags);
	for (i = 0; i < AM65_CPTS_FIFO_DEPTH; i++) {
		/* grab a free slot; try to reclaim expired events when
		 * the pool is empty
		 */
		event = list_first_entry_or_null(&cpts->pool,
						 struct am65_cpts_event, list);

		if (!event) {
			if (am65_cpts_cpts_purge_events(cpts)) {
				dev_err(cpts->dev, "cpts: event pool empty\n");
				ret = -1;
				goto out;
			}
			continue;
		}

		if (am65_cpts_fifo_pop_event(cpts, event))
			break;	/* FIFO drained */

		type = am65_cpts_event_get_type(event);
		switch (type) {
		case AM65_CPTS_EV_PUSH:
			cpts->timestamp = event->timestamp;
			dev_dbg(cpts->dev, "AM65_CPTS_EV_PUSH t:%llu\n",
				cpts->timestamp);
			break;
		case AM65_CPTS_EV_RX:
		case AM65_CPTS_EV_TX:
			event->tmo = jiffies +
				msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);

			/* move the slot onto the pending events list for
			 * later skb matching
			 */
			list_del_init(&event->list);
			list_add_tail(&event->list, &cpts->events);

			dev_dbg(cpts->dev,
				"AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n",
				event->event1, event->event2,
				event->timestamp);
			schedule = true;
			break;
		case AM65_CPTS_EV_HW:
			/* HW push input numbering is 1-based in EVENT_1 */
			pevent.index = am65_cpts_event_get_port(event) - 1;
			pevent.timestamp = event->timestamp;
			pevent.type = PTP_CLOCK_EXTTS;
			dev_dbg(cpts->dev, "AM65_CPTS_EV_HW p:%d t:%llu\n",
				pevent.index, event->timestamp);

			ptp_clock_event(cpts->ptp_clock, &pevent);
			break;
		case AM65_CPTS_EV_HOST:
			break;
		case AM65_CPTS_EV_ROLL:
		case AM65_CPTS_EV_HALF:
		case AM65_CPTS_EV_TS_COMP:
			dev_dbg(cpts->dev,
				"AM65_CPTS_EVT: %d e1:%08x e2:%08x t:%lld\n",
				type,
				event->event1, event->event2,
				event->timestamp);
			break;
		default:
			dev_err(cpts->dev, "cpts: unknown event type\n");
			ret = -1;
			goto out;
		}
	}

out:
	spin_unlock_irqrestore(&cpts->lock, flags);

	/* kick the aux worker to match new RX/TX events against skbs */
	if (schedule)
		ptp_schedule_worker(cpts->ptp_clock, 0);

	return ret;
}
0344
/* Read the current counter value (in ns) by triggering a TS_PUSH event
 * and draining the FIFO synchronously, so the PUSH event updates
 * cpts->timestamp here rather than in the IRQ handler.
 * Caller must hold cpts->ptp_clk_lock.
 */
static u64 am65_cpts_gettime(struct am65_cpts *cpts,
			     struct ptp_system_timestamp *sts)
{
	unsigned long flags;
	u64 val = 0;

	/* temporarily disable cpts interrupt to avoid intentional
	 * doubled read; an in-flight interrupt is ok
	 */
	am65_cpts_write32(cpts, 0, int_enable);

	/* use spin_lock_irqsave() here as it has to run very fast */
	spin_lock_irqsave(&cpts->lock, flags);
	ptp_read_system_prets(sts);
	am65_cpts_write32(cpts, AM65_CPTS_TS_PUSH, ts_push);
	am65_cpts_read32(cpts, ts_push);	/* flush the posted write */
	ptp_read_system_postts(sts);
	spin_unlock_irqrestore(&cpts->lock, flags);

	am65_cpts_fifo_read(cpts);

	am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);

	val = cpts->timestamp;

	return val;
}
0372
0373 static irqreturn_t am65_cpts_interrupt(int irq, void *dev_id)
0374 {
0375 struct am65_cpts *cpts = dev_id;
0376
0377 if (am65_cpts_fifo_read(cpts))
0378 dev_dbg(cpts->dev, "cpts: unable to obtain a time stamp\n");
0379
0380 return IRQ_HANDLED;
0381 }
0382
0383
0384 static int am65_cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
0385 {
0386 struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
0387 int neg_adj = 0;
0388 u64 adj_period;
0389 u32 val;
0390
0391 if (ppb < 0) {
0392 neg_adj = 1;
0393 ppb = -ppb;
0394 }
0395
0396
0397
0398
0399
0400
0401
0402
0403
0404
0405 adj_period = div_u64(cpts->refclk_freq, ppb);
0406
0407 mutex_lock(&cpts->ptp_clk_lock);
0408
0409 val = am65_cpts_read32(cpts, control);
0410 if (neg_adj)
0411 val |= AM65_CPTS_CONTROL_TS_PPM_DIR;
0412 else
0413 val &= ~AM65_CPTS_CONTROL_TS_PPM_DIR;
0414 am65_cpts_write32(cpts, val, control);
0415
0416 val = upper_32_bits(adj_period) & 0x3FF;
0417 am65_cpts_write32(cpts, val, ts_ppm_hi);
0418 val = lower_32_bits(adj_period);
0419 am65_cpts_write32(cpts, val, ts_ppm_low);
0420
0421 mutex_unlock(&cpts->ptp_clk_lock);
0422
0423 return 0;
0424 }
0425
0426 static int am65_cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
0427 {
0428 struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
0429 s64 ns;
0430
0431 mutex_lock(&cpts->ptp_clk_lock);
0432 ns = am65_cpts_gettime(cpts, NULL);
0433 ns += delta;
0434 am65_cpts_settime(cpts, ns);
0435 mutex_unlock(&cpts->ptp_clk_lock);
0436
0437 return 0;
0438 }
0439
/* PTP .gettimex64 callback: return the counter value, with optional
 * system timestamps bracketing the hardware read (@sts).
 */
static int am65_cpts_ptp_gettimex(struct ptp_clock_info *ptp,
				  struct timespec64 *ts,
				  struct ptp_system_timestamp *sts)
{
	struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
	u64 ns;

	mutex_lock(&cpts->ptp_clk_lock);
	ns = am65_cpts_gettime(cpts, sts);
	mutex_unlock(&cpts->ptp_clk_lock);
	*ts = ns_to_timespec64(ns);

	return 0;
}
0454
/**
 * am65_cpts_ns_gettime - get current time in ns
 * @cpts: cpts handle
 *
 * Reads the current hardware time and returns it in ns.
 */
u64 am65_cpts_ns_gettime(struct am65_cpts *cpts)
{
	u64 ns;

	/* reuse ptp_clk_lock as it serializes the TS push sequence */
	mutex_lock(&cpts->ptp_clk_lock);
	ns = am65_cpts_gettime(cpts, NULL);
	mutex_unlock(&cpts->ptp_clk_lock);

	return ns;
}
EXPORT_SYMBOL_GPL(am65_cpts_ns_gettime);
0467
/* PTP .settime64 callback: load the counter with the given time. */
static int am65_cpts_ptp_settime(struct ptp_clock_info *ptp,
				 const struct timespec64 *ts)
{
	struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
	u64 ns;

	ns = timespec64_to_ns(ts);
	mutex_lock(&cpts->ptp_clk_lock);
	am65_cpts_settime(cpts, ns);
	mutex_unlock(&cpts->ptp_clk_lock);

	return 0;
}
0481
/* Enable or disable the HWx_TS_PUSH input @index (0-based) in CONTROL
 * and mirror the state in cpts->hw_ts_enable.
 * Caller holds ptp_clk_lock.
 */
static void am65_cpts_extts_enable_hw(struct am65_cpts *cpts, u32 index, int on)
{
	u32 v;

	v = am65_cpts_read32(cpts, control);
	if (on) {
		v |= BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index);
		cpts->hw_ts_enable |= BIT(index);
	} else {
		v &= ~BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index);
		cpts->hw_ts_enable &= ~BIT(index);
	}
	am65_cpts_write32(cpts, v, control);
}
0496
/* External-timestamp request: no-op if the input is already in the
 * requested state, otherwise toggle it under ptp_clk_lock.
 */
static int am65_cpts_extts_enable(struct am65_cpts *cpts, u32 index, int on)
{
	if (!!(cpts->hw_ts_enable & BIT(index)) == !!on)
		return 0;

	mutex_lock(&cpts->ptp_clk_lock);
	am65_cpts_extts_enable_hw(cpts, index, on);
	mutex_unlock(&cpts->ptp_clk_lock);

	dev_dbg(cpts->dev, "%s: ExtTS:%u %s\n",
		__func__, index, on ? "enabled" : "disabled");

	return 0;
}
0511
/**
 * am65_cpts_estf_enable - enable the specified EST function
 * @cpts: cpts handle
 * @idx: ESTF index
 * @cfg: ESTF configuration (start time and period in ns)
 *
 * Returns -EINVAL when the period, converted to refclk cycles, does
 * not fit in the 32-bit LENGTH register.
 */
int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
			  struct am65_cpts_estf_cfg *cfg)
{
	u64 cycles;
	u32 val;

	/* convert the period from ns to refclk cycles */
	cycles = cfg->ns_period * cpts->refclk_freq;
	cycles = DIV_ROUND_UP(cycles, NSEC_PER_SEC);
	if (cycles > U32_MAX)
		return -EINVAL;

	/* LENGTH is zeroed before reprogramming COMP_HI/LO */
	am65_cpts_write32(cpts, 0, estf[idx].length);

	val = upper_32_bits(cfg->ns_start);
	am65_cpts_write32(cpts, val, estf[idx].comp_hi);
	val = lower_32_bits(cfg->ns_start);
	am65_cpts_write32(cpts, val, estf[idx].comp_lo);
	val = lower_32_bits(cycles);
	am65_cpts_write32(cpts, val, estf[idx].length);

	dev_dbg(cpts->dev, "%s: ESTF:%u enabled\n", __func__, idx);

	return 0;
}
EXPORT_SYMBOL_GPL(am65_cpts_estf_enable);
0538
/**
 * am65_cpts_estf_disable - disable the specified EST function
 * @cpts: cpts handle
 * @idx: ESTF index
 *
 * Writing a zero LENGTH stops the generator.
 */
void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx)
{
	am65_cpts_write32(cpts, 0, estf[idx].length);

	dev_dbg(cpts->dev, "%s: ESTF:%u disabled\n", __func__, idx);
}
EXPORT_SYMBOL_GPL(am65_cpts_estf_disable);
0546
/* Program GENF @req->index. When enabling, the requested period is
 * converted to refclk cycles, the start time written to COMP_HI/LO and
 * the period to LENGTH; a zero LENGTH stops the generator.
 * Caller holds ptp_clk_lock.
 */
static void am65_cpts_perout_enable_hw(struct am65_cpts *cpts,
				       struct ptp_perout_request *req, int on)
{
	u64 ns_period, ns_start, cycles;
	struct timespec64 ts;
	u32 val;

	if (on) {
		ts.tv_sec = req->period.sec;
		ts.tv_nsec = req->period.nsec;
		ns_period = timespec64_to_ns(&ts);

		cycles = (ns_period * cpts->refclk_freq) / NSEC_PER_SEC;

		ts.tv_sec = req->start.sec;
		ts.tv_nsec = req->start.nsec;
		ns_start = timespec64_to_ns(&ts);

		val = upper_32_bits(ns_start);
		am65_cpts_write32(cpts, val, genf[req->index].comp_hi);
		val = lower_32_bits(ns_start);
		am65_cpts_write32(cpts, val, genf[req->index].comp_lo);
		val = lower_32_bits(cycles);
		am65_cpts_write32(cpts, val, genf[req->index].length);

		cpts->genf_enable |= BIT(req->index);
	} else {
		am65_cpts_write32(cpts, 0, genf[req->index].length);

		cpts->genf_enable &= ~BIT(req->index);
	}
}
0579
/* Periodic-output request: no-op if the GENF is already in the
 * requested state, otherwise reprogram it under ptp_clk_lock.
 */
static int am65_cpts_perout_enable(struct am65_cpts *cpts,
				   struct ptp_perout_request *req, int on)
{
	if (!!(cpts->genf_enable & BIT(req->index)) == !!on)
		return 0;

	mutex_lock(&cpts->ptp_clk_lock);
	am65_cpts_perout_enable_hw(cpts, req, on);
	mutex_unlock(&cpts->ptp_clk_lock);

	dev_dbg(cpts->dev, "%s: GenF:%u %s\n",
		__func__, req->index, on ? "enabled" : "disabled");

	return 0;
}
0595
0596 static int am65_cpts_ptp_enable(struct ptp_clock_info *ptp,
0597 struct ptp_clock_request *rq, int on)
0598 {
0599 struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
0600
0601 switch (rq->type) {
0602 case PTP_CLK_REQ_EXTTS:
0603 return am65_cpts_extts_enable(cpts, rq->extts.index, on);
0604 case PTP_CLK_REQ_PEROUT:
0605 return am65_cpts_perout_enable(cpts, &rq->perout, on);
0606 default:
0607 break;
0608 }
0609
0610 return -EOPNOTSUPP;
0611 }
0612
static long am65_cpts_ts_work(struct ptp_clock_info *ptp);

/* Template PTP clock description; max_adj is filled in at probe time
 * from the reference clock rate before it is copied into the instance.
 */
static struct ptp_clock_info am65_ptp_info = {
	.owner		= THIS_MODULE,
	.name		= "CTPS timer",
	.adjfreq	= am65_cpts_ptp_adjfreq,
	.adjtime	= am65_cpts_ptp_adjtime,
	.gettimex64	= am65_cpts_ptp_gettimex,
	.settime64	= am65_cpts_ptp_settime,
	.enable		= am65_cpts_ptp_enable,
	.do_aux_work	= am65_cpts_ts_work,
};
0625
/* Try to match one TX event against the skbs queued for a hardware
 * timestamp. On a msgtype/evtype/seqid match the timestamp is
 * delivered via skb_tstamp_tx(); expired skbs are dropped along the
 * way. Returns true if the event was consumed by a match.
 */
static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
				  struct am65_cpts_event *event)
{
	struct sk_buff_head txq_list;
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	bool found = false;
	u32 mtype_seqid;

	/* same field layout as skb_cb->skb_mtype_seqid */
	mtype_seqid = event->event1 &
		      (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK |
		       AM65_CPTS_EVENT_1_EVENT_TYPE_MASK |
		       AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);

	__skb_queue_head_init(&txq_list);

	/* splice the queue onto a private list so it can be walked
	 * without holding txq.lock
	 */
	spin_lock_irqsave(&cpts->txq.lock, flags);
	skb_queue_splice_init(&cpts->txq, &txq_list);
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	skb_queue_walk_safe(&txq_list, skb, tmp) {
		struct skb_shared_hwtstamps ssh;
		struct am65_cpts_skb_cb_data *skb_cb =
					(struct am65_cpts_skb_cb_data *)skb->cb;

		if (mtype_seqid == skb_cb->skb_mtype_seqid) {
			u64 ns = event->timestamp;

			memset(&ssh, 0, sizeof(ssh));
			ssh.hwtstamp = ns_to_ktime(ns);
			skb_tstamp_tx(skb, &ssh);
			found = true;
			__skb_unlink(skb, &txq_list);
			dev_consume_skb_any(skb);
			dev_dbg(cpts->dev,
				"match tx timestamp mtype_seqid %08x\n",
				mtype_seqid);
			break;
		}

		if (time_after(jiffies, skb_cb->tmo)) {
			/* drop skbs whose timestamp never arrived */
			dev_dbg(cpts->dev,
				"expiring tx timestamp mtype_seqid %08x\n",
				mtype_seqid);
			__skb_unlink(skb, &txq_list);
			dev_consume_skb_any(skb);
		}
	}

	/* put remaining skbs back for the next event/worker pass */
	spin_lock_irqsave(&cpts->txq.lock, flags);
	skb_queue_splice(&txq_list, &cpts->txq);
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	return found;
}
0683
/* Match all pending RX/TX events against queued TX skbs; matched or
 * expired events are returned to the free pool.
 */
static void am65_cpts_find_ts(struct am65_cpts *cpts)
{
	struct am65_cpts_event *event;
	struct list_head *this, *next;
	LIST_HEAD(events_free);
	unsigned long flags;
	LIST_HEAD(events);

	/* detach the pending list so matching runs without cpts->lock */
	spin_lock_irqsave(&cpts->lock, flags);
	list_splice_init(&cpts->events, &events);
	spin_unlock_irqrestore(&cpts->lock, flags);

	list_for_each_safe(this, next, &events) {
		event = list_entry(this, struct am65_cpts_event, list);
		if (am65_cpts_match_tx_ts(cpts, event) ||
		    time_after(jiffies, event->tmo)) {
			list_del_init(&event->list);
			list_add(&event->list, &events_free);
		}
	}

	/* unmatched events go back to pending, consumed ones to the pool */
	spin_lock_irqsave(&cpts->lock, flags);
	list_splice_tail(&events, &cpts->events);
	list_splice_tail(&events_free, &cpts->pool);
	spin_unlock_irqrestore(&cpts->lock, flags);
}
0710
/* PTP aux worker (.do_aux_work): run skb/event matching and keep
 * rescheduling while TX skbs are still waiting for a timestamp.
 */
static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
{
	struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
	unsigned long flags;
	long delay = -1;	/* -1: do not reschedule */

	am65_cpts_find_ts(cpts);

	spin_lock_irqsave(&cpts->txq.lock, flags);
	if (!skb_queue_empty(&cpts->txq))
		delay = AM65_CPTS_SKB_TX_WORK_TIMEOUT;
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	return delay;
}
0726
0727
0728
0729
0730
0731
0732
0733
0734
0735 void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
0736 {
0737 u32 val;
0738
0739 mutex_lock(&cpts->ptp_clk_lock);
0740 val = am65_cpts_read32(cpts, control);
0741 if (en)
0742 val |= AM65_CPTS_CONTROL_TSTAMP_EN;
0743 else
0744 val &= ~AM65_CPTS_CONTROL_TSTAMP_EN;
0745 am65_cpts_write32(cpts, val, control);
0746 mutex_unlock(&cpts->ptp_clk_lock);
0747 }
0748 EXPORT_SYMBOL_GPL(am65_cpts_rx_enable);
0749
/* Extract the PTP message type and sequence id from @skb and pack them
 * into the EVENT_1-compatible layout used for event matching.
 *
 * Returns 1 and fills @mtype_seqid when the skb carries a parseable
 * PTP header, 0 otherwise.
 */
static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
	unsigned int ptp_class = ptp_classify_raw(skb);
	struct ptp_header *hdr;
	u8 msgtype;
	u16 seqid;

	if (ptp_class == PTP_CLASS_NONE)
		return 0;

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		return 0;

	msgtype = ptp_get_msgtype(hdr, ptp_class);
	seqid = ntohs(hdr->sequence_id);

	/* pack into the EVENT_1 register layout for direct comparison */
	*mtype_seqid = (msgtype << AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT) &
			AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK;
	*mtype_seqid |= (seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);

	return 1;
}
0773
0774
0775
0776
0777
0778
0779
0780
0781
/**
 * am65_cpts_tx_timestamp - save tx packet for timestamping
 * @cpts: cpts handle
 * @skb: packet
 *
 * Queues a TX packet (previously marked by
 * am65_cpts_prep_tx_timestamp()) until its hardware timestamp event
 * arrives and is matched by the aux worker.
 */
void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
{
	struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		return;

	/* add frame to queue for processing later.
	 * The periodic FIFO check will handle this.
	 */
	skb_get(skb);
	/* get the timestamp for timeouts */
	skb_cb->tmo = jiffies + msecs_to_jiffies(100);
	skb_queue_tail(&cpts->txq, skb);
	ptp_schedule_worker(cpts->ptp_clock, 0);
}
EXPORT_SYMBOL_GPL(am65_cpts_tx_timestamp);
0799
0800
0801
0802
0803
0804
0805
0806
0807
0808
/**
 * am65_cpts_prep_tx_timestamp - check and prepare tx packet for timestamping
 * @cpts: cpts handle
 * @skb: packet
 *
 * Checks whether the packet can be timestamped, stores the packed
 * msgtype/evtype/seqid in the skb cb area and marks the packet
 * SKBTX_IN_PROGRESS.
 */
void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
{
	struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
	int ret;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return;

	ret = am65_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
	if (!ret)
		return;
	/* tag with the TX event type so it compares equal to EVENT_1 */
	skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_TX <<
				   AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT);

	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}
EXPORT_SYMBOL_GPL(am65_cpts_prep_tx_timestamp);
0826
/**
 * am65_cpts_phc_index - get the PTP clock index of this CPTS instance
 * @cpts: cpts handle
 */
int am65_cpts_phc_index(struct am65_cpts *cpts)
{
	return cpts->phc_index;
}
EXPORT_SYMBOL_GPL(am65_cpts_phc_index);
0832
/* devm action: tear down the refclk mux provider registered by
 * cpts_of_mux_clk_setup().
 */
static void cpts_free_clk_mux(void *data)
{
	struct am65_cpts *cpts = data;

	of_clk_del_provider(cpts->clk_mux_np);
	clk_hw_unregister_mux(cpts->clk_mux_hw);
	of_node_put(cpts->clk_mux_np);
}
0841
0842 static int cpts_of_mux_clk_setup(struct am65_cpts *cpts,
0843 struct device_node *node)
0844 {
0845 unsigned int num_parents;
0846 const char **parent_names;
0847 char *clk_mux_name;
0848 void __iomem *reg;
0849 int ret = -EINVAL;
0850
0851 cpts->clk_mux_np = of_get_child_by_name(node, "refclk-mux");
0852 if (!cpts->clk_mux_np)
0853 return 0;
0854
0855 num_parents = of_clk_get_parent_count(cpts->clk_mux_np);
0856 if (num_parents < 1) {
0857 dev_err(cpts->dev, "mux-clock %pOF must have parents\n",
0858 cpts->clk_mux_np);
0859 goto mux_fail;
0860 }
0861
0862 parent_names = devm_kcalloc(cpts->dev, sizeof(char *), num_parents,
0863 GFP_KERNEL);
0864 if (!parent_names) {
0865 ret = -ENOMEM;
0866 goto mux_fail;
0867 }
0868
0869 of_clk_parent_fill(cpts->clk_mux_np, parent_names, num_parents);
0870
0871 clk_mux_name = devm_kasprintf(cpts->dev, GFP_KERNEL, "%s.%pOFn",
0872 dev_name(cpts->dev), cpts->clk_mux_np);
0873 if (!clk_mux_name) {
0874 ret = -ENOMEM;
0875 goto mux_fail;
0876 }
0877
0878 reg = &cpts->reg->rftclk_sel;
0879
0880
0881
0882 cpts->clk_mux_hw = clk_hw_register_mux(NULL, clk_mux_name,
0883 parent_names, num_parents,
0884 0, reg, 0, 5, 0, NULL);
0885 if (IS_ERR(cpts->clk_mux_hw)) {
0886 ret = PTR_ERR(cpts->clk_mux_hw);
0887 goto mux_fail;
0888 }
0889
0890 ret = of_clk_add_hw_provider(cpts->clk_mux_np, of_clk_hw_simple_get,
0891 cpts->clk_mux_hw);
0892 if (ret)
0893 goto clk_hw_register;
0894
0895 ret = devm_add_action_or_reset(cpts->dev, cpts_free_clk_mux, cpts);
0896 if (ret)
0897 dev_err(cpts->dev, "failed to add clkmux reset action %d", ret);
0898
0899 return ret;
0900
0901 clk_hw_register:
0902 clk_hw_unregister_mux(cpts->clk_mux_hw);
0903 mux_fail:
0904 of_node_put(cpts->clk_mux_np);
0905 return ret;
0906 }
0907
0908 static int am65_cpts_of_parse(struct am65_cpts *cpts, struct device_node *node)
0909 {
0910 u32 prop[2];
0911
0912 if (!of_property_read_u32(node, "ti,cpts-ext-ts-inputs", &prop[0]))
0913 cpts->ext_ts_inputs = prop[0];
0914
0915 if (!of_property_read_u32(node, "ti,cpts-periodic-outputs", &prop[0]))
0916 cpts->genf_num = prop[0];
0917
0918 return cpts_of_mux_clk_setup(cpts, node);
0919 }
0920
/* devm action: unregister the PTP clock, stop CPTS and gate refclk. */
static void am65_cpts_release(void *data)
{
	struct am65_cpts *cpts = data;

	ptp_clock_unregister(cpts->ptp_clock);
	am65_cpts_disable(cpts);
	clk_disable_unprepare(cpts->refclk);
}
0929
/**
 * am65_cpts_create - create and initialize a CPTS instance
 * @dev: device owning this CPTS
 * @regs: ioremapped CPTS register base
 * @node: device tree node carrying the CPTS properties
 *
 * Parses DT properties, enables the reference clock, starts the
 * counter in 64-bit mode and registers a PTP clock. Returns the new
 * instance or an ERR_PTR; teardown on detach is handled by devm
 * actions.
 */
struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
				   struct device_node *node)
{
	struct am65_cpts *cpts;
	int ret, i;

	cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
	if (!cpts)
		return ERR_PTR(-ENOMEM);

	cpts->dev = dev;
	cpts->reg = (struct am65_cpts_regs __iomem *)regs;

	cpts->irq = of_irq_get_byname(node, "cpts");
	if (cpts->irq <= 0) {
		ret = cpts->irq ?: -ENXIO;
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get IRQ number (err = %d)\n",
				ret);
		return ERR_PTR(ret);
	}

	ret = am65_cpts_of_parse(cpts, node);
	if (ret)
		return ERR_PTR(ret);

	mutex_init(&cpts->ptp_clk_lock);
	INIT_LIST_HEAD(&cpts->events);
	INIT_LIST_HEAD(&cpts->pool);
	spin_lock_init(&cpts->lock);
	skb_queue_head_init(&cpts->txq);

	/* all event slots start out on the free pool */
	for (i = 0; i < AM65_CPTS_MAX_EVENTS; i++)
		list_add(&cpts->pool_data[i].list, &cpts->pool);

	cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
	if (IS_ERR(cpts->refclk)) {
		ret = PTR_ERR(cpts->refclk);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get refclk %d\n", ret);
		return ERR_PTR(ret);
	}

	ret = clk_prepare_enable(cpts->refclk);
	if (ret) {
		dev_err(dev, "Failed to enable refclk %d\n", ret);
		return ERR_PTR(ret);
	}

	cpts->refclk_freq = clk_get_rate(cpts->refclk);

	/* fill the template before copying it into this instance */
	am65_ptp_info.max_adj = cpts->refclk_freq / AM65_CPTS_MIN_PPM;
	cpts->ptp_info = am65_ptp_info;

	if (cpts->ext_ts_inputs)
		cpts->ptp_info.n_ext_ts = cpts->ext_ts_inputs;
	if (cpts->genf_num)
		cpts->ptp_info.n_per_out = cpts->genf_num;

	am65_cpts_set_add_val(cpts);

	am65_cpts_write32(cpts, AM65_CPTS_CONTROL_EN |
			  AM65_CPTS_CONTROL_64MODE |
			  AM65_CPTS_CONTROL_TX_GENF_CLR_EN,
			  control);
	am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);

	/* set time to the current system time */
	am65_cpts_settime(cpts, ktime_to_ns(ktime_get_real()));

	cpts->ptp_clock = ptp_clock_register(&cpts->ptp_info, cpts->dev);
	if (IS_ERR_OR_NULL(cpts->ptp_clock)) {
		dev_err(dev, "Failed to register ptp clk %ld\n",
			PTR_ERR(cpts->ptp_clock));
		ret = cpts->ptp_clock ? PTR_ERR(cpts->ptp_clock) : -ENODEV;
		goto refclk_disable;
	}
	cpts->phc_index = ptp_clock_index(cpts->ptp_clock);

	/* from here on, cleanup is handled by the devm action */
	ret = devm_add_action_or_reset(dev, am65_cpts_release, cpts);
	if (ret) {
		dev_err(dev, "failed to add ptpclk reset action %d", ret);
		return ERR_PTR(ret);
	}

	ret = devm_request_threaded_irq(dev, cpts->irq, NULL,
					am65_cpts_interrupt,
					IRQF_ONESHOT, dev_name(dev), cpts);
	if (ret < 0) {
		dev_err(cpts->dev, "error attaching irq %d\n", ret);
		return ERR_PTR(ret);
	}

	dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u\n",
		 am65_cpts_read32(cpts, idver),
		 cpts->refclk_freq, cpts->ts_add_val);

	return cpts;

refclk_disable:
	clk_disable_unprepare(cpts->refclk);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(am65_cpts_create);
1033 EXPORT_SYMBOL_GPL(am65_cpts_create);
1034
1035 static int am65_cpts_probe(struct platform_device *pdev)
1036 {
1037 struct device_node *node = pdev->dev.of_node;
1038 struct device *dev = &pdev->dev;
1039 struct am65_cpts *cpts;
1040 void __iomem *base;
1041
1042 base = devm_platform_ioremap_resource_byname(pdev, "cpts");
1043 if (IS_ERR(base))
1044 return PTR_ERR(base);
1045
1046 cpts = am65_cpts_create(dev, base, node);
1047 return PTR_ERR_OR_ZERO(cpts);
1048 }
1049
/* Match table for standalone CPTS device nodes */
static const struct of_device_id am65_cpts_of_match[] = {
	{ .compatible = "ti,am65-cpts", },
	{ .compatible = "ti,j721e-cpts", },
	{},
};
MODULE_DEVICE_TABLE(of, am65_cpts_of_match);

static struct platform_driver am65_cpts_driver = {
	.probe		= am65_cpts_probe,
	.driver		= {
		.name	= "am65-cpts",
		.of_match_table = am65_cpts_of_match,
	},
};
module_platform_driver(am65_cpts_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
MODULE_DESCRIPTION("TI K3 AM65 CPTS driver");