// SPDX-License-Identifier: GPL-2.0
/*
 * TI Common Platform Time Sync (CPTS)
 *
 * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
 */

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/if.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

#include "cpts.h"

#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
#define CPTS_SKB_RX_TX_TMO 100 /* ms */
#define CPTS_EVENT_RX_TX_TIMEOUT (100) /* ms */

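/* per-skb state kept in skb->cb while the skb waits for its hardware timestamp */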
struct cpts_skb_cb_data {
	u32 skb_mtype_seqid;
	unsigned long tmo;
};

#define cpts_read32(c, r)	readl_relaxed(&c->reg->r)
#define cpts_write32(c, v, r)	writel_relaxed(v, &c->reg->r)

static int cpts_event_port(struct cpts_event *event)
{
	return (event->high >> PORT_NUMBER_SHIFT) & PORT_NUMBER_MASK;
}

static int event_expired(struct cpts_event *event)
{
	return time_after(jiffies, event->tmo);
}

static int event_type(struct cpts_event *event)
{
	return (event->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
}

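/* pop one event from the hardware FIFO; returns 0 on success, -1 when empty */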
static int cpts_fifo_pop(struct cpts *cpts, u32 *high, u32 *low)
{
	u32 r = cpts_read32(cpts, intstat_raw);

	if (r & TS_PEND_RAW) {
		*high = cpts_read32(cpts, event_high);
		*low = cpts_read32(cpts, event_low);
		cpts_write32(cpts, EVENT_POP, event_pop);
		return 0;
	}
	return -1;
}

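/* return expired events to the free pool; 0 if any were reclaimed, -1 otherwise */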
static int cpts_purge_events(struct cpts *cpts)
{
	struct list_head *this, *next;
	struct cpts_event *event;
	int removed = 0;

	list_for_each_safe(this, next, &cpts->events) {
		event = list_entry(this, struct cpts_event, list);
		if (event_expired(event)) {
			list_del_init(&event->list);
			list_add(&event->list, &cpts->pool);
			++removed;
		}
	}

	if (removed)
		dev_dbg(cpts->dev, "cpts: event pool cleaned up %d\n", removed);
	return removed ? 0 : -1;
}

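/* drop queued TX skbs whose timestamp wait window has expired */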
static void cpts_purge_txq(struct cpts *cpts)
{
	struct cpts_skb_cb_data *skb_cb;
	struct sk_buff *skb, *tmp;
	int removed = 0;

	skb_queue_walk_safe(&cpts->txq, skb, tmp) {
		skb_cb = (struct cpts_skb_cb_data *)skb->cb;
		if (time_after(jiffies, skb_cb->tmo)) {
			__skb_unlink(skb, &cpts->txq);
			dev_consume_skb_any(skb);
			++removed;
		}
	}

	if (removed)
		dev_dbg(cpts->dev, "txq cleaned up %d\n", removed);
}

/*
 * Returns zero if matching event type was found.
 */
static int cpts_fifo_read(struct cpts *cpts, int match)
{
	struct ptp_clock_event pevent;
	bool need_schedule = false;
	struct cpts_event *event;
	unsigned long flags;
	int i, type = -1;
	u32 hi, lo;

	spin_lock_irqsave(&cpts->lock, flags);

	for (i = 0; i < CPTS_FIFO_DEPTH; i++) {
		if (cpts_fifo_pop(cpts, &hi, &lo))
			break;

		if (list_empty(&cpts->pool) && cpts_purge_events(cpts)) {
			dev_warn(cpts->dev, "cpts: event pool empty\n");
			break;
		}

		event = list_first_entry(&cpts->pool, struct cpts_event, list);
		event->high = hi;
		event->low = lo;
		event->timestamp = timecounter_cyc2time(&cpts->tc, event->low);
		type = event_type(event);

		dev_dbg(cpts->dev, "CPTS_EV: %d high:%08X low:%08x\n",
			type, event->high, event->low);
		switch (type) {
		case CPTS_EV_PUSH:
			WRITE_ONCE(cpts->cur_timestamp, lo);
			timecounter_read(&cpts->tc);
			if (cpts->mult_new) {
				cpts->cc.mult = cpts->mult_new;
				cpts->mult_new = 0;
			}
			if (!cpts->irq_poll)
				complete(&cpts->ts_push_complete);
			break;
		case CPTS_EV_TX:
		case CPTS_EV_RX:
			event->tmo = jiffies +
				msecs_to_jiffies(CPTS_EVENT_RX_TX_TIMEOUT);

			list_del_init(&event->list);
			list_add_tail(&event->list, &cpts->events);
			need_schedule = true;
			break;
		case CPTS_EV_ROLL:
		case CPTS_EV_HALF:
			break;
		case CPTS_EV_HW:
			pevent.timestamp = event->timestamp;
			pevent.type = PTP_CLOCK_EXTTS;
			pevent.index = cpts_event_port(event) - 1;
			ptp_clock_event(cpts->clock, &pevent);
			break;
		default:
			dev_err(cpts->dev, "cpts: unknown event type\n");
			break;
		}
		if (type == match)
			break;
	}

	spin_unlock_irqrestore(&cpts->lock, flags);

	if (!cpts->irq_poll && need_schedule)
		ptp_schedule_worker(cpts->clock, 0);

	return type == match ? 0 : -1;
}

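/**
 * cpts_misc_interrupt - CPTS misc interrupt handler
 * @cpts: CPTS instance
 *
 * Drains the event FIFO; collected TX/RX events are handed to the PTP
 * worker for matching against queued skbs.
 */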
void cpts_misc_interrupt(struct cpts *cpts)
{
	cpts_fifo_read(cpts, -1);
}
EXPORT_SYMBOL_GPL(cpts_misc_interrupt);

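/* cyclecounter read callback: return the counter value latched by the last TS_PUSH */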
static u64 cpts_systim_read(const struct cyclecounter *cc)
{
	struct cpts *cpts = container_of(cc, struct cpts, cc);

	return READ_ONCE(cpts->cur_timestamp);
}

static void cpts_update_cur_time(struct cpts *cpts, int match,
				 struct ptp_system_timestamp *sts)
{
	unsigned long flags;

	reinit_completion(&cpts->ts_push_complete);

	/* use spin_lock_irqsave() here as it has to run very fast */
	spin_lock_irqsave(&cpts->lock, flags);
	ptp_read_system_prets(sts);
	cpts_write32(cpts, TS_PUSH, ts_push);
	cpts_read32(cpts, ts_push);
	ptp_read_system_postts(sts);
	spin_unlock_irqrestore(&cpts->lock, flags);

	if (cpts->irq_poll && cpts_fifo_read(cpts, match) && match != -1)
		dev_err(cpts->dev, "cpts: unable to obtain a time stamp\n");

	if (!cpts->irq_poll &&
	    !wait_for_completion_timeout(&cpts->ts_push_complete, HZ))
		dev_err(cpts->dev, "cpts: obtain a time stamp timeout\n");
}

/* PTP clock operations */

static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);
	int neg_adj = 0;
	u32 diff, mult;
	u64 adj;

	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}
	mult = cpts->cc_mult;
	adj = mult;
	adj *= ppb;
	diff = div_u64(adj, 1000000000ULL);

	mutex_lock(&cpts->ptp_clk_mutex);

	cpts->mult_new = neg_adj ? mult - diff : mult + diff;

	cpts_update_cur_time(cpts, CPTS_EV_PUSH, NULL);

	mutex_unlock(&cpts->ptp_clk_mutex);
	return 0;
}

static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);

	mutex_lock(&cpts->ptp_clk_mutex);
	timecounter_adjtime(&cpts->tc, delta);
	mutex_unlock(&cpts->ptp_clk_mutex);

	return 0;
}

static int cpts_ptp_gettimeex(struct ptp_clock_info *ptp,
			      struct timespec64 *ts,
			      struct ptp_system_timestamp *sts)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);
	u64 ns;

	mutex_lock(&cpts->ptp_clk_mutex);

	cpts_update_cur_time(cpts, CPTS_EV_PUSH, sts);

	ns = timecounter_read(&cpts->tc);
	mutex_unlock(&cpts->ptp_clk_mutex);

	*ts = ns_to_timespec64(ns);

	return 0;
}

static int cpts_ptp_settime(struct ptp_clock_info *ptp,
			    const struct timespec64 *ts)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);
	u64 ns;

	ns = timespec64_to_ns(ts);

	mutex_lock(&cpts->ptp_clk_mutex);
	timecounter_init(&cpts->tc, &cpts->cc, ns);
	mutex_unlock(&cpts->ptp_clk_mutex);

	return 0;
}

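/* enable or disable the external timestamp (HW_TS_PUSH) input @index; its
 * gate bit lives at bit (8 + index) of the control register
 */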
static int cpts_extts_enable(struct cpts *cpts, u32 index, int on)
{
	u32 v;

	if (((cpts->hw_ts_enable & BIT(index)) >> index) == on)
		return 0;

	mutex_lock(&cpts->ptp_clk_mutex);

	v = cpts_read32(cpts, control);
	if (on) {
		v |= BIT(8 + index);
		cpts->hw_ts_enable |= BIT(index);
	} else {
		v &= ~BIT(8 + index);
		cpts->hw_ts_enable &= ~BIT(index);
	}
	cpts_write32(cpts, v, control);

	mutex_unlock(&cpts->ptp_clk_mutex);

	return 0;
}

static int cpts_ptp_enable(struct ptp_clock_info *ptp,
			   struct ptp_clock_request *rq, int on)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);

	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		return cpts_extts_enable(cpts, rq->extts.index, on);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

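/* deliver a TX timestamp to the queued skb whose message type and sequence id
 * match the event; expired skbs encountered during the walk are dropped
 */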
static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
{
	struct sk_buff_head txq_list;
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	bool found = false;
	u32 mtype_seqid;

	mtype_seqid = event->high &
		      ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
		       (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
		       (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));

	__skb_queue_head_init(&txq_list);

	spin_lock_irqsave(&cpts->txq.lock, flags);
	skb_queue_splice_init(&cpts->txq, &txq_list);
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	skb_queue_walk_safe(&txq_list, skb, tmp) {
		struct skb_shared_hwtstamps ssh;
		struct cpts_skb_cb_data *skb_cb =
					(struct cpts_skb_cb_data *)skb->cb;

		if (mtype_seqid == skb_cb->skb_mtype_seqid) {
			memset(&ssh, 0, sizeof(ssh));
			ssh.hwtstamp = ns_to_ktime(event->timestamp);
			skb_tstamp_tx(skb, &ssh);
			found = true;
			__skb_unlink(skb, &txq_list);
			dev_consume_skb_any(skb);
			dev_dbg(cpts->dev, "match tx timestamp mtype_seqid %08x\n",
				mtype_seqid);
			break;
		}

		if (time_after(jiffies, skb_cb->tmo)) {
			/* drop skbs whose timestamp wait window has expired */
			dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
			__skb_unlink(skb, &txq_list);
			dev_consume_skb_any(skb);
		}
	}

	spin_lock_irqsave(&cpts->txq.lock, flags);
	skb_queue_splice(&txq_list, &cpts->txq);
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	return found;
}

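/* hand each collected event to cpts_match_tx_ts() and recycle those that
 * matched an skb or have expired
 */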
static void cpts_process_events(struct cpts *cpts)
{
	struct list_head *this, *next;
	struct cpts_event *event;
	LIST_HEAD(events_free);
	unsigned long flags;
	LIST_HEAD(events);

	spin_lock_irqsave(&cpts->lock, flags);
	list_splice_init(&cpts->events, &events);
	spin_unlock_irqrestore(&cpts->lock, flags);

	list_for_each_safe(this, next, &events) {
		event = list_entry(this, struct cpts_event, list);
		if (cpts_match_tx_ts(cpts, event) ||
		    time_after(jiffies, event->tmo)) {
			list_del_init(&event->list);
			list_add(&event->list, &events_free);
		}
	}

	spin_lock_irqsave(&cpts->lock, flags);
	list_splice_tail(&events, &cpts->events);
	list_splice_tail(&events_free, &cpts->pool);
	spin_unlock_irqrestore(&cpts->lock, flags);
}

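/* periodic PTP aux work: refresh the timecounter before the 32-bit counter
 * wraps and retry any TX timestamps still waiting in the queue
 */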
static long cpts_overflow_check(struct ptp_clock_info *ptp)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);
	unsigned long delay = cpts->ov_check_period;
	unsigned long flags;
	u64 ns;

	mutex_lock(&cpts->ptp_clk_mutex);

	cpts_update_cur_time(cpts, -1, NULL);
	ns = timecounter_read(&cpts->tc);

	cpts_process_events(cpts);

	spin_lock_irqsave(&cpts->txq.lock, flags);
	if (!skb_queue_empty(&cpts->txq)) {
		cpts_purge_txq(cpts);
		if (!skb_queue_empty(&cpts->txq))
			delay = CPTS_SKB_TX_WORK_TIMEOUT;
	}
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	dev_dbg(cpts->dev, "cpts overflow check at %lld\n", ns);
	mutex_unlock(&cpts->ptp_clk_mutex);
	return (long)delay;
}

static const struct ptp_clock_info cpts_info = {
	.owner		= THIS_MODULE,
	.name		= "CPTS timer",
	.max_adj	= 1000000,
	.n_ext_ts	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= cpts_ptp_adjfreq,
	.adjtime	= cpts_ptp_adjtime,
	.gettimex64	= cpts_ptp_gettimeex,
	.settime64	= cpts_ptp_settime,
	.enable		= cpts_ptp_enable,
	.do_aux_work	= cpts_overflow_check,
};

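/* classify the skb as PTP and build its message-type/sequence-id match key;
 * returns 1 if the key was filled in, 0 for non-PTP packets
 */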
static int cpts_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
	unsigned int ptp_class = ptp_classify_raw(skb);
	struct ptp_header *hdr;
	u8 msgtype;
	u16 seqid;

	if (ptp_class == PTP_CLASS_NONE)
		return 0;

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		return 0;

	msgtype = ptp_get_msgtype(hdr, ptp_class);
	seqid = ntohs(hdr->sequence_id);

	*mtype_seqid = (msgtype & MESSAGE_TYPE_MASK) << MESSAGE_TYPE_SHIFT;
	*mtype_seqid |= (seqid & SEQUENCE_ID_MASK) << SEQUENCE_ID_SHIFT;

	return 1;
}

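/* drain the FIFO, then search the event list for a timestamp matching the
 * skb's key; expired events are recycled to the pool along the way
 */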
static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb,
			int ev_type, u32 skb_mtype_seqid)
{
	struct list_head *this, *next;
	struct cpts_event *event;
	unsigned long flags;
	u32 mtype_seqid;
	u64 ns = 0;

	cpts_fifo_read(cpts, -1);
	spin_lock_irqsave(&cpts->lock, flags);
	list_for_each_safe(this, next, &cpts->events) {
		event = list_entry(this, struct cpts_event, list);
		if (event_expired(event)) {
			list_del_init(&event->list);
			list_add(&event->list, &cpts->pool);
			continue;
		}

		mtype_seqid = event->high &
			      ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
			       (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
			       (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));

		if (mtype_seqid == skb_mtype_seqid) {
			ns = event->timestamp;
			list_del_init(&event->list);
			list_add(&event->list, &cpts->pool);
			break;
		}
	}
	spin_unlock_irqrestore(&cpts->lock, flags);

	return ns;
}

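/**
 * cpts_rx_timestamp - attach an RX hardware timestamp to an skb, if available
 * @cpts: CPTS instance
 * @skb: received packet
 */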
void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
	struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
	struct skb_shared_hwtstamps *ssh;
	int ret;
	u64 ns;

	/* cpts_rx_timestamp() runs before eth_type_trans(), so the skb MAC
	 * header is not set up yet; reset it here before parsing the packet
	 */
	skb_reset_mac_header(skb);
	ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
	if (!ret)
		return;

	skb_cb->skb_mtype_seqid |= (CPTS_EV_RX << EVENT_TYPE_SHIFT);

	dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
		__func__, skb_cb->skb_mtype_seqid);

	ns = cpts_find_ts(cpts, skb, CPTS_EV_RX, skb_cb->skb_mtype_seqid);
	if (!ns)
		return;
	ssh = skb_hwtstamps(skb);
	memset(ssh, 0, sizeof(*ssh));
	ssh->hwtstamp = ns_to_ktime(ns);
}
EXPORT_SYMBOL_GPL(cpts_rx_timestamp);

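/**
 * cpts_tx_timestamp - queue an skb for deferred TX timestamp delivery
 * @cpts: CPTS instance
 * @skb: transmitted packet with SKBTX_IN_PROGRESS set
 */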
void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
	struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
	int ret;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		return;

	ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
	if (!ret)
		return;

	skb_cb->skb_mtype_seqid |= (CPTS_EV_TX << EVENT_TYPE_SHIFT);

	dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
		__func__, skb_cb->skb_mtype_seqid);

	/* always defer TX timestamp processing to the PTP worker */
	skb_get(skb);
	/* record the deadline after which the skb is expired from the txq */
	skb_cb->tmo = jiffies + msecs_to_jiffies(CPTS_SKB_RX_TX_TMO);
	skb_queue_tail(&cpts->txq, skb);
	ptp_schedule_worker(cpts->clock, 0);
}
EXPORT_SYMBOL_GPL(cpts_tx_timestamp);

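/**
 * cpts_register - enable the CPTS hardware and register the PTP clock
 * @cpts: CPTS instance created by cpts_create()
 */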
int cpts_register(struct cpts *cpts)
{
	int err, i;

	skb_queue_head_init(&cpts->txq);
	INIT_LIST_HEAD(&cpts->events);
	INIT_LIST_HEAD(&cpts->pool);
	for (i = 0; i < CPTS_MAX_EVENTS; i++)
		list_add(&cpts->pool_data[i].list, &cpts->pool);

	err = clk_enable(cpts->refclk);
	if (err)
		return err;

	cpts_write32(cpts, CPTS_EN, control);
	cpts_write32(cpts, TS_PEND_EN, int_enable);

	timecounter_init(&cpts->tc, &cpts->cc, ktime_get_real_ns());

	cpts->clock = ptp_clock_register(&cpts->info, cpts->dev);
	if (IS_ERR(cpts->clock)) {
		err = PTR_ERR(cpts->clock);
		cpts->clock = NULL;
		goto err_ptp;
	}
	cpts->phc_index = ptp_clock_index(cpts->clock);

	ptp_schedule_worker(cpts->clock, cpts->ov_check_period);
	return 0;

err_ptp:
	clk_disable(cpts->refclk);
	return err;
}
EXPORT_SYMBOL_GPL(cpts_register);

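/**
 * cpts_unregister - unregister the PTP clock and disable the CPTS hardware
 * @cpts: CPTS instance
 */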
void cpts_unregister(struct cpts *cpts)
{
	if (WARN_ON(!cpts->clock))
		return;

	ptp_clock_unregister(cpts->clock);
	cpts->clock = NULL;
	cpts->phc_index = -1;

	cpts_write32(cpts, 0, int_enable);
	cpts_write32(cpts, 0, control);

	/* drop all skbs still waiting for a TX timestamp */
	skb_queue_purge(&cpts->txq);

	clk_disable(cpts->refclk);
}
EXPORT_SYMBOL_GPL(cpts_unregister);

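/* derive the cyclecounter mult/shift from the reference clock rate and size
 * the overflow check period so the worker runs at least twice per counter wrap
 */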
static void cpts_calc_mult_shift(struct cpts *cpts)
{
	u64 frac, maxsec, ns;
	u32 freq;

	freq = clk_get_rate(cpts->refclk);

	/* calculate the maximum number of seconds which can be covered
	 * before the 32-bit counter wraps around
	 */
	maxsec = cpts->cc.mask;
	do_div(maxsec, freq);

	/* limit the conversion range to 10 sec as higher values produce too
	 * small mult factors and so reduce the conversion accuracy
	 */
	if (maxsec > 10)
		maxsec = 10;

	/* calculate the overflow check period as half the wrap time */
	cpts->ov_check_period = (HZ * maxsec) / 2;
	dev_info(cpts->dev, "cpts: overflow check period %lu (jiffies)\n",
		 cpts->ov_check_period);

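	/* mult/shift supplied via devicetree take precedence; only
	 * auto-calculate when both are unset
	 */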
	if (cpts->cc.mult || cpts->cc.shift)
		return;

	clocks_calc_mult_shift(&cpts->cc.mult, &cpts->cc.shift,
			       freq, NSEC_PER_SEC, maxsec);

	frac = 0;
	ns = cyclecounter_cyc2ns(&cpts->cc, freq, cpts->cc.mask, &frac);

	dev_info(cpts->dev,
		 "CPTS: ref_clk_freq:%u calc_mult:%u calc_shift:%u error:%lld nsec/sec\n",
		 freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC));
}

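/* optionally register a mux clock described by a "cpts-refclk-mux" DT child
 * node, letting the CPTS reference clock parent be selected via rftclk_sel
 */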
static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
{
	struct device_node *refclk_np;
	const char **parent_names;
	unsigned int num_parents;
	struct clk_hw *clk_hw;
	int ret = -EINVAL;
	u32 *mux_table;

	refclk_np = of_get_child_by_name(node, "cpts-refclk-mux");
	if (!refclk_np)
		/* refclk mux selection is not supported on all SoCs */
		return 0;

	num_parents = of_clk_get_parent_count(refclk_np);
	if (num_parents < 1) {
		dev_err(cpts->dev, "mux-clock %s must have parents\n",
			refclk_np->name);
		goto mux_fail;
	}

	parent_names = devm_kcalloc(cpts->dev, num_parents,
				    sizeof(*parent_names), GFP_KERNEL);

	mux_table = devm_kcalloc(cpts->dev, num_parents, sizeof(*mux_table),
				 GFP_KERNEL);
	if (!mux_table || !parent_names) {
		ret = -ENOMEM;
		goto mux_fail;
	}

	of_clk_parent_fill(refclk_np, parent_names, num_parents);

	ret = of_property_read_variable_u32_array(refclk_np, "ti,mux-tbl",
						  mux_table,
						  num_parents, num_parents);
	if (ret < 0)
		goto mux_fail;

	clk_hw = clk_hw_register_mux_table(cpts->dev, refclk_np->name,
					   parent_names, num_parents,
					   0,
					   &cpts->reg->rftclk_sel, 0, 0x1F,
					   0, mux_table, NULL);
	if (IS_ERR(clk_hw)) {
		ret = PTR_ERR(clk_hw);
		goto mux_fail;
	}

	ret = devm_add_action_or_reset(cpts->dev,
				       (void (*)(void *))clk_hw_unregister_mux,
				       clk_hw);
	if (ret) {
		dev_err(cpts->dev, "add clkmux unreg action %d\n", ret);
		goto mux_fail;
	}

	ret = of_clk_add_hw_provider(refclk_np, of_clk_hw_simple_get, clk_hw);
	if (ret)
		goto mux_fail;

	ret = devm_add_action_or_reset(cpts->dev,
				       (void (*)(void *))of_clk_del_provider,
				       refclk_np);
	if (ret) {
		dev_err(cpts->dev, "add clkmux provider unreg action %d\n", ret);
		goto mux_fail;
	}

	return ret;

mux_fail:
	of_node_put(refclk_np);
	return ret;
}

static int cpts_of_parse(struct cpts *cpts, struct device_node *node)
{
	int ret = -EINVAL;
	u32 prop;

	if (!of_property_read_u32(node, "cpts_clock_mult", &prop))
		cpts->cc.mult = prop;

	if (!of_property_read_u32(node, "cpts_clock_shift", &prop))
		cpts->cc.shift = prop;

	if ((cpts->cc.mult && !cpts->cc.shift) ||
	    (!cpts->cc.mult && cpts->cc.shift))
		goto of_error;

	return cpts_of_mux_clk_setup(cpts, node);

of_error:
	dev_err(cpts->dev, "CPTS: cpts_clock_mult and cpts_clock_shift must be specified together\n");
	return ret;
}

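/**
 * cpts_create - allocate and initialize a CPTS instance
 * @dev: owning device
 * @regs: mapped CPTS register block
 * @node: device node carrying the CPTS properties
 * @n_ext_ts: number of external timestamp channels to expose
 */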
struct cpts *cpts_create(struct device *dev, void __iomem *regs,
			 struct device_node *node, u32 n_ext_ts)
{
	struct cpts *cpts;
	int ret;

	cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
	if (!cpts)
		return ERR_PTR(-ENOMEM);

	cpts->dev = dev;
	cpts->reg = (struct cpsw_cpts __iomem *)regs;
	cpts->irq_poll = true;
	spin_lock_init(&cpts->lock);
	mutex_init(&cpts->ptp_clk_mutex);
	init_completion(&cpts->ts_push_complete);

	ret = cpts_of_parse(cpts, node);
	if (ret)
		return ERR_PTR(ret);

	cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
	if (IS_ERR(cpts->refclk))
		/* fall back to the device node clock for compatibility */
		cpts->refclk = devm_clk_get(dev, "cpts");

	if (IS_ERR(cpts->refclk)) {
		dev_err(dev, "Failed to get cpts refclk %ld\n",
			PTR_ERR(cpts->refclk));
		return ERR_CAST(cpts->refclk);
	}

	ret = clk_prepare(cpts->refclk);
	if (ret)
		return ERR_PTR(ret);

	cpts->cc.read = cpts_systim_read;
	cpts->cc.mask = CLOCKSOURCE_MASK(32);
	cpts->info = cpts_info;
	cpts->phc_index = -1;

	if (n_ext_ts)
		cpts->info.n_ext_ts = n_ext_ts;

	cpts_calc_mult_shift(cpts);

	/* save the original cc.mult; cpts_ptp_adjfreq() modifies cc.mult
	 * and derives new values from this baseline
	 */
	cpts->cc_mult = cpts->cc.mult;

	return cpts;
}
EXPORT_SYMBOL_GPL(cpts_create);

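/**
 * cpts_release - undo the clock preparation done in cpts_create()
 * @cpts: CPTS instance, may be NULL
 */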
void cpts_release(struct cpts *cpts)
{
	if (!cpts)
		return;

	if (WARN_ON(!cpts->refclk))
		return;

	clk_unprepare(cpts->refclk);
}
EXPORT_SYMBOL_GPL(cpts_release);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI CPTS driver");
MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");