0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033 #include <linux/clocksource.h>
0034 #include <linux/highmem.h>
0035 #include <linux/ptp_clock_kernel.h>
0036 #include <rdma/mlx5-abi.h>
0037 #include "lib/eq.h"
0038 #include "en.h"
0039 #include "clock.h"
0040
/* Fixed-point shift used for the internal cycle-counter -> ns conversion
 * (cyclecounter.shift).
 */
enum {
	MLX5_CYCLES_SHIFT = 23
};

/* MTPPS register pin_mode values. */
enum {
	MLX5_PIN_MODE_IN = 0x0,
	MLX5_PIN_MODE_OUT = 0x1,
};

/* MTPPS register output pattern values. */
enum {
	MLX5_OUT_PATTERN_PULSE = 0x0,
	MLX5_OUT_PATTERN_PERIODIC = 0x1,
};

/* MTPPSE event-generation modes. */
enum {
	MLX5_EVENT_MODE_DISABLE = 0x0,
	MLX5_EVENT_MODE_REPETETIVE = 0x1,
	MLX5_EVENT_MODE_ONCE_TILL_ARM = 0x2,
};

/* MTPPS field_select bits: select which register fields a write updates. */
enum {
	MLX5_MTPPS_FS_ENABLE = BIT(0x0),
	MLX5_MTPPS_FS_PATTERN = BIT(0x2),
	MLX5_MTPPS_FS_PIN_MODE = BIT(0x3),
	MLX5_MTPPS_FS_TIME_STAMP = BIT(0x4),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
	MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
};
0069
0070 static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
0071 {
0072 return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
0073 }
0074
0075 static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
0076 {
0077 return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
0078 }
0079
0080 static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
0081 {
0082 u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
0083
0084 if (!MLX5_CAP_MCAM_REG(dev, mtutc))
0085 return -EOPNOTSUPP;
0086
0087 return mlx5_core_access_reg(dev, mtutc, size, out, sizeof(out),
0088 MLX5_REG_MTUTC, 0, 1);
0089 }
0090
/* Read the device's 64-bit timer from the init segment: the free-running
 * cycle counter, or the real-time clock (ns) when @real_time is set.
 *
 * The high word is sampled before and after the low word; if it changed,
 * the low word wrapped between the reads and is re-read so the combined
 * value is consistent.  @sts, when non-NULL, captures system timestamps
 * around the low-word read (for PTP_SYS_OFFSET_EXTENDED).
 */
static u64 mlx5_read_time(struct mlx5_core_dev *dev,
			  struct ptp_system_timestamp *sts,
			  bool real_time)
{
	u32 timer_h, timer_h1, timer_l;

	timer_h = ioread32be(real_time ? &dev->iseg->real_time_h :
			     &dev->iseg->internal_timer_h);
	ptp_read_system_prets(sts);
	timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
			     &dev->iseg->internal_timer_l);
	ptp_read_system_postts(sts);
	timer_h1 = ioread32be(real_time ? &dev->iseg->real_time_h :
			      &dev->iseg->internal_timer_h);
	if (timer_h != timer_h1) {
		/* The low word wrapped during the first read; re-read it
		 * (and refresh the system timestamps) to pair with timer_h1.
		 */
		ptp_read_system_prets(sts);
		timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
				     &dev->iseg->internal_timer_l);
		ptp_read_system_postts(sts);
	}

	return real_time ? REAL_TIME_TO_NS(timer_h1, timer_l) :
	       (u64)timer_l | (u64)timer_h1 << 32;
}
0116
0117 static u64 read_internal_timer(const struct cyclecounter *cc)
0118 {
0119 struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
0120 struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
0121 struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
0122 clock);
0123
0124 return mlx5_read_time(mdev, NULL, false) & cc->mask;
0125 }
0126
/* Publish the current timecounter state to the page shared with user space
 * (mlx5_ib clock info).  Readers detect in-flight updates via the sign
 * field: the KERNEL_UPDATING bit is set while fields are being written and
 * the sign is advanced by two on completion.  Caller must hold clock->lock.
 */
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer;
	u32 sign;

	/* Page allocation may have failed at init; nothing to publish then. */
	if (!clock_info)
		return;

	sign = smp_load_acquire(&clock_info->sign);
	/* smp_store_mb orders the "updating" flag before the field writes. */
	smp_store_mb(clock_info->sign,
		     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

	timer = &clock->timer;
	clock_info->cycles = timer->tc.cycle_last;
	clock_info->mult = timer->cycles.mult;
	clock_info->nsec = timer->tc.nsec;
	clock_info->frac = timer->tc.frac;

	/* Release: field writes are visible before the new (even) sign. */
	smp_store_release(&clock_info->sign,
			  sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
}
0150
/* Deferred work: program queued one-shot output timestamps into MTPPS,
 * one pin at a time.  Each pps_info.start[] entry is consumed (read and
 * zeroed) under clock->lock so the PPS event handler can queue new ones
 * concurrently.
 */
static void mlx5_pps_out(struct work_struct *work)
{
	struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
						 out_work);
	struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
						pps_info);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	unsigned long flags;
	int i;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		u64 tstart;

		/* Take ownership of the pending timestamp for this pin. */
		write_seqlock_irqsave(&clock->lock, flags);
		tstart = clock->pps_info.start[i];
		clock->pps_info.start[i] = 0;
		write_sequnlock_irqrestore(&clock->lock, flags);
		if (!tstart)
			continue;

		MLX5_SET(mtpps_reg, in, pin, i);
		MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
		MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
		mlx5_set_mtpps(mdev, in, sizeof(in));
	}
}
0179
0180 static void mlx5_timestamp_overflow(struct work_struct *work)
0181 {
0182 struct delayed_work *dwork = to_delayed_work(work);
0183 struct mlx5_core_dev *mdev;
0184 struct mlx5_timer *timer;
0185 struct mlx5_clock *clock;
0186 unsigned long flags;
0187
0188 timer = container_of(dwork, struct mlx5_timer, overflow_work);
0189 clock = container_of(timer, struct mlx5_clock, timer);
0190 mdev = container_of(clock, struct mlx5_core_dev, clock);
0191
0192 write_seqlock_irqsave(&clock->lock, flags);
0193 timecounter_read(&timer->tc);
0194 mlx5_update_clock_info_page(mdev);
0195 write_sequnlock_irqrestore(&clock->lock, flags);
0196 schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
0197 }
0198
0199 static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
0200 const struct timespec64 *ts)
0201 {
0202 u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
0203
0204 if (!mlx5_modify_mtutc_allowed(mdev))
0205 return 0;
0206
0207 if (ts->tv_sec < 0 || ts->tv_sec > U32_MAX ||
0208 ts->tv_nsec < 0 || ts->tv_nsec > NSEC_PER_SEC)
0209 return -EINVAL;
0210
0211 MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE);
0212 MLX5_SET(mtutc_reg, in, utc_sec, ts->tv_sec);
0213 MLX5_SET(mtutc_reg, in, utc_nsec, ts->tv_nsec);
0214
0215 return mlx5_set_mtutc(mdev, in, sizeof(in));
0216 }
0217
0218 static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
0219 {
0220 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
0221 struct mlx5_timer *timer = &clock->timer;
0222 struct mlx5_core_dev *mdev;
0223 unsigned long flags;
0224 int err;
0225
0226 mdev = container_of(clock, struct mlx5_core_dev, clock);
0227 err = mlx5_ptp_settime_real_time(mdev, ts);
0228 if (err)
0229 return err;
0230
0231 write_seqlock_irqsave(&clock->lock, flags);
0232 timecounter_init(&timer->tc, &timer->cycles, timespec64_to_ns(ts));
0233 mlx5_update_clock_info_page(mdev);
0234 write_sequnlock_irqrestore(&clock->lock, flags);
0235
0236 return 0;
0237 }
0238
0239 static
0240 struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
0241 struct ptp_system_timestamp *sts)
0242 {
0243 struct timespec64 ts;
0244 u64 time;
0245
0246 time = mlx5_read_time(mdev, sts, true);
0247 ts = ns_to_timespec64(time);
0248 return ts;
0249 }
0250
0251 static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
0252 struct ptp_system_timestamp *sts)
0253 {
0254 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
0255 struct mlx5_timer *timer = &clock->timer;
0256 struct mlx5_core_dev *mdev;
0257 unsigned long flags;
0258 u64 cycles, ns;
0259
0260 mdev = container_of(clock, struct mlx5_core_dev, clock);
0261 if (mlx5_real_time_mode(mdev)) {
0262 *ts = mlx5_ptp_gettimex_real_time(mdev, sts);
0263 goto out;
0264 }
0265
0266 write_seqlock_irqsave(&clock->lock, flags);
0267 cycles = mlx5_read_time(mdev, sts, false);
0268 ns = timecounter_cyc2time(&timer->tc, cycles);
0269 write_sequnlock_irqrestore(&clock->lock, flags);
0270 *ts = ns_to_timespec64(ns);
0271 out:
0272 return 0;
0273 }
0274
0275 static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
0276 {
0277 u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
0278
0279 if (!mlx5_modify_mtutc_allowed(mdev))
0280 return 0;
0281
0282
0283 if (delta < S16_MIN || delta > S16_MAX) {
0284 struct timespec64 ts;
0285 s64 ns;
0286
0287 ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
0288 ns = timespec64_to_ns(&ts) + delta;
0289 ts = ns_to_timespec64(ns);
0290 return mlx5_ptp_settime_real_time(mdev, &ts);
0291 }
0292
0293 MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_TIME);
0294 MLX5_SET(mtutc_reg, in, time_adjustment, delta);
0295
0296 return mlx5_set_mtutc(mdev, in, sizeof(in));
0297 }
0298
0299 static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
0300 {
0301 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
0302 struct mlx5_timer *timer = &clock->timer;
0303 struct mlx5_core_dev *mdev;
0304 unsigned long flags;
0305 int err;
0306
0307 mdev = container_of(clock, struct mlx5_core_dev, clock);
0308
0309 err = mlx5_ptp_adjtime_real_time(mdev, delta);
0310 if (err)
0311 return err;
0312 write_seqlock_irqsave(&clock->lock, flags);
0313 timecounter_adjtime(&timer->tc, delta);
0314 mlx5_update_clock_info_page(mdev);
0315 write_sequnlock_irqrestore(&clock->lock, flags);
0316
0317 return 0;
0318 }
0319
0320 static int mlx5_ptp_adjfreq_real_time(struct mlx5_core_dev *mdev, s32 freq)
0321 {
0322 u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
0323
0324 if (!mlx5_modify_mtutc_allowed(mdev))
0325 return 0;
0326
0327 MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);
0328 MLX5_SET(mtutc_reg, in, freq_adjustment, freq);
0329
0330 return mlx5_set_mtutc(mdev, in, sizeof(in));
0331 }
0332
0333 static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
0334 {
0335 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
0336 struct mlx5_timer *timer = &clock->timer;
0337 struct mlx5_core_dev *mdev;
0338 unsigned long flags;
0339 int neg_adj = 0;
0340 u32 diff;
0341 u64 adj;
0342 int err;
0343
0344 mdev = container_of(clock, struct mlx5_core_dev, clock);
0345 err = mlx5_ptp_adjfreq_real_time(mdev, delta);
0346 if (err)
0347 return err;
0348
0349 if (delta < 0) {
0350 neg_adj = 1;
0351 delta = -delta;
0352 }
0353
0354 adj = timer->nominal_c_mult;
0355 adj *= delta;
0356 diff = div_u64(adj, 1000000000ULL);
0357
0358 write_seqlock_irqsave(&clock->lock, flags);
0359 timecounter_read(&timer->tc);
0360 timer->cycles.mult = neg_adj ? timer->nominal_c_mult - diff :
0361 timer->nominal_c_mult + diff;
0362 mlx5_update_clock_info_page(mdev);
0363 write_sequnlock_irqrestore(&clock->lock, flags);
0364
0365 return 0;
0366 }
0367
/* Configure an external-timestamp (EXTTS) request on a PPS-capable pin:
 * validate the request flags, program the pin via MTPPS, then arm or
 * disarm event generation via MTPPSE.
 */
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
				struct ptp_clock_request *rq,
				int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
				PTP_RISING_EDGE |
				PTP_FALLING_EDGE |
				PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Hardware timestamps one edge only; under PTP_STRICT_FLAGS a
	 * request for both edges cannot be honoured.
	 */
	if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
	    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
	    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
		return -EOPNOTSUPP;

	if (rq->extts.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
	if (pin < 0)
		return -EBUSY;

	if (on) {
		pin_mode = MLX5_PIN_MODE_IN;
		/* pattern selects the captured edge: 0 rising, 1 falling */
		pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE;
	} else {
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}
0429
/* Translate an absolute target time @target_ns (internal-timer timebase)
 * into the free-running cycle count at which it will occur.
 */
static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
{
	struct mlx5_clock *clock = &mdev->clock;
	u64 cycles_now, cycles_delta;
	u64 nsec_now, nsec_delta;
	struct mlx5_timer *timer;
	unsigned long flags;

	timer = &clock->timer;

	cycles_now = mlx5_read_time(mdev, NULL, false);
	write_seqlock_irqsave(&clock->lock, flags);
	nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
	nsec_delta = target_ns - nsec_now;
	/* Invert cyc2ns: cycles = ns * 2^shift / mult */
	cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
				 timer->cycles.mult);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return cycles_now + cycles_delta;
}
0450
0451 static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, s64 sec)
0452 {
0453 struct timespec64 ts = {};
0454 s64 target_ns;
0455
0456 ts.tv_sec = sec;
0457 target_ns = timespec64_to_ns(&ts);
0458
0459 return find_target_cycles(mdev, target_ns);
0460 }
0461
0462 static u64 perout_conf_real_time(s64 sec)
0463 {
0464 return (u64)sec << 32;
0465 }
0466
0467 static int mlx5_perout_configure(struct ptp_clock_info *ptp,
0468 struct ptp_clock_request *rq,
0469 int on)
0470 {
0471 struct mlx5_clock *clock =
0472 container_of(ptp, struct mlx5_clock, ptp_info);
0473 struct mlx5_core_dev *mdev =
0474 container_of(clock, struct mlx5_core_dev, clock);
0475 bool rt_mode = mlx5_real_time_mode(mdev);
0476 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
0477 struct timespec64 ts;
0478 u32 field_select = 0;
0479 u64 time_stamp = 0;
0480 u8 pin_mode = 0;
0481 u8 pattern = 0;
0482 int pin = -1;
0483 int err = 0;
0484 s64 ns;
0485
0486 if (!MLX5_PPS_CAP(mdev))
0487 return -EOPNOTSUPP;
0488
0489
0490 if (rq->perout.flags)
0491 return -EOPNOTSUPP;
0492
0493 if (rq->perout.index >= clock->ptp_info.n_pins)
0494 return -EINVAL;
0495
0496 field_select = MLX5_MTPPS_FS_ENABLE;
0497 pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
0498 if (pin < 0)
0499 return -EBUSY;
0500
0501 if (on) {
0502 bool rt_mode = mlx5_real_time_mode(mdev);
0503 s64 sec = rq->perout.start.sec;
0504
0505 if (rq->perout.start.nsec)
0506 return -EINVAL;
0507
0508 pin_mode = MLX5_PIN_MODE_OUT;
0509 pattern = MLX5_OUT_PATTERN_PERIODIC;
0510 ts.tv_sec = rq->perout.period.sec;
0511 ts.tv_nsec = rq->perout.period.nsec;
0512 ns = timespec64_to_ns(&ts);
0513
0514 if ((ns >> 1) != 500000000LL)
0515 return -EINVAL;
0516
0517 if (rt_mode && sec > U32_MAX)
0518 return -EINVAL;
0519
0520 time_stamp = rt_mode ? perout_conf_real_time(sec) :
0521 perout_conf_internal_timer(mdev, sec);
0522
0523 field_select |= MLX5_MTPPS_FS_PIN_MODE |
0524 MLX5_MTPPS_FS_PATTERN |
0525 MLX5_MTPPS_FS_TIME_STAMP;
0526 }
0527
0528 MLX5_SET(mtpps_reg, in, pin, pin);
0529 MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
0530 MLX5_SET(mtpps_reg, in, pattern, pattern);
0531 MLX5_SET(mtpps_reg, in, enable, on);
0532 MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
0533 MLX5_SET(mtpps_reg, in, field_select, field_select);
0534
0535 err = mlx5_set_mtpps(mdev, in, sizeof(in));
0536 if (err)
0537 return err;
0538
0539 if (rt_mode)
0540 return 0;
0541
0542 return mlx5_set_mtppse(mdev, pin, 0,
0543 MLX5_EVENT_MODE_REPETETIVE & on);
0544 }
0545
0546 static int mlx5_pps_configure(struct ptp_clock_info *ptp,
0547 struct ptp_clock_request *rq,
0548 int on)
0549 {
0550 struct mlx5_clock *clock =
0551 container_of(ptp, struct mlx5_clock, ptp_info);
0552
0553 clock->pps_info.enabled = !!on;
0554 return 0;
0555 }
0556
0557 static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
0558 struct ptp_clock_request *rq,
0559 int on)
0560 {
0561 switch (rq->type) {
0562 case PTP_CLK_REQ_EXTTS:
0563 return mlx5_extts_configure(ptp, rq, on);
0564 case PTP_CLK_REQ_PEROUT:
0565 return mlx5_perout_configure(ptp, rq, on);
0566 case PTP_CLK_REQ_PPS:
0567 return mlx5_pps_configure(ptp, rq, on);
0568 default:
0569 return -EOPNOTSUPP;
0570 }
0571 return 0;
0572 }
0573
/* Per-pin capability bits reported in the MTPPS cap_pin_<i>_mode fields. */
enum {
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
};
0578
0579 static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
0580 enum ptp_pin_function func, unsigned int chan)
0581 {
0582 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
0583 ptp_info);
0584
0585 switch (func) {
0586 case PTP_PF_NONE:
0587 return 0;
0588 case PTP_PF_EXTTS:
0589 return !(clock->pps_info.pin_caps[pin] &
0590 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
0591 case PTP_PF_PEROUT:
0592 return !(clock->pps_info.pin_caps[pin] &
0593 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
0594 default:
0595 return -EOPNOTSUPP;
0596 }
0597 }
0598
/* Baseline PTP clock description.  The pin-related fields (n_pins, pps,
 * enable, verify, pin_config) are filled in by mlx5_init_pin_config()
 * when the device exposes PPS pins.
 */
static const struct ptp_clock_info mlx5_ptp_clock_info = {
	.owner = THIS_MODULE,
	.name = "mlx5_ptp",
	.max_adj = 100000000,
	.n_alarm = 0,
	.n_ext_ts = 0,
	.n_per_out = 0,
	.n_pins = 0,
	.pps = 0,
	.adjfreq = mlx5_ptp_adjfreq,
	.adjtime = mlx5_ptp_adjtime,
	.gettimex64 = mlx5_ptp_gettimex,
	.settime64 = mlx5_ptp_settime,
	.enable = NULL,
	.verify = NULL,
};
0615
0616 static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
0617 u32 *mtpps, u32 mtpps_size)
0618 {
0619 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};
0620
0621 MLX5_SET(mtpps_reg, in, pin, pin);
0622
0623 return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
0624 mtpps_size, MLX5_REG_MTPPS, 0, 0);
0625 }
0626
0627 static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
0628 {
0629 struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
0630
0631 u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
0632 u8 mode;
0633 int err;
0634
0635 err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
0636 if (err || !MLX5_GET(mtpps_reg, out, enable))
0637 return PTP_PF_NONE;
0638
0639 mode = MLX5_GET(mtpps_reg, out, pin_mode);
0640
0641 if (mode == MLX5_PIN_MODE_IN)
0642 return PTP_PF_EXTTS;
0643 else if (mode == MLX5_PIN_MODE_OUT)
0644 return PTP_PF_PEROUT;
0645
0646 return PTP_PF_NONE;
0647 }
0648
0649 static void mlx5_init_pin_config(struct mlx5_clock *clock)
0650 {
0651 int i;
0652
0653 if (!clock->ptp_info.n_pins)
0654 return;
0655
0656 clock->ptp_info.pin_config =
0657 kcalloc(clock->ptp_info.n_pins,
0658 sizeof(*clock->ptp_info.pin_config),
0659 GFP_KERNEL);
0660 if (!clock->ptp_info.pin_config)
0661 return;
0662 clock->ptp_info.enable = mlx5_ptp_enable;
0663 clock->ptp_info.verify = mlx5_ptp_verify;
0664 clock->ptp_info.pps = 1;
0665
0666 for (i = 0; i < clock->ptp_info.n_pins; i++) {
0667 snprintf(clock->ptp_info.pin_config[i].name,
0668 sizeof(clock->ptp_info.pin_config[i].name),
0669 "mlx5_pps%d", i);
0670 clock->ptp_info.pin_config[i].index = i;
0671 clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
0672 clock->ptp_info.pin_config[i].chan = 0;
0673 }
0674 }
0675
/* Read the device's PPS capabilities from MTPPS: pin counts and the
 * per-pin mode support bitmaps (each pin has its own named cap field,
 * hence the unrolled assignments).
 */
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};

	mlx5_query_mtpps(mdev, out, sizeof(out));

	clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
					  cap_number_of_pps_pins);
	clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
					    cap_max_num_of_pps_in_pins);
	clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
					     cap_max_num_of_pps_out_pins);

	clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
	clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
	clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
	clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
	clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
	clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
	clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
	clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
0699
0700 static void ts_next_sec(struct timespec64 *ts)
0701 {
0702 ts->tv_sec += 1;
0703 ts->tv_nsec = 0;
0704 }
0705
0706 static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
0707 struct mlx5_clock *clock)
0708 {
0709 struct timespec64 ts;
0710 s64 target_ns;
0711
0712 mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
0713 ts_next_sec(&ts);
0714 target_ns = timespec64_to_ns(&ts);
0715
0716 return find_target_cycles(mdev, target_ns);
0717 }
0718
/* PPS event notifier: handles hardware pin events.  EXTTS pins report a
 * timestamp to user space (as PPSUSR when PPS is enabled); PEROUT pins
 * queue the next-second re-arm timestamp for the mlx5_pps_out worker.
 */
static int mlx5_pps_event(struct notifier_block *nb,
			  unsigned long type, void *data)
{
	struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
	struct ptp_clock_event ptp_event;
	struct mlx5_eqe *eqe = data;
	int pin = eqe->data.pps.pin;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	u64 ns;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	switch (clock->ptp_info.pin_config[pin].func) {
	case PTP_PF_EXTTS:
		ptp_event.index = pin;
		/* Translate the raw event timestamp with whichever timebase
		 * the device is using (real-time vs free-running counter).
		 */
		ptp_event.timestamp = mlx5_real_time_mode(mdev) ?
			mlx5_real_time_cyc2time(clock,
					be64_to_cpu(eqe->data.pps.time_stamp)) :
			mlx5_timecounter_cyc2time(clock,
					be64_to_cpu(eqe->data.pps.time_stamp));
		if (clock->pps_info.enabled) {
			ptp_event.type = PTP_CLOCK_PPSUSR;
			ptp_event.pps_times.ts_real =
					ns_to_timespec64(ptp_event.timestamp);
		} else {
			ptp_event.type = PTP_CLOCK_EXTTS;
		}
		/* TODO: missing mlx5_ptp_adjtime notification (upstream hole) */
		ptp_clock_event(clock->ptp, &ptp_event);
		break;
	case PTP_PF_PEROUT:
		/* Queue the next pulse time; programmed from the workqueue
		 * since register access may sleep.
		 */
		ns = perout_conf_next_event_timer(mdev, clock);
		write_seqlock_irqsave(&clock->lock, flags);
		clock->pps_info.start[pin] = ns;
		write_sequnlock_irqrestore(&clock->lock, flags);
		schedule_work(&clock->pps_info.out_work);
		break;
	default:
		mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
			      clock->ptp_info.pin_config[pin].func);
	}

	return NOTIFY_OK;
}
0764
0765 static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
0766 {
0767 struct mlx5_clock *clock = &mdev->clock;
0768 struct mlx5_timer *timer = &clock->timer;
0769 u32 dev_freq;
0770
0771 dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
0772 timer->cycles.read = read_internal_timer;
0773 timer->cycles.shift = MLX5_CYCLES_SHIFT;
0774 timer->cycles.mult = clocksource_khz2mult(dev_freq,
0775 timer->cycles.shift);
0776 timer->nominal_c_mult = timer->cycles.mult;
0777 timer->cycles.mask = CLOCKSOURCE_MASK(41);
0778
0779 timecounter_init(&timer->tc, &timer->cycles,
0780 ktime_to_ns(ktime_get_real()));
0781 }
0782
/* Compute the period of the overflow watchdog and kick it off. */
static void mlx5_init_overflow_period(struct mlx5_clock *clock)
{
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_timer *timer = &clock->timer;
	u64 overflow_cycles;
	u64 frac = 0;
	u64 ns;

	/* Calculate the period of the overflow work so the counter is read
	 * well before it wraps.  The cycle budget is the minimum of:
	 *  - the largest cycle count whose product with the multiplier
	 *    still fits in 64 bits (~0ULL >> 1 for safety margin), and
	 *  - a third of the hardware counter range (cycles.mask / 3),
	 * converted to jiffies-compatible units below.
	 */
	overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult);
	overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3));

	ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles,
				 frac, &frac);
	/* convert ns to the delayed-work period in jiffies */
	do_div(ns, NSEC_PER_SEC / HZ);
	timer->overflow_period = ns;

	INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
	if (timer->overflow_period)
		schedule_delayed_work(&timer->overflow_work, 0);
	else
		mlx5_core_warn(mdev,
			       "invalid overflow period, overflow_work is not scheduled\n");

	/* Expose the period to user space through the shared clock page. */
	if (clock_info)
		clock_info->overflow_period = timer->overflow_period;
}
0817
0818 static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
0819 {
0820 struct mlx5_clock *clock = &mdev->clock;
0821 struct mlx5_ib_clock_info *info;
0822 struct mlx5_timer *timer;
0823
0824 mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
0825 if (!mdev->clock_info) {
0826 mlx5_core_warn(mdev, "Failed to allocate IB clock info page\n");
0827 return;
0828 }
0829
0830 info = mdev->clock_info;
0831 timer = &clock->timer;
0832
0833 info->nsec = timer->tc.nsec;
0834 info->cycles = timer->tc.cycle_last;
0835 info->mask = timer->cycles.mask;
0836 info->mult = timer->nominal_c_mult;
0837 info->shift = timer->cycles.shift;
0838 info->frac = timer->tc.frac;
0839 }
0840
0841 static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
0842 {
0843 struct mlx5_clock *clock = &mdev->clock;
0844
0845 mlx5_timecounter_init(mdev);
0846 mlx5_init_clock_info(mdev);
0847 mlx5_init_overflow_period(clock);
0848 clock->ptp_info = mlx5_ptp_clock_info;
0849
0850 if (mlx5_real_time_mode(mdev)) {
0851 struct timespec64 ts;
0852
0853 ktime_get_real_ts64(&ts);
0854 mlx5_ptp_settime(&clock->ptp_info, &ts);
0855 }
0856 }
0857
0858 static void mlx5_init_pps(struct mlx5_core_dev *mdev)
0859 {
0860 struct mlx5_clock *clock = &mdev->clock;
0861
0862 if (!MLX5_PPS_CAP(mdev))
0863 return;
0864
0865 mlx5_get_pps_caps(mdev);
0866 mlx5_init_pin_config(clock);
0867 }
0868
/* Top-level clock init: set up the timer, PPS pins, PTP clock device and
 * the PPS event notifier.  A failed ptp_clock_register is non-fatal; the
 * driver then runs without a PTP device node.
 */
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
		mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
		return;
	}

	seqlock_init(&clock->lock);
	mlx5_init_timer_clock(mdev);
	INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);

	/* Configure the PHC */
	clock->ptp_info = mlx5_ptp_clock_info;

	/* Initialize 1PPS data structures */
	mlx5_init_pps(mdev);

	clock->ptp = ptp_clock_register(&clock->ptp_info,
					&mdev->pdev->dev);
	if (IS_ERR(clock->ptp)) {
		mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
			       PTR_ERR(clock->ptp));
		clock->ptp = NULL;
	}

	MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
	mlx5_eq_notifier_register(mdev, &clock->pps_nb);
}
0899
/* Tear down the clock in reverse order of init: stop new events first
 * (notifier, PTP device), then flush deferred work, then free the shared
 * page and pin table.
 */
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	/* Mirror the init-time bail-out: nothing was set up without it. */
	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
		return;

	mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
	if (clock->ptp) {
		ptp_clock_unregister(clock->ptp);
		clock->ptp = NULL;
	}

	/* Workers may have been scheduled by events; flush after sources gone. */
	cancel_work_sync(&clock->pps_info.out_work);
	cancel_delayed_work_sync(&clock->timer.overflow_work);

	if (mdev->clock_info) {
		free_page((unsigned long)mdev->clock_info);
		mdev->clock_info = NULL;
	}

	kfree(clock->ptp_info.pin_config);
}