// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_trace.h"

#define E810_OUT_PROP_DELAY_NS 1

#define UNKNOWN_INCVAL_E822 0x100000000ULL

static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
	/* name    idx   func          chan */
	{ "GNSS",  GNSS, PTP_PF_EXTTS, 0, { 0, } },
	{ "SMA1",  SMA1, PTP_PF_NONE,  1, { 0, } },
	{ "U.FL1", UFL1, PTP_PF_NONE,  1, { 0, } },
	{ "SMA2",  SMA2, PTP_PF_NONE,  2, { 0, } },
	{ "U.FL2", UFL2, PTP_PF_NONE,  2, { 0, } },
};

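/**
 * ice_get_sma_config_e810t
 * @hw: pointer to the hw struct
 * @ptp_pins: pointer to the ptp_pin_desc structure
 *
 * Read the configuration of the SMA control logic and put it into the
 * ptp_pin_desc structure
 */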
static int
ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
{
	u8 data, i;
	int status;

	/* Read initial pin state */
	status = ice_read_sma_ctrl_e810t(hw, &data);
	if (status)
		return status;

	/* initialize with defaults */
	for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
		snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name),
			 "%s", ice_pin_desc_e810t[i].name);
		ptp_pins[i].index = ice_pin_desc_e810t[i].index;
		ptp_pins[i].func = ice_pin_desc_e810t[i].func;
		ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
	}

	/* Parse SMA1/UFL1 */
	switch (data & ICE_SMA1_MASK_E810T) {
	case ICE_SMA1_MASK_E810T:
	default:
		ptp_pins[SMA1].func = PTP_PF_NONE;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case ICE_SMA1_DIR_EN_E810T:
		ptp_pins[SMA1].func = PTP_PF_PEROUT;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case ICE_SMA1_TX_EN_E810T:
		ptp_pins[SMA1].func = PTP_PF_EXTTS;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case 0:
		ptp_pins[SMA1].func = PTP_PF_EXTTS;
		ptp_pins[UFL1].func = PTP_PF_PEROUT;
		break;
	}

	/* Parse SMA2/UFL2 */
	switch (data & ICE_SMA2_MASK_E810T) {
	case ICE_SMA2_MASK_E810T:
	default:
		ptp_pins[SMA2].func = PTP_PF_NONE;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
		ptp_pins[SMA2].func = PTP_PF_EXTTS;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
		ptp_pins[SMA2].func = PTP_PF_PEROUT;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
		ptp_pins[SMA2].func = PTP_PF_NONE;
		ptp_pins[UFL2].func = PTP_PF_EXTTS;
		break;
	case ICE_SMA2_DIR_EN_E810T:
		ptp_pins[SMA2].func = PTP_PF_PEROUT;
		ptp_pins[UFL2].func = PTP_PF_EXTTS;
		break;
	}

	return 0;
}

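/**
 * ice_ptp_set_sma_config_e810t
 * @hw: pointer to the hw struct
 * @ptp_pins: pointer to the ptp_pin_desc structure
 *
 * Set the configuration of the SMA control logic based on the configuration
 * requested in ptp_pins
 */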
static int
ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
			     const struct ptp_pin_desc *ptp_pins)
{
	int status;
	u8 data;

	/* SMA1 and UFL1 cannot be set to TX at the same time */
	if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
	    ptp_pins[UFL1].func == PTP_PF_PEROUT)
		return -EINVAL;

	/* SMA2 and UFL2 cannot be set to RX at the same time */
	if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
	    ptp_pins[UFL2].func == PTP_PF_EXTTS)
		return -EINVAL;

	/* Read initial pin state value */
	status = ice_read_sma_ctrl_e810t(hw, &data);
	if (status)
		return status;

	/* Set the right state based on the desired configuration */
	data &= ~ICE_SMA1_MASK_E810T;
	if (ptp_pins[SMA1].func == PTP_PF_NONE &&
	    ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
		data |= ICE_SMA1_MASK_E810T;
	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 RX");
		data |= ICE_SMA1_TX_EN_E810T;
	} else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
		/* U.FL1 TX always enables SMA1 RX as well */
		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
	} else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 TX");
		data |= ICE_SMA1_DIR_EN_E810T;
	}

	data &= ~ICE_SMA2_MASK_E810T;
	if (ptp_pins[SMA2].func == PTP_PF_NONE &&
	    ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
		data |= ICE_SMA2_MASK_E810T;
	} else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 RX");
		data |= (ICE_SMA2_TX_EN_E810T |
			 ICE_SMA2_UFL2_RX_DIS_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
		dev_info(ice_hw_to_dev(hw), "UFL2 RX");
		data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 TX");
		data |= (ICE_SMA2_DIR_EN_E810T |
			 ICE_SMA2_UFL2_RX_DIS_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
		dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
		data |= ICE_SMA2_DIR_EN_E810T;
	}

	return ice_write_sma_ctrl_e810t(hw, data);
}

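/**
 * ice_ptp_set_sma_e810t
 * @info: the driver's PTP info structure
 * @pin: pin index in kernel structure
 * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
 *
 * Set the configuration of a single SMA pin
 */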
static int
ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
		      enum ptp_pin_function func)
{
	struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	int err;

	if (pin < SMA1 || func > PTP_PF_PEROUT)
		return -EOPNOTSUPP;

	err = ice_get_sma_config_e810t(hw, ptp_pins);
	if (err)
		return err;

	/* Disable the same function on the other pin sharing the channel */
	if (pin == SMA1 && ptp_pins[UFL1].func == func)
		ptp_pins[UFL1].func = PTP_PF_NONE;
	if (pin == UFL1 && ptp_pins[SMA1].func == func)
		ptp_pins[SMA1].func = PTP_PF_NONE;

	if (pin == SMA2 && ptp_pins[UFL2].func == func)
		ptp_pins[UFL2].func = PTP_PF_NONE;
	if (pin == UFL2 && ptp_pins[SMA2].func == func)
		ptp_pins[SMA2].func = PTP_PF_NONE;

	/* Set up new pin function in the temp table */
	ptp_pins[pin].func = func;

	return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
}

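/**
 * ice_verify_pin_e810t
 * @info: the driver's PTP info structure
 * @pin: Pin index
 * @func: Assigned function
 * @chan: Assigned channel
 *
 * Verify if the pin supports the requested function and check pin
 * consistency, then reconfigure the SMA logic attached to the given pin to
 * enable its desired functionality
 */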
static int
ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
		     enum ptp_pin_function func, unsigned int chan)
{
	/* Don't allow channel reassignment */
	if (chan != ice_pin_desc_e810t[pin].chan)
		return -EOPNOTSUPP;

	/* Check if functions are properly assigned */
	switch (func) {
	case PTP_PF_NONE:
		break;
	case PTP_PF_EXTTS:
		if (pin == UFL1)
			return -EOPNOTSUPP;
		break;
	case PTP_PF_PEROUT:
		if (pin == UFL2 || pin == GNSS)
			return -EOPNOTSUPP;
		break;
	case PTP_PF_PHYSYNC:
		return -EOPNOTSUPP;
	}

	return ice_ptp_set_sma_e810t(info, pin, func);
}

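/**
 * ice_set_tx_tstamp - Enable or disable Tx timestamping
 * @pf: The PF pointer to search in
 * @on: bool value for whether timestamps are enabled or disabled
 */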
static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
{
	struct ice_vsi *vsi;
	u32 val;
	u16 i;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return;

	/* Set the timestamp enable flag for all the Tx rings */
	ice_for_each_txq(vsi, i) {
		if (!vsi->tx_rings[i])
			continue;
		vsi->tx_rings[i]->ptp_tx = on;
	}

	/* Configure the Tx timestamp interrupt */
	val = rd32(&pf->hw, PFINT_OICR_ENA);
	if (on)
		val |= PFINT_OICR_TSYN_TX_M;
	else
		val &= ~PFINT_OICR_TSYN_TX_M;
	wr32(&pf->hw, PFINT_OICR_ENA, val);

	pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
}

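/**
 * ice_set_rx_tstamp - Enable or disable Rx timestamping
 * @pf: Board private structure
 * @on: bool value for whether timestamps are enabled or disabled
 */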
static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
{
	struct ice_vsi *vsi;
	u16 i;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return;

	/* Set the timestamp flag for all the Rx rings */
	ice_for_each_rxq(vsi, i) {
		if (!vsi->rx_rings[i])
			continue;
		vsi->rx_rings[i]->ptp_rx = on;
	}

	pf->ptp.tstamp_config.rx_filter = on ? HWTSTAMP_FILTER_ALL :
					       HWTSTAMP_FILTER_NONE;
}

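/**
 * ice_ptp_cfg_timestamp - Configure timestamping on this device
 * @pf: Board private structure
 * @ena: bool value to enable or disable timestamping
 *
 * This function will configure timestamping during PTP initialization
 * and deinitialization
 */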
void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
{
	ice_set_tx_tstamp(pf, ena);
	ice_set_rx_tstamp(pf, ena);
}

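/**
 * ice_get_ptp_clock_index - Get the PTP clock index
 * @pf: the PF pointer
 *
 * Determine the clock index of the PTP clock associated with this device. If
 * this is the PF controlling the clock, just use the local access to the
 * clock device pointer.
 *
 * Otherwise, read from the driver shared parameters to determine the clock
 * index value.
 *
 * Returns: the index of the PTP clock associated with this device, or -1 if
 * none is found.
 */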
int ice_get_ptp_clock_index(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_aqc_driver_params param_idx;
	struct ice_hw *hw = &pf->hw;
	u8 tmr_idx;
	u32 value;
	int err;

	/* Use the ptp_clock structure if we're the main PF */
	if (pf->ptp.clock)
		return ptp_clock_index(pf->ptp.clock);

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
	if (!tmr_idx)
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
	else
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;

	err = ice_aq_get_driver_param(hw, param_idx, &value, NULL);
	if (err) {
		dev_err(dev, "Failed to read PTP clock index parameter, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
		return -1;
	}

	/* The PTP clock index is an integer, and will be between 0 and
	 * INT_MAX. The highest bit of the driver shared parameter is used to
	 * indicate whether or not the currently stored clock index is valid.
	 */
	if (!(value & PTP_SHARED_CLK_IDX_VALID))
		return -1;

	return value & ~PTP_SHARED_CLK_IDX_VALID;
}

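/**
 * ice_set_ptp_clock_index - Set the PTP clock index
 * @pf: the PF pointer
 *
 * Set the PTP clock index for this device into the driver shared parameters,
 * so that other PFs associated with this device can read it.
 *
 * If the PF is unable to store the clock index, it will log an error, but
 * will continue operating PTP.
 */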
static void ice_set_ptp_clock_index(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_aqc_driver_params param_idx;
	struct ice_hw *hw = &pf->hw;
	u8 tmr_idx;
	u32 value;
	int err;

	if (!pf->ptp.clock)
		return;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
	if (!tmr_idx)
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
	else
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;

	value = (u32)ptp_clock_index(pf->ptp.clock);
	if (value > INT_MAX) {
		dev_err(dev, "PTP Clock index is too large to store\n");
		return;
	}
	value |= PTP_SHARED_CLK_IDX_VALID;

	err = ice_aq_set_driver_param(hw, param_idx, value, NULL);
	if (err) {
		dev_err(dev, "Failed to set PTP clock index parameter, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
	}
}

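/**
 * ice_clear_ptp_clock_index - Clear the PTP clock index
 * @pf: the PF pointer
 *
 * Clear the PTP clock index for this device. Must be called when
 * unregistering the PTP clock, in order to ensure other PFs stop reporting a
 * clock object that no longer exists.
 */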
static void ice_clear_ptp_clock_index(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_aqc_driver_params param_idx;
	struct ice_hw *hw = &pf->hw;
	u8 tmr_idx;
	int err;

	/* Do not clear the index if we don't own the timer hardware */
	if (!hw->func_caps.ts_func_info.src_tmr_owned)
		return;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
	if (!tmr_idx)
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
	else
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;

	err = ice_aq_set_driver_param(hw, param_idx, 0, NULL);
	if (err) {
		dev_dbg(dev, "Failed to clear PTP clock index parameter, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
	}
}

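/**
 * ice_ptp_read_src_clk_reg - Read the source clock register
 * @pf: Board private structure
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 */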
static u64
ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
{
	struct ice_hw *hw = &pf->hw;
	u32 hi, lo, lo2;
	u8 tmr_idx;

	tmr_idx = ice_get_ptp_src_clock_index(hw);
	/* Read the system timestamp pre PHC read */
	ptp_read_system_prets(sts);

	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	/* Read the system timestamp post PHC read */
	ptp_read_system_postts(sts);

	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	if (lo2 < lo) {
		/* if TIME_L rolled over read TIME_L again and update
		 * system timestamps
		 */
		ptp_read_system_prets(sts);
		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
		ptp_read_system_postts(sts);
		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	}

	return ((u64)hi << 32) | lo;
}

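/**
 * ice_ptp_update_cached_phctime - Update the cached PHC time values
 * @pf: Board specific private structure
 *
 * This function updates the system time values which are cached in the PF
 * structure and the Rx rings.
 *
 * This function must be called periodically to ensure that the cached value
 * is never more than 2 seconds old. It must also be called whenever the PHC
 * time has been changed.
 *
 * Return:
 * * 0 - OK, successfully updated
 * * -EAGAIN - PF was busy, need to reschedule the update
 */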
static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
{
	u64 systime;
	int i;

	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
		return -EAGAIN;

	/* Read the current PHC time */
	systime = ice_ptp_read_src_clk_reg(pf, NULL);

	/* Update the cached PHC time stored in the PF structure */
	WRITE_ONCE(pf->ptp.cached_phc_time, systime);

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];
		int j;

		if (!vsi)
			continue;

		if (vsi->type != ICE_VSI_PF)
			continue;

		ice_for_each_rxq(vsi, j) {
			if (!vsi->rx_rings[j])
				continue;
			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
		}
	}
	clear_bit(ICE_CFG_BUSY, pf->state);

	return 0;
}

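/**
 * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
 * nanoseconds
 * @cached_phc_time: recently cached copy of PHC time
 * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
 *
 * Hardware captures timestamps which contain only 32 bits of nominal
 * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
 *
 * Extend the 32bit nanosecond timestamp using the following algorithm and
 * assumptions:
 *
 * 1) have a recently cached copy of the PHC time
 * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
 *    seconds) before or after the PHC time was captured.
 * 3) calculate the delta between the cached time and the timestamp
 * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
 *    captured after the PHC time. In this case, the full timestamp is just
 *    the cached PHC time plus the delta.
 * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
 *    timestamp was captured *before* the PHC time, i.e. because the PHC
 *    cache was updated after the timestamp was captured by hardware. In this
 *    case, the full timestamp is the cached time minus the inverse delta.
 *
 * This algorithm works even if the PHC time was updated after a Tx timestamp
 * was requested, but before the Tx timestamp event was reported from
 * hardware.
 *
 * This calculation primarily relies on keeping the cached PHC time up to
 * date. If the timestamp was captured more than 2^31 nanoseconds after the
 * PHC time, it is possible that the lower 32bits of PHC time could
 * overflow more than once, and we might generate an incorrect timestamp.
 *
 * This is prevented by (a) periodically updating the cached PHC time once
 * a second, and (b) discarding any Tx timestamp packet if it has waited for
 * a timestamp for more than one second.
 */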
static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
{
	u32 delta, phc_time_lo;
	u64 ns;

	/* Extract the lower 32 bits of the PHC time */
	phc_time_lo = (u32)cached_phc_time;

	/* Calculate the delta between the lower 32bits of the cached PHC
	 * time and the in_tstamp value
	 */
	delta = (in_tstamp - phc_time_lo);

	/* Do not assume that the in_tstamp is always more recent than the
	 * cached PHC time. If the delta is large, it indicates that the
	 * in_tstamp was taken in the past, and should be converted
	 * backward.
	 */
	if (delta > (U32_MAX / 2)) {
		/* reverse the delta calculation here */
		delta = (phc_time_lo - in_tstamp);
		ns = cached_phc_time - delta;
	} else {
		ns = cached_phc_time + delta;
	}

	return ns;
}

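/**
 * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
 * @pf: Board private structure
 * @in_tstamp: Ingress/egress 40b timestamp value
 *
 * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
 * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
 *
 *  *--------------------------------------------------------------*
 *  | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
 *  *--------------------------------------------------------------*
 *
 * The low bit is an indicator of whether the timestamp is valid. The next
 * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
 * and the remaining 32 bits are the lower 32 bits of the PHC timer.
 *
 * It is assumed that the caller verifies the timestamp is valid prior to
 * calling this function.
 *
 * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
 * time stored in the device private PTP structure as the basis for timestamp
 * extension.
 *
 * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
 * algorithm.
 */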
static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
{
	const u64 mask = GENMASK_ULL(31, 0);

	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
				     (in_tstamp >> 8) & mask);
}

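/**
 * ice_ptp_read_time - Read the time from the device
 * @pf: Board private structure
 * @ts: timespec64 structure to hold the current time value
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 *
 * This function reads the source clock registers and converts the result
 * into a timespec.
 */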
static void
ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts,
		  struct ptp_system_timestamp *sts)
{
	u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts);

	*ts = ns_to_timespec64(time_ns);
}

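/**
 * ice_ptp_write_init - Set PHC time to provided value
 * @pf: Board private structure
 * @ts: timespec structure that holds the new time value
 *
 * Set the PHC time to the specified time provided in the timespec.
 */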
static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
{
	u64 ns = timespec64_to_ns(ts);
	struct ice_hw *hw = &pf->hw;

	return ice_ptp_init_time(hw, ns);
}

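/**
 * ice_ptp_write_adj - Adjust PHC clock time atomically
 * @pf: Board private structure
 * @adj: Adjustment in nanoseconds
 *
 * Perform an atomic adjustment of the PHC time by the specified number of
 * nanoseconds.
 */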
static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
{
	struct ice_hw *hw = &pf->hw;

	return ice_ptp_adj_clock(hw, adj);
}

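/**
 * ice_base_incval - Get base timer increment value
 * @pf: Board private structure
 *
 * Look up the base timer increment value for this device. The base increment
 * value is used to define the nominal clock tick rate. This increment value
 * is programmed during device initialization. It is also used as the basis
 * for calculating adjustments using scaled_ppm.
 */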
static u64 ice_base_incval(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u64 incval;

	if (ice_is_e810(hw))
		incval = ICE_PTP_NOMINAL_INCVAL_E810;
	else if (ice_e822_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
		incval = ice_e822_nominal_incval(ice_e822_time_ref(hw));
	else
		incval = UNKNOWN_INCVAL_E822;

	dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
		incval);

	return incval;
}

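/**
 * ice_ptp_reset_ts_memory_quad - Reset timestamp memory for one quad
 * @pf: The PF private data structure
 * @quad: The quad (0-4)
 */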
static void ice_ptp_reset_ts_memory_quad(struct ice_pf *pf, int quad)
{
	struct ice_hw *hw = &pf->hw;

	ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M);
	ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M);
}

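/**
 * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
 * @port: PTP port for which Tx FIFO is checked
 */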
static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
{
	int quad = port->port_num / ICE_PORTS_PER_QUAD;
	int offs = port->port_num % ICE_PORTS_PER_QUAD;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u32 val, phy_sts;
	int err;

	pf = ptp_port_to_pf(port);
	hw = &pf->hw;

	if (port->tx_fifo_busy_cnt == FIFO_OK)
		return 0;

	/* need to read FIFO state */
	if (offs == 0 || offs == 1)
		err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO01_STATUS,
					     &val);
	else
		err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO23_STATUS,
					     &val);

	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
			port->port_num, err);
		return err;
	}

	if (offs & 0x1)
		phy_sts = (val & Q_REG_FIFO13_M) >> Q_REG_FIFO13_S;
	else
		phy_sts = (val & Q_REG_FIFO02_M) >> Q_REG_FIFO02_S;

	if (phy_sts & FIFO_EMPTY) {
		port->tx_fifo_busy_cnt = FIFO_OK;
		return 0;
	}

	port->tx_fifo_busy_cnt++;

	dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
		port->tx_fifo_busy_cnt, port->port_num);

	if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
		dev_dbg(ice_pf_to_dev(pf),
			"Port %d Tx FIFO still not empty; resetting quad %d\n",
			port->port_num, quad);
		ice_ptp_reset_ts_memory_quad(pf, quad);
		port->tx_fifo_busy_cnt = FIFO_OK;
		return 0;
	}

	return -EAGAIN;
}

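/**
 * ice_ptp_check_tx_offset_valid - Check if the Tx PHY offset is valid
 * @port: the PTP port to check
 *
 * Checks whether the Tx offset for the PHY associated with this port is
 * valid. Returns 0 if the offset is valid, and a non-zero error code if it
 * is not.
 */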
static int ice_ptp_check_tx_offset_valid(struct ice_ptp_port *port)
{
	struct ice_pf *pf = ptp_port_to_pf(port);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	u32 val;
	int err;

	err = ice_ptp_check_tx_fifo(port);
	if (err)
		return err;

	err = ice_read_phy_reg_e822(hw, port->port_num, P_REG_TX_OV_STATUS,
				    &val);
	if (err) {
		dev_err(dev, "Failed to read TX_OV_STATUS for port %d, err %d\n",
			port->port_num, err);
		return -EAGAIN;
	}

	if (!(val & P_REG_TX_OV_STATUS_OV_M))
		return -EAGAIN;

	return 0;
}

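/**
 * ice_ptp_check_rx_offset_valid - Check if the Rx PHY offset is valid
 * @port: the PTP port to check
 *
 * Checks whether the Rx offset for the PHY associated with this port is
 * valid. Returns 0 if the offset is valid, and a non-zero error code if it
 * is not.
 */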
static int ice_ptp_check_rx_offset_valid(struct ice_ptp_port *port)
{
	struct ice_pf *pf = ptp_port_to_pf(port);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int err;
	u32 val;

	err = ice_read_phy_reg_e822(hw, port->port_num, P_REG_RX_OV_STATUS,
				    &val);
	if (err) {
		dev_err(dev, "Failed to read RX_OV_STATUS for port %d, err %d\n",
			port->port_num, err);
		return err;
	}

	if (!(val & P_REG_RX_OV_STATUS_OV_M))
		return -EAGAIN;

	return 0;
}

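/**
 * ice_ptp_check_offset_valid - Check port offset valid bit
 * @port: Port for which offset valid bit is checked
 *
 * Returns 0 if both Tx and Rx offset are valid, and -EAGAIN if one of the
 * offsets is not ready.
 */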
static int ice_ptp_check_offset_valid(struct ice_ptp_port *port)
{
	int tx_err, rx_err;

	/* always check both Tx and Rx offset validity */
	tx_err = ice_ptp_check_tx_offset_valid(port);
	rx_err = ice_ptp_check_rx_offset_valid(port);

	if (tx_err || rx_err)
		return -EAGAIN;

	return 0;
}

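/**
 * ice_ptp_wait_for_offset_valid - Check for valid Tx and Rx offsets
 * @work: Pointer to the kthread_work structure for this task
 *
 * Check whether both the Tx and Rx offsets are valid for enabling the
 * vernier calibration.
 *
 * Once we have valid offsets from hardware, exit bypass mode. This enables
 * more precise timestamps using the extra data measured during the vernier
 * calibration process.
 */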
static void ice_ptp_wait_for_offset_valid(struct kthread_work *work)
{
	struct ice_ptp_port *port;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int err;

	port = container_of(work, struct ice_ptp_port, ov_work.work);
	pf = ptp_port_to_pf(port);
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);

	if (ice_ptp_check_offset_valid(port)) {
		/* Offsets not ready yet, try again later */
		kthread_queue_delayed_work(pf->ptp.kworker,
					   &port->ov_work,
					   msecs_to_jiffies(100));
		return;
	}

	/* Offsets are valid, so it is safe to exit bypass mode */
	err = ice_phy_exit_bypass_e822(hw, port->port_num);
	if (err) {
		dev_warn(dev, "Failed to exit bypass mode for PHY port %u, err %d\n",
			 port->port_num, err);
		return;
	}
}

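/**
 * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
 * @ptp_port: PTP port to stop
 */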
static int
ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	int err;

	if (ice_is_e810(hw))
		return 0;

	mutex_lock(&ptp_port->ps_lock);

	kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

	err = ice_stop_phy_timer_e822(hw, port, true);
	if (err)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}

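/**
 * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
 * @ptp_port: PTP port for which the PHY start is set
 *
 * Start the PHY timestamping block, and initiate Vernier timestamping
 * calibration. If timestamping cannot be calibrated (such as if link is
 * down) then disable the timestamping block instead.
 */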
static int
ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	int err;

	if (ice_is_e810(hw))
		return 0;

	if (!ptp_port->link_up)
		return ice_ptp_port_phy_stop(ptp_port);

	mutex_lock(&ptp_port->ps_lock);

	kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

	/* temporarily disable Tx timestamps while calibrating PHY offset */
	ptp_port->tx.calibrating = true;
	ptp_port->tx_fifo_busy_cnt = 0;

	/* Start the PHY timer in bypass mode */
	err = ice_start_phy_timer_e822(hw, port, true);
	if (err)
		goto out_unlock;

	/* Enable Tx timestamps right away */
	ptp_port->tx.calibrating = false;

	kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0);

out_unlock:
	if (err)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}

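/**
 * ice_ptp_link_change - Set or clear port registers for timestamping
 * @pf: Board private structure
 * @port: Port for which the PHY start is set
 * @linkup: Link is up or down
 */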
int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
{
	struct ice_ptp_port *ptp_port;

	if (!test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		return 0;

	if (port >= ICE_NUM_EXTERNAL_PORTS)
		return -EINVAL;

	ptp_port = &pf->ptp.port;
	if (ptp_port->port_num != port)
		return -EINVAL;

	/* Update cached link status for this port immediately */
	ptp_port->link_up = linkup;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		/* PTP is not setup */
		return -EAGAIN;

	return ice_ptp_port_phy_restart(ptp_port);
}

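/**
 * ice_ptp_reset_ts_memory - Reset timestamp memory for this port's quad
 * @pf: The PF private data structure
 */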
static void ice_ptp_reset_ts_memory(struct ice_pf *pf)
{
	int quad;

	quad = pf->hw.port_info->lport / ICE_PORTS_PER_QUAD;
	ice_ptp_reset_ts_memory_quad(pf, quad);
}

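/**
 * ice_ptp_tx_ena_intr - Enable or disable the Tx timestamp interrupt
 * @pf: PF private structure
 * @ena: bool value to enable or disable interrupt
 * @threshold: Minimum number of packets at which intr is triggered
 *
 * Utility function to enable or disable Tx timestamp interrupt and threshold
 */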
static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
{
	struct ice_hw *hw = &pf->hw;
	int err = 0;
	int quad;
	u32 val;

	ice_ptp_reset_ts_memory(pf);

	for (quad = 0; quad < ICE_MAX_QUAD; quad++) {
		err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
					     &val);
		if (err)
			break;

		if (ena) {
			val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
			val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
			val |= ((threshold << Q_REG_TX_MEM_GBL_CFG_INTR_THR_S) &
				Q_REG_TX_MEM_GBL_CFG_INTR_THR_M);
		} else {
			val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
		}

		err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
					      val);
		if (err)
			break;
	}

	if (err)
		dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n",
			err);
	return err;
}

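/**
 * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
 * @pf: Board private structure
 */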
static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
{
	ice_ptp_port_phy_restart(&pf->ptp.port);
}

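/**
 * ice_ptp_adjfine - Adjust clock increment rate
 * @info: the driver's PTP info structure
 * @scaled_ppm: Parts per million with 16-bit fractional field
 *
 * Adjust the frequency of the clock by the indicated scaled ppm from the
 * base frequency.
 */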
static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	u64 incval, diff;
	int neg_adj = 0;
	int err;

	incval = ice_base_incval(pf);

	if (scaled_ppm < 0) {
		neg_adj = 1;
		scaled_ppm = -scaled_ppm;
	}

	diff = mul_u64_u64_div_u64(incval, (u64)scaled_ppm,
				   1000000ULL << 16);
	if (neg_adj)
		incval -= diff;
	else
		incval += diff;

	err = ice_ptp_write_incval_locked(hw, incval);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
			err);
		return -EIO;
	}

	return 0;
}

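/**
 * ice_ptp_extts_work - Workqueue task function
 * @work: external timestamp work structure
 *
 * Service for PTP external clock event
 */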
static void ice_ptp_extts_work(struct kthread_work *work)
{
	struct ice_ptp *ptp = container_of(work, struct ice_ptp, extts_work);
	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
	struct ptp_clock_event event;
	struct ice_hw *hw = &pf->hw;
	u8 chan, tmr_idx;
	u32 hi, lo;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
	/* Event time is captured by one of the two matched registers
	 *      GLTSYN_EVNT_L: 32 LSB of sampled time event
	 *      GLTSYN_EVNT_H: 32 MSB of sampled time event
	 */
	for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
		/* Check if channel is enabled */
		if (pf->ptp.ext_ts_irq & (1 << chan)) {
			lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
			hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
			event.timestamp = (((u64)hi) << 32) | lo;
			event.type = PTP_CLOCK_EXTTS;
			event.index = chan;

			/* Fire event */
			ptp_clock_event(pf->ptp.clock, &event);
			pf->ptp.ext_ts_irq &= ~(1 << chan);
		}
	}
}

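/**
 * ice_ptp_cfg_extts - Configure EXTTS pin and channel
 * @pf: Board private structure
 * @ena: true to enable; false to disable
 * @chan: GPIO channel (0-3)
 * @gpio_pin: GPIO pin
 * @extts_flags: request flags from the ptp_extts_request.flags
 */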
static int
ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
		  unsigned int extts_flags)
{
	u32 func, aux_reg, gpio_reg, irq_reg;
	struct ice_hw *hw = &pf->hw;
	u8 tmr_idx;

	if (chan > (unsigned int)pf->ptp.info.n_ext_ts)
		return -EINVAL;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;

	irq_reg = rd32(hw, PFINT_OICR_ENA);

	if (ena) {
		/* Enable the interrupt */
		irq_reg |= PFINT_OICR_TSYN_EVNT_M;
		aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;

#define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE	BIT(0)
#define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE	BIT(1)

		/* set event level to requested edge */
		if (extts_flags & PTP_FALLING_EDGE)
			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
		if (extts_flags & PTP_RISING_EDGE)
			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;

		/* Write GPIO CTL reg.
		 * 0x1 is input sampled by EVENT register(channel)
		 * + num_in_channels * tmr_idx
		 */
		func = 1 + chan + (tmr_idx * 3);
		gpio_reg = ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) &
			    GLGEN_GPIO_CTL_PIN_FUNC_M);
		pf->ptp.ext_ts_chan |= (1 << chan);
	} else {
		/* clear the values we set to reset defaults */
		aux_reg = 0;
		gpio_reg = 0;
		pf->ptp.ext_ts_chan &= ~(1 << chan);
		if (!pf->ptp.ext_ts_chan)
			irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
	}

	wr32(hw, PFINT_OICR_ENA, irq_reg);
	wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);

	return 0;
}

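/**
 * ice_ptp_cfg_clkout - Configure clock to generate periodic wave
 * @pf: Board private structure
 * @chan: GPIO channel (0-3)
 * @config: desired periodic clk configuration. NULL will disable channel
 * @store: If set to true the values will be stored
 *
 * Configure the internal clock generator modules to generate the clock wave
 * of specified period.
 */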
static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
			      struct ice_perout_channel *config, bool store)
{
	u64 current_time, period, start_time, phase;
	struct ice_hw *hw = &pf->hw;
	u32 func, val, gpio_pin;
	u8 tmr_idx;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;

	/* 0. Reset mode & out_en in AUX_OUT */
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);

	/* If we're disabling the output, clear out CLKO and TGT and keep
	 * output level low
	 */
	if (!config || !config->ena) {
		wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0);
		wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0);
		wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0);

		val = GLGEN_GPIO_CTL_PIN_DIR_M;
		gpio_pin = pf->ptp.perout_channels[chan].gpio_pin;
		wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);

		/* Store the value if requested */
		if (store)
			memset(&pf->ptp.perout_channels[chan], 0,
			       sizeof(struct ice_perout_channel));

		return 0;
	}
	period = config->period;
	start_time = config->start_time;
	div64_u64_rem(start_time, period, &phase);
	gpio_pin = config->gpio_pin;

	/* 1. Write clkout with half of required period value */
	if (period & 0x1) {
		dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
		goto err;
	}

	period >>= 1;

	/* For proper operation, the GLTSYN_CLKO must be larger than clock
	 * tick
	 */
#define MIN_PULSE 3
	if (period <= MIN_PULSE || period > U32_MAX) {
		dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33",
			MIN_PULSE * 2);
		goto err;
	}

	wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));

	/* Allow time for programming before start_time is hit */
	current_time = ice_ptp_read_src_clk_reg(pf, NULL);

	/* if start time is in the past start the timer at the nearest second
	 * maintaining phase
	 */
	if (start_time < current_time)
		start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
				       NSEC_PER_SEC) * NSEC_PER_SEC + phase;

	if (ice_is_e810(hw))
		start_time -= E810_OUT_PROP_DELAY_NS;
	else
		start_time -= ice_e822_pps_delay(ice_e822_time_ref(hw));

	/* 2. Write TARGET time */
	wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
	wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time));

	/* 3. Write AUX_OUT register */
	val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);

	/* 4. write GPIO CTL reg */
	func = 8 + chan + (tmr_idx * 4);
	val = GLGEN_GPIO_CTL_PIN_DIR_M |
	      ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & GLGEN_GPIO_CTL_PIN_FUNC_M);
	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);

	/* Store the value if requested */
	if (store) {
		memcpy(&pf->ptp.perout_channels[chan], config,
		       sizeof(struct ice_perout_channel));
		pf->ptp.perout_channels[chan].start_time = phase;
	}

	return 0;
err:
	dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n");
	return -EFAULT;
}

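/**
 * ice_ptp_disable_all_clkout - Disable all currently configured outputs
 * @pf: pointer to the PF structure
 *
 * Disable all currently configured clock outputs. This is necessary before
 * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout
 * to re-enable the clocks again.
 */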
static void ice_ptp_disable_all_clkout(struct ice_pf *pf)
{
	uint i;

	for (i = 0; i < pf->ptp.info.n_per_out; i++)
		if (pf->ptp.perout_channels[i].ena)
			ice_ptp_cfg_clkout(pf, i, NULL, false);
}

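/**
 * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs
 * @pf: pointer to the PF structure
 *
 * Enable all currently configured clock outputs. Use this after
 * ice_ptp_disable_all_clkout to reconfigure the output signals according to
 * their configuration.
 */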
static void ice_ptp_enable_all_clkout(struct ice_pf *pf)
{
	uint i;

	for (i = 0; i < pf->ptp.info.n_per_out; i++)
		if (pf->ptp.perout_channels[i].ena)
			ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i],
					   false);
}

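/**
 * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC
 * @info: the driver's PTP info structure
 * @rq: The requested feature to change
 * @on: Enable/disable flag
 */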
static int
ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
			 struct ptp_clock_request *rq, int on)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_perout_channel clk_cfg = {0};
	bool sma_pres = false;
	unsigned int chan;
	u32 gpio_pin;
	int err;

	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
		sma_pres = true;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		chan = rq->perout.index;
		if (sma_pres) {
			if (chan == ice_pin_desc_e810t[SMA1].chan)
				clk_cfg.gpio_pin = GPIO_20;
			else if (chan == ice_pin_desc_e810t[SMA2].chan)
				clk_cfg.gpio_pin = GPIO_22;
			else
				return -1;
		} else if (ice_is_e810t(&pf->hw)) {
			if (chan == 0)
				clk_cfg.gpio_pin = GPIO_20;
			else
				clk_cfg.gpio_pin = GPIO_22;
		} else if (chan == PPS_CLK_GEN_CHAN) {
			clk_cfg.gpio_pin = PPS_PIN_INDEX;
		} else {
			clk_cfg.gpio_pin = chan;
		}

		clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
				  rq->perout.period.nsec);
		clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
				      rq->perout.start.nsec);
		clk_cfg.ena = !!on;

		err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
		break;
	case PTP_CLK_REQ_EXTTS:
		chan = rq->extts.index;
		if (sma_pres) {
			if (chan < ice_pin_desc_e810t[SMA2].chan)
				gpio_pin = GPIO_21;
			else
				gpio_pin = GPIO_23;
		} else if (ice_is_e810t(&pf->hw)) {
			if (chan == 0)
				gpio_pin = GPIO_21;
			else
				gpio_pin = GPIO_23;
		} else {
			gpio_pin = chan;
		}

		err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
					rq->extts.flags);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

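/**
 * ice_ptp_gettimex64 - Get the time of the clock
 * @info: the driver's PTP info structure
 * @ts: timespec64 structure to hold the current time value
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 *
 * Read the device clock and return the correct value in ns, after converting
 * it into a timespec struct.
 */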
static int
ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
		   struct ptp_system_timestamp *sts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;

	if (!ice_ptp_lock(hw)) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n");
		return -EBUSY;
	}

	ice_ptp_read_time(pf, ts, sts);
	ice_ptp_unlock(hw);

	return 0;
}

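/**
 * ice_ptp_settime64 - Set the time of the clock
 * @info: the driver's PTP info structure
 * @ts: timespec64 structure that holds the new time value
 *
 * Set the device clock to the user input value. The conversion from timespec
 * to ns happens in the write function.
 */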
static int
ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct timespec64 ts64 = *ts;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* For Vernier mode, we need to recalibrate after a new settime.
	 * Start with disabling the timestamp block.
	 */
	if (pf->ptp.port.link_up)
		ice_ptp_port_phy_stop(&pf->ptp.port);

	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto exit;
	}

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	err = ice_ptp_write_init(pf, &ts64);
	ice_ptp_unlock(hw);

	if (!err)
		ice_ptp_update_cached_phctime(pf);

	/* Reenable periodic outputs */
	ice_ptp_enable_all_clkout(pf);

	/* Recalibrate and re-enable timestamp block */
	if (pf->ptp.port.link_up)
		ice_ptp_port_phy_restart(&pf->ptp.port);
exit:
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
		return err;
	}

	return 0;
}

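/**
 * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
 * @info: the driver's PTP info structure
 * @delta: Offset in nanoseconds to adjust the time by
 */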
static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
{
	struct timespec64 now, then;
	int ret;

	then = ns_to_timespec64(delta);
	ret = ice_ptp_gettimex64(info, &now, NULL);
	if (ret)
		return ret;
	now = timespec64_add(now, then);

	return ice_ptp_settime64(info, (const struct timespec64 *)&now);
}

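/**
 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
 * @info: the driver's PTP info structure
 * @delta: Offset in nanoseconds to adjust the time by
 */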
static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);

	/* Hardware only supports atomic adjustments using signed 32-bit
	 * integers. For any adjustment outside this range, perform
	 * a non-atomic get->adjust->set flow.
	 */
	if (delta > S32_MAX || delta < S32_MIN) {
		dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
		return ice_ptp_adjtime_nonatomic(info, delta);
	}

	if (!ice_ptp_lock(hw)) {
		dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
		return -EBUSY;
	}

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	err = ice_ptp_write_adj(pf, delta);

	/* Reenable periodic outputs */
	ice_ptp_enable_all_clkout(pf);

	ice_ptp_unlock(hw);

	if (err) {
		dev_err(dev, "PTP failed to adjust time, err %d\n", err);
		return err;
	}

	ice_ptp_update_cached_phctime(pf);

	return 0;
}

#ifdef CONFIG_ICE_HWTS
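/**
 * ice_ptp_get_syncdevicetime - Get the cross time stamp info
 * @device: Current device time
 * @system: System counter value read synchronously with device time
 * @ctx: Context provided by timekeeping code
 *
 * Read device and system (ART) clock simultaneously and return the corrected
 * clock values in ns.
 */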
static int
ice_ptp_get_syncdevicetime(ktime_t *device,
			   struct system_counterval_t *system,
			   void *ctx)
{
	struct ice_pf *pf = (struct ice_pf *)ctx;
	struct ice_hw *hw = &pf->hw;
	u32 hh_lock, hh_art_ctl;
	int i;

	/* Get the HW lock */
	hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
	if (hh_lock & PFHH_SEM_BUSY_M) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
		return -EFAULT;
	}

	/* Start the ART and device clock sync sequence */
	hh_art_ctl = rd32(hw, GLHH_ART_CTL);
	hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
	wr32(hw, GLHH_ART_CTL, hh_art_ctl);

#define MAX_HH_LOCK_TRIES 100

	for (i = 0; i < MAX_HH_LOCK_TRIES; i++) {
		/* Wait for sync to complete */
		hh_art_ctl = rd32(hw, GLHH_ART_CTL);
		if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
			udelay(1);
			continue;
		} else {
			u32 hh_ts_lo, hh_ts_hi, tmr_idx;
			u64 hh_ts;

			tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
			/* Read ART time */
			hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
			hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
			*system = convert_art_ns_to_tsc(hh_ts);
			/* Read Device source clock time */
			hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
			hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
			*device = ns_to_ktime(hh_ts);
			break;
		}
	}
	/* Release HW lock */
	hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
	hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
	wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);

	if (i == MAX_HH_LOCK_TRIES)
		return -ETIMEDOUT;

	return 0;
}

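/**
 * ice_ptp_getcrosststamp_e822 - Capture a device cross timestamp
 * @info: the driver's PTP info structure
 * @cts: The memory to fill the cross timestamp info
 *
 * Capture a cross timestamp between the ART and the device PTP hardware
 * clock. Fill the cross timestamp information and report it back to the
 * caller.
 *
 * This is only valid for E822 devices which have support for generating the
 * cross timestamp via PCIe PTM.
 *
 * In order to correctly correlate the ART timestamp back to the TSC time,
 * the CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
 */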
static int
ice_ptp_getcrosststamp_e822(struct ptp_clock_info *info,
			    struct system_device_crosststamp *cts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);

	return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
					     pf, NULL, cts);
}
#endif /* CONFIG_ICE_HWTS */

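/**
 * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
 * @pf: Board private structure
 * @ifr: ioctl data
 *
 * Copy the timestamping config to user buffer
 */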
int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
	struct hwtstamp_config *config;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return -EIO;

	config = &pf->ptp.tstamp_config;

	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
		-EFAULT : 0;
}

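/**
 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
 * @pf: Board private structure
 * @config: hwtstamp settings requested or saved
 */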
static int
ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
{
	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		ice_set_tx_tstamp(pf, false);
		break;
	case HWTSTAMP_TX_ON:
		ice_set_tx_tstamp(pf, true);
		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		ice_set_rx_tstamp(pf, false);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_ALL:
		ice_set_rx_tstamp(pf, true);
		break;
	default:
		return -ERANGE;
	}

	return 0;
}

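/**
 * ice_ptp_set_ts_config - ioctl interface to control the timestamping
 * @pf: Board private structure
 * @ifr: ioctl data
 *
 * Get the user config and store it
 */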
int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return -EAGAIN;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = ice_ptp_set_timestamp_mode(pf, &config);
	if (err)
		return err;

	/* Return the actual configuration set */
	config = pf->ptp.tstamp_config;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

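/**
 * ice_ptp_rx_hwtstamp - Check for an Rx timestamp
 * @rx_ring: Ring to get the VSI info
 * @rx_desc: Receive descriptor
 * @skb: Particular skb to send timestamp with
 *
 * The driver receives a notification in the receive descriptor with
 * timestamp. The timestamp is in ns, so we must convert the result first.
 */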
void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
		    union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{
	u32 ts_high;
	u64 ts_ns;

	/* Populate timesync data into skb */
	if (rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID) {
		struct skb_shared_hwtstamps *hwtstamps;

		/* Use ice_ptp_extend_32b_ts directly, using the ring-specific
		 * cached PHC value, rather than accessing the PF. This also
		 * allows us to simply pass the upper 32bits of nanoseconds
		 * directly. Calling ice_ptp_extend_40b_ts is unnecessary as
		 * it would just discard these bits itself.
		 */
		ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
		ts_ns = ice_ptp_extend_32b_ts(rx_ring->cached_phctime, ts_high);

		hwtstamps = skb_hwtstamps(skb);
		memset(hwtstamps, 0, sizeof(*hwtstamps));
		hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
	}
}

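/**
 * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
 * @pf: pointer to the PF structure
 * @info: PTP clock info structure
 *
 * Disable the OS access to the SMA pins. Called to clear out the OS
 * indications of pin support when we fail to setup the E810-T SMA control
 * register.
 */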
static void
ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
{
	struct device *dev = ice_pf_to_dev(pf);

	dev_warn(dev, "Failed to configure E810-T SMA pin control\n");

	info->enable = NULL;
	info->verify = NULL;
	info->n_pins = 0;
	info->n_ext_ts = 0;
	info->n_per_out = 0;
}

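/**
 * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins
 * @pf: pointer to the PF structure
 * @info: PTP clock info structure
 *
 * Finish setting up the SMA pins by allocating pin_config, and setting it up
 * according to the current status of the SMA. On failure, disable all of the
 * extended SMA pin support.
 */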
static void
ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	/* Allocate memory for kernel pins interface */
	info->pin_config = devm_kcalloc(dev, info->n_pins,
					sizeof(*info->pin_config), GFP_KERNEL);
	if (!info->pin_config) {
		ice_ptp_disable_sma_pins_e810t(pf, info);
		return;
	}

	/* Read current SMA status */
	err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
	if (err)
		ice_ptp_disable_sma_pins_e810t(pf, info);
}

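/**
 * ice_ptp_setup_pins_e810t - Setup PTP pins in sysfs
 * @pf: pointer to the PF instance
 * @info: PTP clock capabilities
 */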
static void
ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
{
	/* Check if SMA controller is in the netlist */
	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL) &&
	    !ice_is_pca9575_present(&pf->hw))
		ice_clear_feature_support(pf, ICE_F_SMA_CTRL);

	if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
		info->n_ext_ts = N_EXT_TS_E810_NO_SMA;
		info->n_per_out = N_PER_OUT_E810T_NO_SMA;
		return;
	}

	info->n_per_out = N_PER_OUT_E810T;

	if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS)) {
		info->n_ext_ts = N_EXT_TS_E810;
		info->n_pins = NUM_PTP_PINS_E810T;
		info->verify = ice_verify_pin_e810t;
	}

	/* Complete setup of the SMA pins */
	ice_ptp_setup_sma_pins_e810t(pf, info);
}

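/**
 * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
 * @pf: pointer to the PF instance
 * @info: PTP clock capabilities
 */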
static void ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
	info->n_per_out = N_PER_OUT_E810;

	if (!ice_is_feature_supported(pf, ICE_F_PTP_EXTTS))
		return;

	info->n_ext_ts = N_EXT_TS_E810;
}

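/**
 * ice_ptp_set_funcs_e822 - Set specialized functions for E822 support
 * @pf: Board private structure
 * @info: PTP info to fill
 *
 * Assign functions to the PTP capabilities structure for E822 devices.
 * Functions which operate across all device families should not be set here,
 * as those are assigned during ice_ptp_set_caps. Only add functions here
 * which are distinct for E822 devices.
 */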
static void
ice_ptp_set_funcs_e822(struct ice_pf *pf, struct ptp_clock_info *info)
{
#ifdef CONFIG_ICE_HWTS
	if (boot_cpu_has(X86_FEATURE_ART) &&
	    boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
		info->getcrosststamp = ice_ptp_getcrosststamp_e822;
#endif /* CONFIG_ICE_HWTS */
}

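/**
 * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
 * @pf: Board private structure
 * @info: PTP info to fill
 *
 * Assign functions to the PTP capabilities structure for E810 devices.
 * Functions which operate across all device families should not be set here,
 * as those are assigned during ice_ptp_set_caps. Only add functions here
 * which are distinct for E810 devices.
 */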
static void
ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
	info->enable = ice_ptp_gpio_enable_e810;

	if (ice_is_e810t(&pf->hw))
		ice_ptp_setup_pins_e810t(pf, info);
	else
		ice_ptp_setup_pins_e810(pf, info);
}

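/**
 * ice_ptp_set_caps - Set PTP capabilities
 * @pf: Board private structure
 */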
static void ice_ptp_set_caps(struct ice_pf *pf)
{
	struct ptp_clock_info *info = &pf->ptp.info;
	struct device *dev = ice_pf_to_dev(pf);

	snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
		 dev_driver_string(dev), dev_name(dev));
	info->owner = THIS_MODULE;
	info->max_adj = 999999999;
	info->adjtime = ice_ptp_adjtime;
	info->adjfine = ice_ptp_adjfine;
	info->gettimex64 = ice_ptp_gettimex64;
	info->settime64 = ice_ptp_settime64;

	if (ice_is_e810(&pf->hw))
		ice_ptp_set_funcs_e810(pf, info);
	else
		ice_ptp_set_funcs_e822(pf, info);
}

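/**
 * ice_ptp_create_clock - Create PTP clock device for userspace
 * @pf: Board private structure
 *
 * This function creates a new PTP clock device. It only creates one if we
 * don't already have one. Will return error if it can't create one, but
 * success if we already have a device. Should be used by ice_ptp_init to
 * create clock initially, and prevent global resets from creating new clock
 * devices.
 */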
static long ice_ptp_create_clock(struct ice_pf *pf)
{
	struct ptp_clock_info *info;
	struct ptp_clock *clock;
	struct device *dev;

	/* No need to create a clock device if we already have one */
	if (pf->ptp.clock)
		return 0;

	ice_ptp_set_caps(pf);

	info = &pf->ptp.info;
	dev = ice_pf_to_dev(pf);

	/* Attempt to register the clock before enabling the hardware */
	clock = ptp_clock_register(info, dev);
	if (IS_ERR(clock))
		return PTR_ERR(clock);

	pf->ptp.clock = clock;

	return 0;
}

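/**
 * ice_ptp_tx_tstamp_work - Process Tx timestamps for a port
 * @work: pointer to the kthread_work struct
 *
 * Process timestamps captured by the PHY associated with this port. To do
 * this, loop over each index with a waiting skb.
 *
 * If a given index has a valid timestamp, perform the following steps:
 *
 * 1) copy the timestamp out of the PHY register
 * 2) clear the timestamp valid bit in the PHY register
 * 3) unlock the index by clearing the associated in_use bit
 * 4) extend the 40b timestamp value to get a 64bit timestamp
 * 5) send that timestamp to the stack
 *
 * After looping, if we still have waiting SKBs, re-queue the work. This may
 * cause us to effectively poll even when not strictly necessary. We do this
 * because it's possible a new timestamp was requested around the same time
 * as the interrupt. In some cases hardware might not interrupt us again when
 * the timestamp is captured.
 *
 * Note that we only take the tracking lock when clearing the bit and when
 * checking if we need to re-queue this task. Avoiding holding the lock for
 * the entire function is important in order to ensure that Tx threads do not
 * get blocked while waiting for the lock.
 */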
static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
{
	struct ice_ptp_port *ptp_port;
	struct ice_ptp_tx *tx;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u8 idx;

	tx = container_of(work, struct ice_ptp_tx, work);
	if (!tx->init)
		return;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	hw = &pf->hw;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct skb_shared_hwtstamps shhwtstamps = {};
		u8 phy_idx = idx + tx->quad_offset;
		u64 raw_tstamp, tstamp;
		struct sk_buff *skb;
		int err;

		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);

		err = ice_read_phy_tstamp(hw, tx->quad, phy_idx,
					  &raw_tstamp);
		if (err)
			continue;

		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);

		/* Check if the timestamp is invalid or stale */
		if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
			continue;

		/* The timestamp is valid, so we'll go ahead and clear this
		 * index and then send the timestamp up to the stack.
		 */
		spin_lock(&tx->lock);
		tx->tstamps[idx].cached_tstamp = raw_tstamp;
		clear_bit(idx, tx->in_use);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		spin_unlock(&tx->lock);

		/* it's (unlikely but) possible we raced with the cleanup
		 * thread for discarding old timestamp requests.
		 */
		if (!skb)
			continue;

		/* Extend the timestamp using cached PHC time */
		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);

		ice_trace(tx_tstamp_complete, skb, idx);

		skb_tstamp_tx(skb, &shhwtstamps);
		dev_kfree_skb_any(skb);
	}

	/* Check if we still have work to do. If so, re-queue this task to
	 * poll for remaining timestamps.
	 */
	spin_lock(&tx->lock);
	if (!bitmap_empty(tx->in_use, tx->len))
		kthread_queue_work(pf->ptp.kworker, &tx->work);
	spin_unlock(&tx->lock);
}

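/**
 * ice_ptp_request_ts - Request an available Tx timestamp index
 * @tx: the PTP Tx timestamp tracker to request from
 * @skb: the SKB to associate with this timestamp request
 */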
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
{
	u8 idx;

	/* Check if this tracker is initialized */
	if (!tx->init || tx->calibrating)
		return -1;

	spin_lock(&tx->lock);
	/* Find and set the first available index */
	idx = find_first_zero_bit(tx->in_use, tx->len);
	if (idx < tx->len) {
		/* We got a valid index that no other thread could have set.
		 * Store a reference to the skb and the start time to allow
		 * discarding old requests.
		 */
		set_bit(idx, tx->in_use);
		tx->tstamps[idx].start = jiffies;
		tx->tstamps[idx].skb = skb_get(skb);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		ice_trace(tx_tstamp_request, skb, idx);
	}

	spin_unlock(&tx->lock);

	/* return the appropriate PHY timestamp register index, -1 if no
	 * indexes were available.
	 */
	if (idx >= tx->len)
		return -1;
	else
		return idx + tx->quad_offset;
}

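/**
 * ice_ptp_process_ts - Spawn kthread work to handle timestamps
 * @pf: Board private structure
 *
 * Queue work required to process the PTP Tx timestamps outside of interrupt
 * context.
 */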
void ice_ptp_process_ts(struct ice_pf *pf)
{
	if (pf->ptp.port.tx.init)
		kthread_queue_work(pf->ptp.kworker, &pf->ptp.port.tx.work);
}

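/**
 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
 * @tx: Tx tracking structure to initialize
 *
 * Assumes that the length has already been initialized. Do not call
 * directly, instead call ice_ptp_init_tx_e822 or ice_ptp_init_tx_e810.
 */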
static int
ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
{
	tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL);
	if (!tx->tstamps)
		return -ENOMEM;

	tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
	if (!tx->in_use) {
		kfree(tx->tstamps);
		tx->tstamps = NULL;
		return -ENOMEM;
	}

	spin_lock_init(&tx->lock);
	kthread_init_work(&tx->work, ice_ptp_tx_tstamp_work);

	tx->init = 1;

	return 0;
}

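/**
 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
 * @pf: Board private structure
 * @tx: the tracker to flush
 */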
static void
ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	u8 idx;

	for (idx = 0; idx < tx->len; idx++) {
		u8 phy_idx = idx + tx->quad_offset;

		spin_lock(&tx->lock);
		if (tx->tstamps[idx].skb) {
			dev_kfree_skb_any(tx->tstamps[idx].skb);
			tx->tstamps[idx].skb = NULL;
		}
		clear_bit(idx, tx->in_use);
		spin_unlock(&tx->lock);

		/* Clear any potential residual timestamp in the PHY block */
		if (!pf->hw.reset_ongoing)
			ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
	}
}

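/**
 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
 * @pf: Board private structure
 * @tx: Tx tracking structure to release
 *
 * Free memory associated with the Tx timestamp tracker.
 */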
static void
ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	tx->init = 0;

	kthread_cancel_work_sync(&tx->work);

	ice_ptp_flush_tx_tracker(pf, tx);

	kfree(tx->tstamps);
	tx->tstamps = NULL;

	bitmap_free(tx->in_use);
	tx->in_use = NULL;

	tx->len = 0;
}

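/**
 * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 * @port: the port this structure tracks
 *
 * Initialize the Tx timestamp tracker for this port. For generic MAC
 * devices, the timestamp block is shared for all ports in the same quad. To
 * avoid ports using the same timestamp index, logically break the block of
 * registers into chunks based on the port number.
 */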
static int
ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
	tx->quad = port / ICE_PORTS_PER_QUAD;
	tx->quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT;
	tx->len = INDEX_PER_PORT;

	return ice_ptp_alloc_tx_tracker(tx);
}

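/**
 * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 *
 * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
 * port has its own block of timestamps, independent of the other ports.
 */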
static int
ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	tx->quad = pf->hw.port_info->lport;
	tx->quad_offset = 0;
	tx->len = INDEX_PER_QUAD;

	return ice_ptp_alloc_tx_tracker(tx);
}

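/**
 * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
 * @hw: pointer to the hw struct
 * @tx: PTP Tx tracker to clean up
 *
 * Loop through the Tx timestamp requests and see if any of them have been
 * waiting for a long time. Discard any SKBs that have been waiting for more
 * than 2 seconds. This is long enough to be reasonably sure that the
 * timestamp will never be captured. This might happen if the packet gets
 * discarded before it reaches the PHY timestamping block.
 */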
static void ice_ptp_tx_tstamp_cleanup(struct ice_hw *hw, struct ice_ptp_tx *tx)
{
	u8 idx;

	if (!tx->init)
		return;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct sk_buff *skb;
		u64 raw_tstamp;

		/* Check if this SKB has been waiting for too long */
		if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
			continue;

		/* Read tstamp to be able to use this register again */
		ice_read_phy_tstamp(hw, tx->quad, idx + tx->quad_offset,
				    &raw_tstamp);

		spin_lock(&tx->lock);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);
		spin_unlock(&tx->lock);

		/* Free the SKB after we've cleared the bit */
		dev_kfree_skb_any(skb);
	}
}

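/**
 * ice_ptp_periodic_work - Periodic kthread task for the PTP clock
 * @work: pointer to the kthread_work struct
 *
 * Update the cached PHC time, discard stale Tx timestamp requests, and
 * reschedule this task.
 */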
static void ice_ptp_periodic_work(struct kthread_work *work)
{
	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
	int err;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return;

	err = ice_ptp_update_cached_phctime(pf);

	ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx);

	/* Run twice a second or reschedule sooner if the PHC update failed */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
				   msecs_to_jiffies(err ? 10 : 500));
}

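/**
 * ice_ptp_reset - Initialize PTP hardware clock support after reset
 * @pf: Board private structure
 */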
void ice_ptp_reset(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	int err, itr = 1;
	u64 time_diff;

	if (test_bit(ICE_PFR_REQ, pf->state))
		goto pfr;

	if (!hw->func_caps.ts_func_info.src_tmr_owned)
		goto reset_ts;

	err = ice_ptp_init_phc(hw);
	if (err)
		goto err;

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto err;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err) {
		ice_ptp_unlock(hw);
		goto err;
	}

	/* Write the initial Time value to PHY and LAN using the cached PHC
	 * time before the reset and time difference between stopping and
	 * starting the clock.
	 */
	if (ptp->cached_phc_time) {
		time_diff = ktime_get_real_ns() - ptp->reset_time;
		ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
	} else {
		ts = ktime_to_timespec64(ktime_get_real());
	}
	err = ice_ptp_write_init(pf, &ts);
	if (err) {
		ice_ptp_unlock(hw);
		goto err;
	}

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	if (!ice_is_e810(hw)) {
		/* Enable quad interrupts */
		err = ice_ptp_tx_ena_intr(pf, true, itr);
		if (err)
			goto err;
	}

reset_ts:
	/* Restart the PHY timestamping block */
	ice_ptp_reset_phy_timestamping(pf);

pfr:
	/* Init Tx structures */
	if (ice_is_e810(&pf->hw)) {
		err = ice_ptp_init_tx_e810(pf, &ptp->port.tx);
	} else {
		kthread_init_delayed_work(&ptp->port.ov_work,
					  ice_ptp_wait_for_offset_valid);
		err = ice_ptp_init_tx_e822(pf, &ptp->port.tx,
					   ptp->port.port_num);
	}
	if (err)
		goto err;

	set_bit(ICE_FLAG_PTP, pf->flags);

	/* Start periodic work going */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);

	dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
	return;

err:
	dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
}

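/**
 * ice_ptp_prepare_for_reset - Prepare PTP for reset
 * @pf: Board private structure
 */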
void ice_ptp_prepare_for_reset(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	u8 src_tmr;

	clear_bit(ICE_FLAG_PTP, pf->flags);

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_cfg_timestamp(pf, false);

	kthread_cancel_delayed_work_sync(&ptp->work);
	kthread_cancel_work_sync(&ptp->extts_work);

	if (test_bit(ICE_PFR_REQ, pf->state))
		return;

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	src_tmr = ice_get_ptp_src_clock_index(&pf->hw);

	/* Disable source clock */
	wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);

	/* Acquire PHC and system timer to restore after reset */
	ptp->reset_time = ktime_get_real_ns();
}

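/**
 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
 * @pf: Board private structure
 *
 * Setup and initialize a PTP clock device that represents the device
 * hardware clock. Save the clock index for other functions connected to the
 * same hardware resource.
 */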
static int ice_ptp_init_owner(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	int err, itr = 1;

	err = ice_ptp_init_phc(hw);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
			err);
		return err;
	}

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto err_exit;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err) {
		ice_ptp_unlock(hw);
		goto err_exit;
	}

	ts = ktime_to_timespec64(ktime_get_real());
	/* Write the initial Time value to PHY and LAN */
	err = ice_ptp_write_init(pf, &ts);
	if (err) {
		ice_ptp_unlock(hw);
		goto err_exit;
	}

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	if (!ice_is_e810(hw)) {
		/* Enable quad interrupts */
		err = ice_ptp_tx_ena_intr(pf, true, itr);
		if (err)
			goto err_exit;
	}

	/* Ensure we have a clock device */
	err = ice_ptp_create_clock(pf);
	if (err)
		goto err_clk;

	/* Store the PTP clock index for other PFs */
	ice_set_ptp_clock_index(pf);

	return 0;

err_clk:
	pf->ptp.clock = NULL;
err_exit:
	return err;
}

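/**
 * ice_ptp_init_work - Initialize the kthread work structures for PTP
 * @pf: Board private structure
 * @ptp: pointer to the ptp struct
 */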
static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
{
	struct kthread_worker *kworker;

	/* Initialize work functions */
	kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
	kthread_init_work(&ptp->extts_work, ice_ptp_extts_work);

	/* Allocate a kworker for handling work required for the ports
	 * connected to the PTP hardware clock.
	 */
	kworker = kthread_create_worker(0, "ice-ptp-%s",
					dev_name(ice_pf_to_dev(pf)));
	if (IS_ERR(kworker))
		return PTR_ERR(kworker);

	ptp->kworker = kworker;

	/* Start periodic work going */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);

	return 0;
}

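/**
 * ice_ptp_init_port - Initialize PTP port structure
 * @pf: Board private structure
 * @ptp_port: PTP port structure
 */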
static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
{
	mutex_init(&ptp_port->ps_lock);

	if (ice_is_e810(&pf->hw))
		return ice_ptp_init_tx_e810(pf, &ptp_port->tx);

	kthread_init_delayed_work(&ptp_port->ov_work,
				  ice_ptp_wait_for_offset_valid);
	return ice_ptp_init_tx_e822(pf, &ptp_port->tx, ptp_port->port_num);
}

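/**
 * ice_ptp_init - Initialize PTP hardware clock support
 * @pf: Board private structure
 *
 * Set up the device for interacting with the PTP hardware clock for all
 * functions, both the function that owns the clock hardware, and the
 * functions connected to the clock hardware.
 *
 * The clock owner will allocate and register a ptp_clock with the
 * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
 * items used for asynchronous work such as Tx timestamps and periodic work.
 */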
void ice_ptp_init(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* If this function owns the clock hardware, it must allocate and
	 * configure the PTP clock device to represent it.
	 */
	if (hw->func_caps.ts_func_info.src_tmr_owned) {
		err = ice_ptp_init_owner(pf);
		if (err)
			goto err;
	}

	ptp->port.port_num = hw->pf_id;
	err = ice_ptp_init_port(pf, &ptp->port);
	if (err)
		goto err;

	/* Start the PHY timestamping block */
	ice_ptp_reset_phy_timestamping(pf);

	set_bit(ICE_FLAG_PTP, pf->flags);
	err = ice_ptp_init_work(pf, ptp);
	if (err)
		goto err;

	dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
	return;

err:
	/* If we registered a PTP clock, release it */
	if (pf->ptp.clock) {
		ptp_clock_unregister(ptp->clock);
		pf->ptp.clock = NULL;
	}
	clear_bit(ICE_FLAG_PTP, pf->flags);
	dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}

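/**
 * ice_ptp_release - Disable the driver/HW support and unregister the clock
 * @pf: Board private structure
 *
 * This function handles the cleanup work required from the initialization by
 * clearing out the important information and unregistering the clock
 */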
void ice_ptp_release(struct ice_pf *pf)
{
	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return;

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_cfg_timestamp(pf, false);

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	clear_bit(ICE_FLAG_PTP, pf->flags);

	kthread_cancel_delayed_work_sync(&pf->ptp.work);

	ice_ptp_port_phy_stop(&pf->ptp.port);
	mutex_destroy(&pf->ptp.port.ps_lock);
	if (pf->ptp.kworker) {
		kthread_destroy_worker(pf->ptp.kworker);
		pf->ptp.kworker = NULL;
	}

	if (!pf->ptp.clock)
		return;

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	ice_clear_ptp_clock_index(pf);
	ptp_clock_unregister(pf->ptp.clock);
	pf->ptp.clock = NULL;

	dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
}