// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include "qed.h"
#include "qed_dev_api.h"
#include "qed_hw.h"
#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_ptp.h"
#include "qed_reg_addr.h"

/* Number of 16 nanosecond time quanta to wait before making a drift adjustment */
#define QED_DRIFT_CNTR_TIME_QUANTA_SHIFT    0
/* Nanoseconds to add/subtract when making a drift adjustment */
#define QED_DRIFT_CNTR_ADJUSTMENT_SHIFT     28
/* Add/subtract the Adjustment_Value when making a drift adjustment */
#define QED_DRIFT_CNTR_DIRECTION_SHIFT      31
#define QED_TIMESTAMP_MASK          BIT(16)
/* Param mask for the hardware to detect/timestamp L2/L4 unicast PTP packets */
#define QED_PTP_UCAST_PARAM_MASK              0x70F

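/* Illustrative sketch (not part of the driver): how the three drift-counter
 * fields defined above are packed into the single configuration word that
 * qed_ptp_hw_adjfreq() later writes to NIG_REG_TSGEN_DRIFT_CNTR_CONF. The
 * helper name and its parameters are assumptions made for illustration only.
 */
static inline u32 qed_ptp_example_drift_cfg(u32 period_quanta, u32 adj_ns,
                                            u32 add_not_sub)
{
    /* period in 16 ns quanta, adjustment value in ns (0-7), direction bit */
    return (period_quanta << QED_DRIFT_CNTR_TIME_QUANTA_SHIFT) |
           (adj_ns << QED_DRIFT_CNTR_ADJUSTMENT_SHIFT) |
           (add_not_sub << QED_DRIFT_CNTR_DIRECTION_SHIFT);
}
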
static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
{
    switch (MFW_PORT(p_hwfn)) {
    case 0:
        return QED_RESC_LOCK_PTP_PORT0;
    case 1:
        return QED_RESC_LOCK_PTP_PORT1;
    case 2:
        return QED_RESC_LOCK_PTP_PORT2;
    case 3:
        return QED_RESC_LOCK_PTP_PORT3;
    default:
        return QED_RESC_LOCK_RESC_INVALID;
    }
}

static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
    struct qed_resc_lock_params params;
    enum qed_resc_lock resource;
    int rc;

    resource = qed_ptcdev_to_resc(p_hwfn);
    if (resource == QED_RESC_LOCK_RESC_INVALID)
        return -EINVAL;

    qed_mcp_resc_lock_default_init(&params, NULL, resource, true);

    rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &params);
    if (rc && rc != -EINVAL) {
        return rc;
    } else if (rc == -EINVAL) {
        /* MFW doesn't support resource locking, first PF on the port
         * has lock ownership.
         */
        if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine)
            return 0;

        DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
        return -EBUSY;
    } else if (!params.b_granted) {
        DP_INFO(p_hwfn, "Failed to acquire ptp resource lock\n");
        return -EBUSY;
    }

    return 0;
}

static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
    struct qed_resc_unlock_params params;
    enum qed_resc_lock resource;
    int rc;

    resource = qed_ptcdev_to_resc(p_hwfn);
    if (resource == QED_RESC_LOCK_RESC_INVALID)
        return -EINVAL;

    qed_mcp_resc_lock_default_init(NULL, &params, resource, true);

    rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &params);
    if (rc == -EINVAL) {
        /* MFW doesn't support locking, first PF has lock ownership */
        if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine) {
            rc = 0;
        } else {
            DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
            return -EINVAL;
        }
    } else if (rc) {
        DP_INFO(p_hwfn, "Failed to release the ptp resource lock\n");
    }

    return rc;
}

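/* Usage note (an illustrative sketch, mirroring qed_ptp_hw_enable() and
 * qed_ptp_hw_disable() below): the per-port PTP resource lock is taken once
 * before the NIG PTP registers are programmed and released when PTP is torn
 * down, roughly:
 *
 *     if (!qed_ptp_res_lock(p_hwfn, p_ptt)) {
 *             ... program PTP registers ...
 *             qed_ptp_res_unlock(p_hwfn, p_ptt);
 *     }
 */
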
/* Read Rx timestamp */
static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp)
{
    struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
    struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
    u32 val;

    *timestamp = 0;
    val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID);
    if (!(val & QED_TIMESTAMP_MASK)) {
        DP_INFO(p_hwfn, "Invalid Rx timestamp, buf_seqid = %d\n", val);
        return -EINVAL;
    }

    val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_LSB);
    *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_MSB);
    *timestamp <<= 32;
    *timestamp |= val;

    /* Reset timestamp register to allow new timestamp */
    qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
           QED_TIMESTAMP_MASK);

    return 0;
}

/* Read Tx timestamp */
static int qed_ptp_hw_read_tx_ts(struct qed_dev *cdev, u64 *timestamp)
{
    struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
    struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
    u32 val;

    *timestamp = 0;
    val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID);
    if (!(val & QED_TIMESTAMP_MASK)) {
        DP_VERBOSE(p_hwfn, QED_MSG_DEBUG,
               "Invalid Tx timestamp, buf_seqid = %08x\n", val);
        return -EINVAL;
    }

    val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_LSB);
    *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_MSB);
    *timestamp <<= 32;
    *timestamp |= val;

    /* Reset timestamp register to allow new timestamp */
    qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);

    return 0;
}

/* Read Phy Hardware Clock */
static int qed_ptp_hw_read_cc(struct qed_dev *cdev, u64 *phc_cycles)
{
    struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
    struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
    u32 temp = 0;

    temp = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_LSB);
    *phc_cycles = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_MSB);
    *phc_cycles <<= 32;
    *phc_cycles |= temp;

    return 0;
}

/* Filter PTP protocol packets that need to be timestamped */
static int qed_ptp_hw_cfg_filters(struct qed_dev *cdev,
                  enum qed_ptp_filter_type rx_type,
                  enum qed_ptp_hwtstamp_tx_type tx_type)
{
    struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
    struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
    u32 rule_mask, enable_cfg = 0x0;

    switch (rx_type) {
    case QED_PTP_FILTER_NONE:
        enable_cfg = 0x0;
        rule_mask = 0x3FFF;
        break;
    case QED_PTP_FILTER_ALL:
        enable_cfg = 0x7;
        rule_mask = 0x3CAA;
        break;
    case QED_PTP_FILTER_V1_L4_EVENT:
        enable_cfg = 0x3;
        rule_mask = 0x3FFA;
        break;
    case QED_PTP_FILTER_V1_L4_GEN:
        enable_cfg = 0x3;
        rule_mask = 0x3FFE;
        break;
    case QED_PTP_FILTER_V2_L4_EVENT:
        enable_cfg = 0x5;
        rule_mask = 0x3FAA;
        break;
    case QED_PTP_FILTER_V2_L4_GEN:
        enable_cfg = 0x5;
        rule_mask = 0x3FEE;
        break;
    case QED_PTP_FILTER_V2_L2_EVENT:
        enable_cfg = 0x5;
        rule_mask = 0x3CFF;
        break;
    case QED_PTP_FILTER_V2_L2_GEN:
        enable_cfg = 0x5;
        rule_mask = 0x3EFF;
        break;
    case QED_PTP_FILTER_V2_EVENT:
        enable_cfg = 0x5;
        rule_mask = 0x3CAA;
        break;
    case QED_PTP_FILTER_V2_GEN:
        enable_cfg = 0x5;
        rule_mask = 0x3EEE;
        break;
    default:
        DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", rx_type);
        return -EINVAL;
    }

    qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK,
           QED_PTP_UCAST_PARAM_MASK);
    qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask);
    qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, enable_cfg);

    if (tx_type == QED_PTP_HWTSTAMP_TX_OFF) {
        qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
        qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
        qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
    } else {
        qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, enable_cfg);
        qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK,
               QED_PTP_UCAST_PARAM_MASK);
        qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, rule_mask);
    }

    /* Reset possibly old timestamps */
    qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
           QED_TIMESTAMP_MASK);

    return 0;
}

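/* Worked example (for illustration only): a call such as
 *
 *     qed_ptp_hw_cfg_filters(cdev, QED_PTP_FILTER_V2_EVENT,
 *                            QED_PTP_HWTSTAMP_TX_OFF);
 *
 * selects enable_cfg 0x5 and rule_mask 0x3CAA, so the Rx side timestamps
 * PTPv2 event packets (param mask 0x70F, rule mask 0x3CAA, RX_PTP_EN 0x5),
 * while Tx timestamping stays disabled (TX_PTP_EN 0x0, with the Tx
 * param/rule masks reset to 0x7FF/0x3FFF).
 */
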
/* Adjust the HW clock by a rate given in parts-per-billion (ppb) units.
 * FW/HW accepts the adjustment value in terms of 3 parameters:
 *   Drift period - adjustment happens once in a certain number of nanoseconds.
 *   Drift value - time is adjusted by a certain value, for example by 5 ns.
 *   Drift direction - add or subtract the adjustment value.
 * The routine translates ppb into the adjustment triplet in an optimal manner.
 */
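/* Worked example (approximate, for illustration): a request of +100 ppb can
 * be met by adding 7 ns once every 70 ms, since 7 ns / 70 ms = 1e-7 = 100 ppb.
 * The loop below expresses that period in 16 ns quanta and then checks both
 * rounding ends of the division to minimize the residual frequency error.
 */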
static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb)
{
    s64 best_val = 0, val, best_period = 0, period, approx_dev, dif, dif2;
    struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
    struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
    u32 drift_ctr_cfg = 0, drift_state;
    int drift_dir = 1;

    if (ppb < 0) {
        ppb = -ppb;
        drift_dir = 0;
    }

    if (ppb > 1) {
        s64 best_dif = ppb, best_approx_dev = 1;

        /* Adjustment value is up to +/-7ns, find an optimal value in
         * this range.
         */
        for (val = 7; val > 0; val--) {
            period = div_s64(val * 1000000000, ppb);
            period -= 8;
            period >>= 4;
            if (period < 1)
                period = 1;
            if (period > 0xFFFFFFE)
                period = 0xFFFFFFE;

            /* Check both rounding ends for approximate error */
            approx_dev = period * 16 + 8;
            dif = ppb * approx_dev - val * 1000000000;
            dif2 = dif + 16 * ppb;

            if (dif < 0)
                dif = -dif;
            if (dif2 < 0)
                dif2 = -dif2;

            /* Determine which end gives better approximation */
            if (dif * (approx_dev + 16) > dif2 * approx_dev) {
                period++;
                approx_dev += 16;
                dif = dif2;
            }

            /* Track best approximation found so far */
            if (best_dif * approx_dev > dif * best_approx_dev) {
                best_dif = dif;
                best_val = val;
                best_period = period;
                best_approx_dev = approx_dev;
            }
        }
    } else if (ppb == 1) {
        /* This is a special case as it's the only value which wouldn't
         * fit in an s64 variable. In order to avoid casts, simply
         * handle it separately.
         */
        best_val = 4;
        best_period = 0xee6b27f;
    } else {
        best_val = 0;
        best_period = 0xFFFFFFF;
    }

    drift_ctr_cfg = (best_period << QED_DRIFT_CNTR_TIME_QUANTA_SHIFT) |
            (((int)best_val) << QED_DRIFT_CNTR_ADJUSTMENT_SHIFT) |
            (((int)drift_dir) << QED_DRIFT_CNTR_DIRECTION_SHIFT);

    qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x1);

    drift_state = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR);
    if (drift_state & 1) {
        qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF,
               drift_ctr_cfg);
    } else {
        DP_INFO(p_hwfn, "Drift counter is not reset\n");
        return -EINVAL;
    }

    qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);

    return 0;
}

static int qed_ptp_hw_enable(struct qed_dev *cdev)
{
    struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
    struct qed_ptt *p_ptt;
    int rc;

    p_ptt = qed_ptt_acquire(p_hwfn);
    if (!p_ptt) {
        DP_NOTICE(p_hwfn, "Failed to acquire PTT for PTP\n");
        return -EBUSY;
    }

    p_hwfn->p_ptp_ptt = p_ptt;

    rc = qed_ptp_res_lock(p_hwfn, p_ptt);
    if (rc) {
        DP_INFO(p_hwfn,
            "Couldn't acquire the resource lock, skip ptp enable for this PF\n");
        qed_ptt_release(p_hwfn, p_ptt);
        p_hwfn->p_ptp_ptt = NULL;
        return rc;
    }

    /* Reset PTP event detection rules - will be configured in the IOCTL */
    qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
    qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
    qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
    qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);

    qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 7);
    qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 7);

    qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1);

    /* Pause free running counter */
    if (QED_IS_BB_B0(p_hwfn->cdev))
        qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
    if (QED_IS_AH(p_hwfn->cdev))
        qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 2);

    qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0);
    qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0);
    /* Resume free running counter */
    if (QED_IS_BB_B0(p_hwfn->cdev))
        qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
    if (QED_IS_AH(p_hwfn->cdev)) {
        qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 4);
        qed_wr(p_hwfn, p_ptt, NIG_REG_PTP_LATCH_OSTS_PKT_TIME, 1);
    }

    /* Disable drift register */
    qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0);
    qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);

    /* Reset possibly old timestamps */
    qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
           QED_TIMESTAMP_MASK);
    qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);

    return 0;
}

static int qed_ptp_hw_disable(struct qed_dev *cdev)
{
    struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
    struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;

    qed_ptp_res_unlock(p_hwfn, p_ptt);

    /* Reset PTP event detection rules */
    qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
    qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);

    qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
    qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);

    /* Disable the PTP feature */
    qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0);
    qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);

    qed_ptt_release(p_hwfn, p_ptt);
    p_hwfn->p_ptp_ptt = NULL;

    return 0;
}

const struct qed_eth_ptp_ops qed_ptp_ops_pass = {
    .cfg_filters = qed_ptp_hw_cfg_filters,
    .read_rx_ts = qed_ptp_hw_read_rx_ts,
    .read_tx_ts = qed_ptp_hw_read_tx_ts,
    .read_cc = qed_ptp_hw_read_cc,
    .adjfreq = qed_ptp_hw_adjfreq,
    .disable = qed_ptp_hw_disable,
    .enable = qed_ptp_hw_enable,
};
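
/* Minimal usage sketch (illustrative only; this helper is not part of the
 * driver and is never called). It shows the typical ordering in which a
 * consumer such as the qede PTP code would drive the callbacks exported
 * above: enable, configure filters, sample the clock, then disable. The
 * helper name is an assumption made for illustration.
 */
static inline void qed_ptp_example_usage(struct qed_dev *cdev)
{
    u64 phc_cycles = 0;

    if (qed_ptp_ops_pass.enable(cdev))
        return;

    /* Timestamp all received PTP packets; leave Tx timestamping off */
    qed_ptp_ops_pass.cfg_filters(cdev, QED_PTP_FILTER_ALL,
                     QED_PTP_HWTSTAMP_TX_OFF);

    /* Sample the free-running hardware clock counter */
    qed_ptp_ops_pass.read_cc(cdev, &phc_cycles);

    qed_ptp_ops_pass.disable(cdev);
}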