// SPDX-License-Identifier: GPL-2.0
/* NXP C45 PHY driver
 * Copyright (C) 2021 NXP
 * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
 */

#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/processor.h>
#include <linux/property.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>

#define PHY_ID_TJA_1103			0x001BB010

#define PMAPMD_B100T1_PMAPMD_CTL	0x0834
#define B100T1_PMAPMD_CONFIG_EN		BIT(15)
#define B100T1_PMAPMD_MASTER		BIT(14)
#define MASTER_MODE			(B100T1_PMAPMD_CONFIG_EN | \
					 B100T1_PMAPMD_MASTER)
#define SLAVE_MODE			(B100T1_PMAPMD_CONFIG_EN)

#define VEND1_DEVICE_CONTROL		0x0040
#define DEVICE_CONTROL_RESET		BIT(15)
#define DEVICE_CONTROL_CONFIG_GLOBAL_EN	BIT(14)
#define DEVICE_CONTROL_CONFIG_ALL_EN	BIT(13)

#define VEND1_PHY_IRQ_ACK		0x80A0
#define VEND1_PHY_IRQ_EN		0x80A1
#define VEND1_PHY_IRQ_STATUS		0x80A2
#define PHY_IRQ_LINK_EVENT		BIT(1)

#define VEND1_PHY_CONTROL		0x8100
#define PHY_CONFIG_EN			BIT(14)
#define PHY_START_OP			BIT(0)

#define VEND1_PHY_CONFIG		0x8108
#define PHY_CONFIG_AUTO			BIT(0)

#define VEND1_SIGNAL_QUALITY		0x8320
#define SQI_VALID			BIT(14)
#define SQI_MASK			GENMASK(2, 0)
#define MAX_SQI				SQI_MASK

#define VEND1_CABLE_TEST		0x8330
#define CABLE_TEST_ENABLE		BIT(15)
#define CABLE_TEST_START		BIT(14)
#define CABLE_TEST_VALID		BIT(13)
#define CABLE_TEST_OK			0x00
#define CABLE_TEST_SHORTED		0x01
#define CABLE_TEST_OPEN			0x02
#define CABLE_TEST_UNKNOWN		0x07

#define VEND1_PORT_CONTROL		0x8040
#define PORT_CONTROL_EN			BIT(14)

#define VEND1_PORT_ABILITIES		0x8046
#define PTP_ABILITY			BIT(3)

#define VEND1_PORT_INFRA_CONTROL	0xAC00
#define PORT_INFRA_CONTROL_EN		BIT(14)

#define VEND1_RXID			0xAFCC
#define VEND1_TXID			0xAFCD
#define ID_ENABLE			BIT(15)

#define VEND1_ABILITIES			0xAFC4
#define RGMII_ID_ABILITY		BIT(15)
#define RGMII_ABILITY			BIT(14)
#define RMII_ABILITY			BIT(10)
#define REVMII_ABILITY			BIT(9)
#define MII_ABILITY			BIT(8)
#define SGMII_ABILITY			BIT(0)

#define VEND1_MII_BASIC_CONFIG		0xAFC6
#define MII_BASIC_CONFIG_REV		BIT(8)
#define MII_BASIC_CONFIG_SGMII		0x9
#define MII_BASIC_CONFIG_RGMII		0x7
#define MII_BASIC_CONFIG_RMII		0x5
#define MII_BASIC_CONFIG_MII		0x4

#define VEND1_SYMBOL_ERROR_COUNTER	0x8350
#define VEND1_LINK_DROP_COUNTER		0x8352
#define VEND1_LINK_LOSSES_AND_FAILURES	0x8353
#define VEND1_R_GOOD_FRAME_CNT		0xA950
#define VEND1_R_BAD_FRAME_CNT		0xA952
#define VEND1_R_RXER_FRAME_CNT		0xA954
#define VEND1_RX_PREAMBLE_COUNT		0xAFCE
#define VEND1_TX_PREAMBLE_COUNT		0xAFCF
#define VEND1_RX_IPG_LENGTH		0xAFD0
#define VEND1_TX_IPG_LENGTH		0xAFD1
#define COUNTER_EN			BIT(15)

#define VEND1_PTP_CONFIG		0x1102
#define EXT_TRG_EDGE			BIT(1)
#define PPS_OUT_POL			BIT(2)
#define PPS_OUT_EN			BIT(3)

#define VEND1_LTC_LOAD_CTRL		0x1105
#define READ_LTC			BIT(2)
#define LOAD_LTC			BIT(0)

#define VEND1_LTC_WR_NSEC_0		0x1106
#define VEND1_LTC_WR_NSEC_1		0x1107
#define VEND1_LTC_WR_SEC_0		0x1108
#define VEND1_LTC_WR_SEC_1		0x1109

#define VEND1_LTC_RD_NSEC_0		0x110A
#define VEND1_LTC_RD_NSEC_1		0x110B
#define VEND1_LTC_RD_SEC_0		0x110C
#define VEND1_LTC_RD_SEC_1		0x110D

#define VEND1_RATE_ADJ_SUBNS_0		0x110F
#define VEND1_RATE_ADJ_SUBNS_1		0x1110
#define CLK_RATE_ADJ_LD			BIT(15)
#define CLK_RATE_ADJ_DIR		BIT(14)

#define VEND1_HW_LTC_LOCK_CTRL		0x1115
#define HW_LTC_LOCK_EN			BIT(0)

#define VEND1_PTP_IRQ_EN		0x1131
#define VEND1_PTP_IRQ_STATUS		0x1132
#define PTP_IRQ_EGR_TS			BIT(0)

#define VEND1_RX_TS_INSRT_CTRL		0x114D
#define RX_TS_INSRT_MODE2		0x02

#define VEND1_EGR_RING_DATA_0		0x114E
#define VEND1_EGR_RING_DATA_1_SEQ_ID	0x114F
#define VEND1_EGR_RING_DATA_2_NSEC_15_0	0x1150
#define VEND1_EGR_RING_DATA_3		0x1151
#define VEND1_EGR_RING_CTRL		0x1154

#define VEND1_EXT_TRG_TS_DATA_0		0x1121
#define VEND1_EXT_TRG_TS_DATA_1		0x1122
#define VEND1_EXT_TRG_TS_DATA_2		0x1123
#define VEND1_EXT_TRG_TS_DATA_3		0x1124
#define VEND1_EXT_TRG_TS_DATA_4		0x1125
#define VEND1_EXT_TRG_TS_CTRL		0x1126

#define RING_DATA_0_DOMAIN_NUMBER	GENMASK(7, 0)
#define RING_DATA_0_MSG_TYPE		GENMASK(11, 8)
#define RING_DATA_0_SEC_4_2		GENMASK(14, 12)
#define RING_DATA_0_TS_VALID		BIT(15)

#define RING_DATA_3_NSEC_29_16		GENMASK(13, 0)
#define RING_DATA_3_SEC_1_0		GENMASK(15, 14)
#define RING_DATA_5_SEC_16_5		GENMASK(15, 4)
#define RING_DONE			BIT(0)

#define TS_SEC_MASK			GENMASK(1, 0)

#define VEND1_PORT_FUNC_ENABLES		0x8048
#define PTP_ENABLE			BIT(3)

#define VEND1_PORT_PTP_CONTROL		0x9000
#define PORT_PTP_CONTROL_BYPASS		BIT(11)

#define VEND1_PTP_CLK_PERIOD		0x1104
#define PTP_CLK_PERIOD_100BT1		15ULL

#define VEND1_EVENT_MSG_FILT		0x1148
#define EVENT_MSG_FILT_ALL		0x0F
#define EVENT_MSG_FILT_NONE		0x00

#define VEND1_TX_PIPE_DLY_NS		0x1149
#define VEND1_TX_PIPEDLY_SUBNS		0x114A
#define VEND1_RX_PIPE_DLY_NS		0x114B
#define VEND1_RX_PIPEDLY_SUBNS		0x114C

#define VEND1_GPIO_FUNC_CONFIG_BASE	0x2C40
#define GPIO_FUNC_EN			BIT(15)
#define GPIO_FUNC_PTP			BIT(6)
#define GPIO_SIGNAL_PTP_TRIGGER		0x01
#define GPIO_SIGNAL_PPS_OUT		0x12
#define GPIO_DISABLE			0
#define GPIO_PPS_OUT_CFG		(GPIO_FUNC_EN | GPIO_FUNC_PTP | \
					 GPIO_SIGNAL_PPS_OUT)
#define GPIO_EXTTS_OUT_CFG		(GPIO_FUNC_EN | GPIO_FUNC_PTP | \
					 GPIO_SIGNAL_PTP_TRIGGER)

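/* The RGMII clock period is 8000 ps (125 MHz), so one degree of phase shift
 * is roughly 22 ps. The internal delay line accepts values between 1644 ps
 * and 2260 ps; 2000 ps is used when the firmware node does not provide
 * "tx-internal-delay-ps"/"rx-internal-delay-ps".
 */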
#define RGMII_PERIOD_PS			8000U
#define PS_PER_DEGREE			div_u64(RGMII_PERIOD_PS, 360)
#define MIN_ID_PS			1644U
#define MAX_ID_PS			2260U
#define DEFAULT_ID_PS			2000U

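/* The LTC advances by PTP_CLK_PERIOD_100BT1 (15 ns) on every PTP clock cycle.
 * PPM_TO_SUBNS_INC() turns a frequency offset in ppb into the per-cycle
 * correction loaded into the RATE_ADJ_SUBNS registers: 15 ns * ppb /
 * NSEC_PER_SEC, scaled by GENMASK(31, 0) (~2^32) to express it in the
 * registers' sub-nanosecond fixed-point units.
 */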
#define PPM_TO_SUBNS_INC(ppb)	div_u64(GENMASK(31, 0) * (ppb) * \
					PTP_CLK_PERIOD_100BT1, NSEC_PER_SEC)

#define NXP_C45_SKB_CB(skb)	((struct nxp_c45_skb_cb *)(skb)->cb)

struct nxp_c45_skb_cb {
	struct ptp_header *header;
	unsigned int type;
};

struct nxp_c45_hwts {
	u32 nsec;
	u32 sec;
	u8 domain_number;
	u16 sequence_id;
	u8 msg_type;
};

struct nxp_c45_phy {
	struct phy_device *phydev;
	struct mii_timestamper mii_ts;
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info caps;
	struct sk_buff_head tx_queue;
	struct sk_buff_head rx_queue;
	/* used to access the PTP registers atomically */
	struct mutex ptp_lock;
	int hwts_tx;
	int hwts_rx;
	u32 tx_delay;
	u32 rx_delay;
	struct timespec64 extts_ts;
	int extts_index;
	bool extts;
};

struct nxp_c45_phy_stats {
	const char *name;
	u8 mmd;
	u16 reg;
	u8 off;
	u16 mask;
};

static bool nxp_c45_poll_txts(struct phy_device *phydev)
{
	return phydev->irq <= 0;
}

static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
				   struct timespec64 *ts,
				   struct ptp_system_timestamp *sts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_LOAD_CTRL,
		      READ_LTC);
	ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   VEND1_LTC_RD_NSEC_0);
	ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				    VEND1_LTC_RD_NSEC_1) << 16;
	ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				  VEND1_LTC_RD_SEC_0);
	ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   VEND1_LTC_RD_SEC_1) << 16;

	return 0;
}

static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
				  struct timespec64 *ts,
				  struct ptp_system_timestamp *sts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	mutex_lock(&priv->ptp_lock);
	_nxp_c45_ptp_gettimex64(ptp, ts, sts);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}

static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
				  const struct timespec64 *ts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_NSEC_0,
		      ts->tv_nsec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_NSEC_1,
		      ts->tv_nsec >> 16);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_SEC_0,
		      ts->tv_sec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_SEC_1,
		      ts->tv_sec >> 16);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_LOAD_CTRL,
		      LOAD_LTC);

	return 0;
}

static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
				 const struct timespec64 *ts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	mutex_lock(&priv->ptp_lock);
	_nxp_c45_ptp_settime64(ptp, ts);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}

static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	u64 subns_inc_val;
	bool inc;

	mutex_lock(&priv->ptp_lock);
	inc = ppb >= 0;
	ppb = abs(ppb);

	subns_inc_val = PPM_TO_SUBNS_INC(ppb);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_RATE_ADJ_SUBNS_0,
		      subns_inc_val);
	subns_inc_val >>= 16;
	subns_inc_val |= CLK_RATE_ADJ_LD;
	if (inc)
		subns_inc_val |= CLK_RATE_ADJ_DIR;

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_RATE_ADJ_SUBNS_1,
		      subns_inc_val);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}

static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	struct timespec64 now, then;

	mutex_lock(&priv->ptp_lock);
	then = ns_to_timespec64(delta);
	_nxp_c45_ptp_gettimex64(ptp, &now, NULL);
	now = timespec64_add(now, then);
	_nxp_c45_ptp_settime64(ptp, &now);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}

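/* The hardware timestamps carry only the low bits of the seconds field.
 * nxp_c45_reconstruct_ts() merges the two LSBs (TS_SEC_MASK) into the full
 * seconds value just read from the LTC, stepping the LTC seconds back by one
 * wrap interval (TS_SEC_MASK + 1) if the counter has already moved past the
 * captured value.
 */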
static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
				   struct nxp_c45_hwts *hwts)
{
	ts->tv_nsec = hwts->nsec;
	if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
		ts->tv_sec -= TS_SEC_MASK + 1;
	ts->tv_sec &= ~TS_SEC_MASK;
	ts->tv_sec |= hwts->sec & TS_SEC_MASK;
}

static bool nxp_c45_match_ts(struct ptp_header *header,
			     struct nxp_c45_hwts *hwts,
			     unsigned int type)
{
	return ntohs(header->sequence_id) == hwts->sequence_id &&
	       ptp_get_msgtype(header, type) == hwts->msg_type &&
	       header->domain_number == hwts->domain_number;
}

static void nxp_c45_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      VEND1_EXT_TRG_TS_DATA_0);
	extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				       VEND1_EXT_TRG_TS_DATA_1) << 16;
	extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				     VEND1_EXT_TRG_TS_DATA_2);
	extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      VEND1_EXT_TRG_TS_DATA_3) << 16;
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EXT_TRG_TS_CTRL,
		      RING_DONE);
}

static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
		      RING_DONE);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
	valid = !!(reg & RING_DATA_0_TS_VALID);
	if (!valid)
		goto nxp_c45_get_hwtxts_out;

	hwts->domain_number = reg;
	hwts->msg_type = (reg & RING_DATA_0_MSG_TYPE) >> 8;
	hwts->sec = (reg & RING_DATA_0_SEC_4_2) >> 10;
	hwts->sequence_id = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
					 VEND1_EGR_RING_DATA_1_SEQ_ID);
	hwts->nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				  VEND1_EGR_RING_DATA_2_NSEC_15_0);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_3);
	hwts->nsec |= (reg & RING_DATA_3_NSEC_29_16) << 16;
	hwts->sec |= (reg & RING_DATA_3_SEC_1_0) >> 14;

nxp_c45_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}

static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
				 struct nxp_c45_hwts *txts)
{
	struct sk_buff *skb, *tmp, *skb_match = NULL;
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;
	unsigned long flags;
	bool ts_match;
	s64 ts_ns;

	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
		ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
					    NXP_C45_SKB_CB(skb)->type);
		if (!ts_match)
			continue;
		skb_match = skb;
		__skb_unlink(skb, &priv->tx_queue);
		break;
	}
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);

	if (skb_match) {
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		nxp_c45_reconstruct_ts(&ts, txts);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		ts_ns = timespec64_to_ns(&ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
	} else {
		phydev_warn(priv->phydev,
			    "the tx timestamp doesn't match with any skb\n");
	}
}

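/* PTP auxiliary worker: drains the egress timestamp ring when the PHY has no
 * interrupt line, attaches ingress timestamps to the queued RX skbs and polls
 * the external trigger timestamp while extts is enabled. Returning 1 asks the
 * PTP core to run the worker again after one jiffy; a negative value stops
 * rescheduling.
 */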
static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	bool poll_txts = nxp_c45_poll_txts(priv->phydev);
	struct skb_shared_hwtstamps *shhwtstamps_rx;
	struct ptp_clock_event event;
	struct nxp_c45_hwts hwts;
	bool reschedule = false;
	struct timespec64 ts;
	struct sk_buff *skb;
	bool txts_valid;
	u32 ts_raw;

	while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
		txts_valid = nxp_c45_get_hwtxts(priv, &hwts);
		if (unlikely(!txts_valid)) {
			/* Still more skbs in the queue */
			reschedule = true;
			break;
		}

		nxp_c45_process_txts(priv, &hwts);
	}

	while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
		hwts.sec = ts_raw >> 30;
		hwts.nsec = ts_raw & GENMASK(29, 0);
		nxp_c45_reconstruct_ts(&ts, &hwts);
		shhwtstamps_rx = skb_hwtstamps(skb);
		shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
		NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
		netif_rx(skb);
	}

	if (priv->extts) {
		nxp_c45_get_extts(priv, &ts);
		if (timespec64_compare(&ts, &priv->extts_ts) != 0) {
			priv->extts_ts = ts;
			event.index = priv->extts_index;
			event.type = PTP_CLOCK_EXTTS;
			event.timestamp = ns_to_ktime(timespec64_to_ns(&ts));
			ptp_clock_event(priv->ptp_clock, &event);
		}
		reschedule = true;
	}

	return reschedule ? 1 : -1;
}

static void nxp_c45_gpio_config(struct nxp_c45_phy *priv,
				int pin, u16 pin_cfg)
{
	struct phy_device *phydev = priv->phydev;

	phy_write_mmd(phydev, MDIO_MMD_VEND1,
		      VEND1_GPIO_FUNC_CONFIG_BASE + pin, pin_cfg);
}

static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
				 struct ptp_perout_request *perout, int on)
{
	struct phy_device *phydev = priv->phydev;
	int pin;

	if (perout->flags & ~PTP_PEROUT_PHASE)
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
	if (pin < 0)
		return pin;

	if (!on) {
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG,
				   PPS_OUT_EN);
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG,
				   PPS_OUT_POL);

		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);

		return 0;
	}

	/* The hardware generates a fixed 1 Hz PPS signal: the period must be
	 * 1 second, the start time is not configurable, and the only phase
	 * adjustment available is a half-second shift selected by inverting
	 * the output polarity.
	 */
	if (perout->period.sec != 1 || perout->period.nsec != 0) {
		phydev_warn(phydev, "The period can be set only to 1 second.");
		return -EINVAL;
	}

	if (!(perout->flags & PTP_PEROUT_PHASE)) {
		if (perout->start.sec != 0 || perout->start.nsec != 0) {
			phydev_warn(phydev, "The start time is not configurable. Should be set to 0 seconds and 0 nanoseconds.");
			return -EINVAL;
		}
	} else {
		if (perout->phase.nsec != 0 &&
		    perout->phase.nsec != (NSEC_PER_SEC >> 1)) {
			phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds.");
			return -EINVAL;
		}

		if (perout->phase.nsec == 0)
			phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
					   VEND1_PTP_CONFIG, PPS_OUT_POL);
		else
			phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
					 VEND1_PTP_CONFIG, PPS_OUT_POL);
	}

	nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG, PPS_OUT_EN);

	return 0;
}

static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
				struct ptp_extts_request *extts, int on)
{
	int pin;

	if (extts->flags & ~(PTP_ENABLE_FEATURE |
			     PTP_RISING_EDGE |
			     PTP_FALLING_EDGE |
			     PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Sampling on both edges is not supported */
	if ((extts->flags & PTP_RISING_EDGE) &&
	    (extts->flags & PTP_FALLING_EDGE))
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
		priv->extts = false;

		return 0;
	}

	if (extts->flags & PTP_RISING_EDGE)
		phy_clear_bits_mmd(priv->phydev, MDIO_MMD_VEND1,
				   VEND1_PTP_CONFIG, EXT_TRG_EDGE);

	if (extts->flags & PTP_FALLING_EDGE)
		phy_set_bits_mmd(priv->phydev, MDIO_MMD_VEND1,
				 VEND1_PTP_CONFIG, EXT_TRG_EDGE);

	nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG);
	priv->extts = true;
	priv->extts_index = extts->index;
	ptp_schedule_worker(priv->ptp_clock, 0);

	return 0;
}

static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp,
			      struct ptp_clock_request *req, int on)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	switch (req->type) {
	case PTP_CLK_REQ_EXTTS:
		return nxp_c45_extts_enable(priv, &req->extts, on);
	case PTP_CLK_REQ_PEROUT:
		return nxp_c45_perout_enable(priv, &req->perout, on);
	default:
		return -EOPNOTSUPP;
	}
}

static struct ptp_pin_desc nxp_c45_ptp_pins[] = {
	{ "nxp_c45_gpio0", 0, PTP_PF_NONE},
	{ "nxp_c45_gpio1", 1, PTP_PF_NONE},
	{ "nxp_c45_gpio2", 2, PTP_PF_NONE},
	{ "nxp_c45_gpio3", 3, PTP_PF_NONE},
	{ "nxp_c45_gpio4", 4, PTP_PF_NONE},
	{ "nxp_c45_gpio5", 5, PTP_PF_NONE},
	{ "nxp_c45_gpio6", 6, PTP_PF_NONE},
	{ "nxp_c45_gpio7", 7, PTP_PF_NONE},
	{ "nxp_c45_gpio8", 8, PTP_PF_NONE},
	{ "nxp_c45_gpio9", 9, PTP_PF_NONE},
	{ "nxp_c45_gpio10", 10, PTP_PF_NONE},
	{ "nxp_c45_gpio11", 11, PTP_PF_NONE},
};

static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
				  enum ptp_pin_function func, unsigned int chan)
{
	if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins))
		return -EINVAL;

	switch (func) {
	case PTP_PF_NONE:
	case PTP_PF_PEROUT:
	case PTP_PF_EXTTS:
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
{
	priv->caps = (struct ptp_clock_info) {
		.owner		= THIS_MODULE,
		.name		= "NXP C45 PHC",
		.max_adj	= 16666666,
		.adjfine	= nxp_c45_ptp_adjfine,
		.adjtime	= nxp_c45_ptp_adjtime,
		.gettimex64	= nxp_c45_ptp_gettimex64,
		.settime64	= nxp_c45_ptp_settime64,
		.enable		= nxp_c45_ptp_enable,
		.verify		= nxp_c45_ptp_verify_pin,
		.do_aux_work	= nxp_c45_do_aux_work,
		.pin_config	= nxp_c45_ptp_pins,
		.n_pins		= ARRAY_SIZE(nxp_c45_ptp_pins),
		.n_ext_ts	= 1,
		.n_per_out	= 1,
	};

	priv->ptp_clock = ptp_clock_register(&priv->caps,
					     &priv->phydev->mdio.dev);

	if (IS_ERR(priv->ptp_clock))
		return PTR_ERR(priv->ptp_clock);

	if (!priv->ptp_clock)
		return -ENOMEM;

	return 0;
}

static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);

	switch (priv->hwts_tx) {
	case HWTSTAMP_TX_ON:
		NXP_C45_SKB_CB(skb)->type = type;
		NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&priv->tx_queue, skb);
		if (nxp_c45_poll_txts(priv->phydev))
			ptp_schedule_worker(priv->ptp_clock, 0);
		break;
	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}

static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct ptp_header *header = ptp_parse_header(skb, type);

	if (!header)
		return false;

	if (!priv->hwts_rx)
		return false;

	NXP_C45_SKB_CB(skb)->header = header;
	skb_queue_tail(&priv->rx_queue, skb);
	ptp_schedule_worker(priv->ptp_clock, 0);

	return true;
}

static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
			    struct ifreq *ifreq)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct phy_device *phydev = priv->phydev;
	struct hwtstamp_config cfg;

	if (copy_from_user(&cfg, ifreq->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ON)
		return -ERANGE;

	priv->hwts_tx = cfg.tx_type;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->hwts_rx = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		priv->hwts_rx = 1;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	if (priv->hwts_rx || priv->hwts_tx) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_EVENT_MSG_FILT,
			      EVENT_MSG_FILT_ALL);
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PORT_PTP_CONTROL,
				   PORT_PTP_CONTROL_BYPASS);
	} else {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_EVENT_MSG_FILT,
			      EVENT_MSG_FILT_NONE);
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_PTP_CONTROL,
				 PORT_PTP_CONTROL_BYPASS);
	}

	if (nxp_c45_poll_txts(priv->phydev))
		goto nxp_c45_no_ptp_irq;

	if (priv->hwts_tx)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PTP_IRQ_EN, PTP_IRQ_EGR_TS);
	else
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PTP_IRQ_EN, PTP_IRQ_EGR_TS);

nxp_c45_no_ptp_irq:
	return copy_to_user(ifreq->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
			   struct ethtool_ts_info *ts_info)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);

	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				   SOF_TIMESTAMPING_RX_HARDWARE |
				   SOF_TIMESTAMPING_RAW_HARDWARE;
	ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);

	return 0;
}

static const struct nxp_c45_phy_stats nxp_c45_hw_stats[] = {
	{ "phy_symbol_error_cnt", MDIO_MMD_VEND1,
		VEND1_SYMBOL_ERROR_COUNTER, 0, GENMASK(15, 0) },
	{ "phy_link_status_drop_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_DROP_COUNTER, 8, GENMASK(13, 8) },
	{ "phy_link_availability_drop_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_DROP_COUNTER, 0, GENMASK(5, 0) },
	{ "phy_link_loss_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_LOSSES_AND_FAILURES, 10, GENMASK(15, 10) },
	{ "phy_link_failure_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_LOSSES_AND_FAILURES, 0, GENMASK(9, 0) },
	{ "r_good_frame_cnt", MDIO_MMD_VEND1,
		VEND1_R_GOOD_FRAME_CNT, 0, GENMASK(15, 0) },
	{ "r_bad_frame_cnt", MDIO_MMD_VEND1,
		VEND1_R_BAD_FRAME_CNT, 0, GENMASK(15, 0) },
	{ "r_rxer_frame_cnt", MDIO_MMD_VEND1,
		VEND1_R_RXER_FRAME_CNT, 0, GENMASK(15, 0) },
	{ "rx_preamble_count", MDIO_MMD_VEND1,
		VEND1_RX_PREAMBLE_COUNT, 0, GENMASK(5, 0) },
	{ "tx_preamble_count", MDIO_MMD_VEND1,
		VEND1_TX_PREAMBLE_COUNT, 0, GENMASK(5, 0) },
	{ "rx_ipg_length", MDIO_MMD_VEND1,
		VEND1_RX_IPG_LENGTH, 0, GENMASK(8, 0) },
	{ "tx_ipg_length", MDIO_MMD_VEND1,
		VEND1_TX_IPG_LENGTH, 0, GENMASK(8, 0) },
};

static int nxp_c45_get_sset_count(struct phy_device *phydev)
{
	return ARRAY_SIZE(nxp_c45_hw_stats);
}

static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(nxp_c45_hw_stats); i++) {
		strncpy(data + i * ETH_GSTRING_LEN,
			nxp_c45_hw_stats[i].name, ETH_GSTRING_LEN);
	}
}

static void nxp_c45_get_stats(struct phy_device *phydev,
			      struct ethtool_stats *stats, u64 *data)
{
	size_t i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(nxp_c45_hw_stats); i++) {
		ret = phy_read_mmd(phydev, nxp_c45_hw_stats[i].mmd,
				   nxp_c45_hw_stats[i].reg);
		if (ret < 0) {
			data[i] = U64_MAX;
		} else {
			data[i] = ret & nxp_c45_hw_stats[i].mask;
			data[i] >>= nxp_c45_hw_stats[i].off;
		}
	}
}

static int nxp_c45_config_enable(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
		      DEVICE_CONTROL_CONFIG_GLOBAL_EN |
		      DEVICE_CONTROL_CONFIG_ALL_EN);
	usleep_range(400, 450);

	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
		      PORT_CONTROL_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
		      PHY_CONFIG_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
		      PORT_INFRA_CONTROL_EN);

	return 0;
}

static int nxp_c45_start_op(struct phy_device *phydev)
{
	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
				PHY_START_OP);
}

static int nxp_c45_config_intr(struct phy_device *phydev)
{
	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
		return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
					VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
	else
		return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
					  VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
}

static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	irqreturn_t ret = IRQ_NONE;
	struct nxp_c45_hwts hwts;
	int irq;

	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
	if (irq & PHY_IRQ_LINK_EVENT) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
			      PHY_IRQ_LINK_EVENT);
		phy_trigger_machine(phydev);
		ret = IRQ_HANDLED;
	}

	/* Egress timestamp interrupt: drain every valid timestamp from the
	 * egress ring and match it against the queued TX skbs.
	 */
	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_IRQ_STATUS);
	if (irq & PTP_IRQ_EGR_TS) {
		while (nxp_c45_get_hwtxts(priv, &hwts))
			nxp_c45_process_txts(priv, &hwts);

		ret = IRQ_HANDLED;
	}

	return ret;
}

static int nxp_c45_soft_reset(struct phy_device *phydev)
{
	int ret;

	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
			    DEVICE_CONTROL_RESET);
	if (ret)
		return ret;

	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
					 VEND1_DEVICE_CONTROL, ret,
					 !(ret & DEVICE_CONTROL_RESET), 20000,
					 240000, false);
}

static int nxp_c45_cable_test_start(struct phy_device *phydev)
{
	return phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST,
			     CABLE_TEST_ENABLE | CABLE_TEST_START);
}

static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
					  bool *finished)
{
	int ret;
	u8 cable_test_result;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST);
	if (!(ret & CABLE_TEST_VALID)) {
		*finished = false;
		return 0;
	}

	*finished = true;
	cable_test_result = ret & GENMASK(2, 0);

	switch (cable_test_result) {
	case CABLE_TEST_OK:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_OK);
		break;
	case CABLE_TEST_SHORTED:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
		break;
	case CABLE_TEST_OPEN:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
		break;
	default:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
	}

	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST,
			   CABLE_TEST_ENABLE);

	return nxp_c45_start_op(phydev);
}

static int nxp_c45_setup_master_slave(struct phy_device *phydev)
{
	switch (phydev->master_slave_set) {
	case MASTER_SLAVE_CFG_MASTER_FORCE:
	case MASTER_SLAVE_CFG_MASTER_PREFERRED:
		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL,
			      MASTER_MODE);
		break;
	case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
	case MASTER_SLAVE_CFG_SLAVE_FORCE:
		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL,
			      SLAVE_MODE);
		break;
	case MASTER_SLAVE_CFG_UNKNOWN:
	case MASTER_SLAVE_CFG_UNSUPPORTED:
		return 0;
	default:
		phydev_warn(phydev, "Unsupported Master/Slave mode\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nxp_c45_read_master_slave(struct phy_device *phydev)
{
	int reg;

	phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
	phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;

	reg = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL);
	if (reg < 0)
		return reg;

	if (reg & B100T1_PMAPMD_MASTER) {
		phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_FORCE;
		phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER;
	} else {
		phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_FORCE;
		phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE;
	}

	return 0;
}

static int nxp_c45_config_aneg(struct phy_device *phydev)
{
	return nxp_c45_setup_master_slave(phydev);
}

static int nxp_c45_read_status(struct phy_device *phydev)
{
	int ret;

	ret = genphy_c45_read_status(phydev);
	if (ret)
		return ret;

	ret = nxp_c45_read_master_slave(phydev);
	if (ret)
		return ret;

	return 0;
}

static int nxp_c45_get_sqi(struct phy_device *phydev)
{
	int reg;

	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
	if (!(reg & SQI_VALID))
		return -EINVAL;

	reg &= SQI_MASK;

	return reg;
}

static int nxp_c45_get_sqi_max(struct phy_device *phydev)
{
	return MAX_SQI;
}

static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
{
	if (delay < MIN_ID_PS) {
		phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
		return -EINVAL;
	}

	if (delay > MAX_ID_PS) {
		phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);
		return -EINVAL;
	}

	return 0;
}

static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
{
	/* The delay in degrees is encoded by the hardware as
	 * 73.8 + phase_offset_raw * 0.9, so invert that relation here:
	 * phase_offset_raw = (degrees * 10 - 738) / 9, using fixed-point
	 * arithmetic to avoid floating point.
	 */
	phase_offset_raw *= 10;
	phase_offset_raw -= 738;
	return div_u64(phase_offset_raw, 9);
}

static void nxp_c45_disable_delays(struct phy_device *phydev)
{
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
}

static void nxp_c45_set_delays(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	u64 tx_delay = priv->tx_delay;
	u64 rx_delay = priv->rx_delay;
	u64 degree;

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
		degree = div_u64(tx_delay, PS_PER_DEGREE);
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
	} else {
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
				   ID_ENABLE);
	}

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
		degree = div_u64(rx_delay, PS_PER_DEGREE);
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
	} else {
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
				   ID_ENABLE);
	}
}

static int nxp_c45_get_delays(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	int ret;

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
		ret = device_property_read_u32(&phydev->mdio.dev,
					       "tx-internal-delay-ps",
					       &priv->tx_delay);
		if (ret)
			priv->tx_delay = DEFAULT_ID_PS;

		ret = nxp_c45_check_delay(phydev, priv->tx_delay);
		if (ret) {
			phydev_err(phydev,
				   "tx-internal-delay-ps invalid value\n");
			return ret;
		}
	}

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
		ret = device_property_read_u32(&phydev->mdio.dev,
					       "rx-internal-delay-ps",
					       &priv->rx_delay);
		if (ret)
			priv->rx_delay = DEFAULT_ID_PS;

		ret = nxp_c45_check_delay(phydev, priv->rx_delay);
		if (ret) {
			phydev_err(phydev,
				   "rx-internal-delay-ps invalid value\n");
			return ret;
		}
	}

	return 0;
}

static int nxp_c45_set_phy_mode(struct phy_device *phydev)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
	phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!(ret & RGMII_ABILITY)) {
			phydev_err(phydev, "rgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		nxp_c45_disable_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		if (!(ret & RGMII_ID_ABILITY)) {
			phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		ret = nxp_c45_get_delays(phydev);
		if (ret)
			return ret;

		nxp_c45_set_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_MII:
		if (!(ret & MII_ABILITY)) {
			phydev_err(phydev, "mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII);
		break;
	case PHY_INTERFACE_MODE_REVMII:
		if (!(ret & REVMII_ABILITY)) {
			phydev_err(phydev, "rev-mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!(ret & RMII_ABILITY)) {
			phydev_err(phydev, "rmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RMII);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		if (!(ret & SGMII_ABILITY)) {
			phydev_err(phydev, "sgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_SGMII);
		break;
	case PHY_INTERFACE_MODE_INTERNAL:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int nxp_c45_config_init(struct phy_device *phydev)
{
	int ret;

	ret = nxp_c45_config_enable(phydev);
	if (ret) {
		phydev_err(phydev, "Failed to enable config\n");
		return ret;
	}

	/* Bug workaround for SJA1110 rev B: enable write access
	 * to MDIO_MMD_PMAPMD
	 */
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
			 PHY_CONFIG_AUTO);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
			 COUNTER_EN);

	ret = nxp_c45_set_phy_mode(phydev);
	if (ret)
		return ret;

	phydev->autoneg = AUTONEG_DISABLE;

	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CLK_PERIOD,
		      PTP_CLK_PERIOD_100BT1);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_HW_LTC_LOCK_CTRL,
			   HW_LTC_LOCK_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
		      RX_TS_INSRT_MODE2);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
			 PTP_ENABLE);

	return nxp_c45_start_op(phydev);
}

static int nxp_c45_probe(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv;
	int ptp_ability;
	int ret = 0;

	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	skb_queue_head_init(&priv->tx_queue);
	skb_queue_head_init(&priv->rx_queue);

	priv->phydev = phydev;

	phydev->priv = priv;

	mutex_init(&priv->ptp_lock);

	ptp_ability = phy_read_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PORT_ABILITIES);
	ptp_ability = !!(ptp_ability & PTP_ABILITY);
	if (!ptp_ability) {
		phydev_dbg(phydev, "the phy does not support PTP");
		goto no_ptp_support;
	}

	if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
	    IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
		priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
		priv->mii_ts.txtstamp = nxp_c45_txtstamp;
		priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
		priv->mii_ts.ts_info = nxp_c45_ts_info;
		phydev->mii_ts = &priv->mii_ts;
		ret = nxp_c45_init_ptp_clock(priv);
	} else {
		phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
	}

no_ptp_support:

	return ret;
}

static struct phy_driver nxp_c45_driver[] = {
	{
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
		.name			= "NXP C45 TJA1103",
		.features		= PHY_BASIC_T1_FEATURES,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= nxp_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= nxp_c45_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= nxp_c45_read_status,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
	},
};

module_phy_driver(nxp_c45_driver);

static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
	{ },
};

MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);

MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
MODULE_DESCRIPTION("NXP C45 PHY driver");
MODULE_LICENSE("GPL v2");