/*
 * Driver for Microsemi VSC85xx PHYs - timestamping and PHC support.
 */
#include <linux/gpio/consumer.h>
#include <linux/ip.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/udp.h>
#include <asm/unaligned.h>

#include "mscc.h"
#include "mscc_ptp.h"

/* All 1588 (PTP/timestamping) CSR accesses go through the package base PHY
 * address, on the 1588 extended page, with the MDIO bus lock held.
 */
static int phy_ts_base_write(struct phy_device *phydev, u32 regnum, u16 val)
{
	struct vsc8531_private *priv = phydev->priv;

	WARN_ON_ONCE(!mutex_is_locked(&phydev->mdio.bus->mdio_lock));
	return __mdiobus_write(phydev->mdio.bus, priv->ts_base_addr, regnum,
			       val);
}

static int phy_ts_base_read(struct phy_device *phydev, u32 regnum)
{
	struct vsc8531_private *priv = phydev->priv;

	WARN_ON_ONCE(!mutex_is_locked(&phydev->mdio.bus->mdio_lock));
	return __mdiobus_read(phydev->mdio.bus, priv->ts_base_addr, regnum);
}

enum ts_blk_hw {
	INGRESS_ENGINE_0,
	EGRESS_ENGINE_0,
	INGRESS_ENGINE_1,
	EGRESS_ENGINE_1,
	INGRESS_ENGINE_2,
	EGRESS_ENGINE_2,
	PROCESSOR_0,
	PROCESSOR_1,
};

enum ts_blk {
	INGRESS,
	EGRESS,
	PROCESSOR,
};

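/* The timestamping CSRs are not mapped directly into the MDIO register
 * space: reads and writes go through the BIU address/data registers on the
 * 1588 page, selecting one of the hardware blocks (ingress/egress analyzer
 * 0/1 or processor 0/1) depending on whether the port is the package base
 * port.
 */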
static u32 vsc85xx_ts_read_csr(struct phy_device *phydev, enum ts_blk blk,
			       u16 addr)
{
	struct vsc8531_private *priv = phydev->priv;
	bool base_port = phydev->mdio.addr == priv->ts_base_addr;
	u32 val, cnt = 0;
	enum ts_blk_hw blk_hw;

	switch (blk) {
	case INGRESS:
		blk_hw = base_port ? INGRESS_ENGINE_0 : INGRESS_ENGINE_1;
		break;
	case EGRESS:
		blk_hw = base_port ? EGRESS_ENGINE_0 : EGRESS_ENGINE_1;
		break;
	case PROCESSOR:
	default:
		blk_hw = base_port ? PROCESSOR_0 : PROCESSOR_1;
		break;
	}

	phy_lock_mdio_bus(phydev);

	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_1588);

	phy_ts_base_write(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL, BIU_ADDR_EXE |
			  BIU_ADDR_READ | BIU_BLK_ID(blk_hw) |
			  BIU_CSR_ADDR(addr));

	do {
		val = phy_ts_base_read(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL);
	} while (!(val & BIU_ADDR_EXE) && cnt++ < BIU_ADDR_CNT_MAX);

	val = phy_ts_base_read(phydev, MSCC_PHY_TS_CSR_DATA_MSB);
	val <<= 16;
	val |= phy_ts_base_read(phydev, MSCC_PHY_TS_CSR_DATA_LSB);

	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);

	phy_unlock_mdio_bus(phydev);

	return val;
}

static void vsc85xx_ts_write_csr(struct phy_device *phydev, enum ts_blk blk,
				 u16 addr, u32 val)
{
	struct vsc8531_private *priv = phydev->priv;
	bool base_port = phydev->mdio.addr == priv->ts_base_addr;
	u32 reg, bypass, cnt = 0, lower = val & 0xffff, upper = val >> 16;
	bool cond = (addr == MSCC_PHY_PTP_LTC_CTRL ||
		     addr == MSCC_PHY_1588_INGR_VSC85XX_INT_MASK ||
		     addr == MSCC_PHY_1588_VSC85XX_INT_MASK ||
		     addr == MSCC_PHY_1588_INGR_VSC85XX_INT_STATUS ||
		     addr == MSCC_PHY_1588_VSC85XX_INT_STATUS) &&
		    blk == PROCESSOR;
	enum ts_blk_hw blk_hw;

	switch (blk) {
	case INGRESS:
		blk_hw = base_port ? INGRESS_ENGINE_0 : INGRESS_ENGINE_1;
		break;
	case EGRESS:
		blk_hw = base_port ? EGRESS_ENGINE_0 : EGRESS_ENGINE_1;
		break;
	case PROCESSOR:
	default:
		blk_hw = base_port ? PROCESSOR_0 : PROCESSOR_1;
		break;
	}

	phy_lock_mdio_bus(phydev);

	bypass = phy_ts_base_read(phydev, MSCC_PHY_BYPASS_CONTROL);

	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_1588);

	/* For the processor registers listed above, the MSB half is only
	 * written when it is actually used, and the bypass control register
	 * is restored afterwards.
	 */
	if (!cond || upper)
		phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_MSB, upper);

	phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_LSB, lower);

	phy_ts_base_write(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL, BIU_ADDR_EXE |
			  BIU_ADDR_WRITE | BIU_BLK_ID(blk_hw) |
			  BIU_CSR_ADDR(addr));

	do {
		reg = phy_ts_base_read(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL);
	} while (!(reg & BIU_ADDR_EXE) && cnt++ < BIU_ADDR_CNT_MAX);

	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);

	if (cond && upper)
		phy_ts_base_write(phydev, MSCC_PHY_BYPASS_CONTROL, bypass);

	phy_unlock_mdio_bus(phydev);
}

#define PTP_HEADER_TRNSP_MSG		26
#define PTP_HEADER_DOMAIN_NUM		25
#define PTP_HEADER_BYTE_8_31(x)		(31 - (x))
#define MAC_ADDRESS_BYTE(x)		((x) + (35 - ETH_ALEN + 1))
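/* The byte selectors above index into the 16-byte frame signature built by
 * the analyzer for each egress PTP frame: sequence ID, domain number,
 * message type and destination MAC address. get_sig() below computes the
 * same layout in software to match skbs against timestamp FIFO entries.
 */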

static int vsc85xx_ts_fsb_init(struct phy_device *phydev)
{
	u8 sig_sel[16] = {};
	signed char i, pos = 0;

	/* Sequence ID: 2 bytes, starting at PTP header byte 30 */
	for (i = 1; i >= 0; i--)
		sig_sel[pos++] = PTP_HEADER_BYTE_8_31(30 + i);

	/* Domain number */
	sig_sel[pos++] = PTP_HEADER_DOMAIN_NUM;

	/* Message type */
	sig_sel[pos++] = PTP_HEADER_TRNSP_MSG;

	/* Destination MAC address */
	for (i = ETH_ALEN - 1; i >= 0; i--)
		sig_sel[pos++] = MAC_ADDRESS_BYTE(i);

	/* Pad the remaining selectors to reach a 16-byte signature */
	for (; pos < ARRAY_SIZE(sig_sel); pos++)
		sig_sel[pos] = PTP_HEADER_TRNSP_MSG;

	for (i = 0; i <= 2; i++) {
		u32 val = 0;

		for (pos = i * 5 + 4; pos >= i * 5; pos--)
			val = (val << 6) | sig_sel[pos];

		vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_REG(i),
				     val);
	}

	vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_REG(3),
			     sig_sel[15]);

	return 0;
}

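/* Ingress/egress latency compensation values, in nanoseconds. The first
 * three entries of each table correspond to 1000/100/10 Mbps links (see
 * vsc85xx_ts_set_latencies()); the remaining entries are not indexed by the
 * speed lookup in this file.
 */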
static const u32 vsc85xx_egr_latency[] = {
	1272,
	12516,
	125444,

	1277,
	12537,
};

static const u32 vsc85xx_egr_latency_macsec[] = {
	3496,
	34760,
	347844,

	3502,
	34780,
};

static const u32 vsc85xx_ingr_latency[] = {
	208,
	304,
	2023,

	98,
	197,
};

static const u32 vsc85xx_ingr_latency_macsec[] = {
	2408,
	22300,
	222009,

	2299,
	22192,
};

static void vsc85xx_ts_set_latencies(struct phy_device *phydev)
{
	u32 val, ingr_latency, egr_latency;
	u8 idx;

	/* No need to program latencies while the link is down */
	if (!phydev->link)
		return;

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_STALL_LATENCY,
			     STALL_EGR_LATENCY(phydev->speed));

	switch (phydev->speed) {
	case SPEED_100:
		idx = 1;
		break;
	case SPEED_1000:
		idx = 0;
		break;
	default:
		idx = 2;
		break;
	}

	ingr_latency = IS_ENABLED(CONFIG_MACSEC) ?
		vsc85xx_ingr_latency_macsec[idx] : vsc85xx_ingr_latency[idx];
	egr_latency = IS_ENABLED(CONFIG_MACSEC) ?
		vsc85xx_egr_latency_macsec[idx] : vsc85xx_egr_latency[idx];

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_LOCAL_LATENCY,
			     PTP_INGR_LOCAL_LATENCY(ingr_latency));

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_TSP_CTRL);
	val |= PHY_PTP_INGR_TSP_CTRL_LOAD_DELAYS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_TSP_CTRL,
			     val);

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_LOCAL_LATENCY,
			     PTP_EGR_LOCAL_LATENCY(egr_latency));

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL);
	val |= PHY_PTP_EGR_TSP_CTRL_LOAD_DELAYS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL, val);
}

static int vsc85xx_ts_disable_flows(struct phy_device *phydev, enum ts_blk blk)
{
	u8 i;

	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_NXT_COMP, 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM,
			     IP1_NXT_PROT_UDP_CHKSUM_WIDTH(2));
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_NXT_PROT_NXT_COMP, 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_NXT_PROT_UDP_CHKSUM,
			     IP2_NXT_PROT_UDP_CHKSUM_WIDTH(2));
	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_MPLS_COMP_NXT_COMP, 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT, 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH2_NTX_PROT, 0);

	for (i = 0; i < COMP_MAX_FLOWS; i++) {
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(i),
				     IP1_FLOW_VALID_CH0 | IP1_FLOW_VALID_CH1);
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_FLOW_ENA(i),
				     IP2_FLOW_VALID_CH0 | IP2_FLOW_VALID_CH1);
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(i),
				     ETH1_FLOW_VALID_CH0 | ETH1_FLOW_VALID_CH1);
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH2_FLOW_ENA(i),
				     ETH2_FLOW_VALID_CH0 | ETH2_FLOW_VALID_CH1);
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_MPLS_FLOW_CTRL(i),
				     MPLS_FLOW_VALID_CH0 | MPLS_FLOW_VALID_CH1);

		if (i >= PTP_COMP_MAX_FLOWS)
			continue;

		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MASK_UPPER(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MASK_LOWER(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MATCH_UPPER(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MATCH_LOWER(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_PTP_ACTION(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_PTP_ACTION2(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_PTP_0_FIELD(i), 0);
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_OAM_PTP_FLOW_ENA(i),
				     0);
	}

	return 0;
}

static int vsc85xx_ts_eth_cmp1_sig(struct phy_device *phydev)
{
	u32 val;

	val = vsc85xx_ts_read_csr(phydev, EGRESS, MSCC_PHY_ANA_ETH1_NTX_PROT);
	val &= ~ANA_ETH1_NTX_PROT_SIG_OFF_MASK;
	val |= ANA_ETH1_NTX_PROT_SIG_OFF(0);
	vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_ETH1_NTX_PROT, val);

	val = vsc85xx_ts_read_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_CFG);
	val &= ~ANA_FSB_ADDR_FROM_BLOCK_SEL_MASK;
	val |= ANA_FSB_ADDR_FROM_ETH1;
	vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_CFG, val);

	return 0;
}

static struct vsc85xx_ptphdr *get_ptp_header_l4(struct sk_buff *skb,
						struct iphdr *iphdr,
						struct udphdr *udphdr)
{
	if (iphdr->version != 4 || iphdr->protocol != IPPROTO_UDP)
		return NULL;

	return (struct vsc85xx_ptphdr *)(((unsigned char *)udphdr) + UDP_HLEN);
}

static struct vsc85xx_ptphdr *get_ptp_header_tx(struct sk_buff *skb)
{
	struct ethhdr *ethhdr = eth_hdr(skb);
	struct udphdr *udphdr;
	struct iphdr *iphdr;

	if (ethhdr->h_proto == htons(ETH_P_1588))
		return (struct vsc85xx_ptphdr *)(((unsigned char *)ethhdr) +
						 skb_mac_header_len(skb));

	if (ethhdr->h_proto != htons(ETH_P_IP))
		return NULL;

	iphdr = ip_hdr(skb);
	udphdr = udp_hdr(skb);

	return get_ptp_header_l4(skb, iphdr, udphdr);
}

static struct vsc85xx_ptphdr *get_ptp_header_rx(struct sk_buff *skb,
						enum hwtstamp_rx_filters rx_filter)
{
	struct udphdr *udphdr;
	struct iphdr *iphdr;

	if (rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT)
		return (struct vsc85xx_ptphdr *)skb->data;

	iphdr = (struct iphdr *)skb->data;
	udphdr = (struct udphdr *)(skb->data + iphdr->ihl * 4);

	return get_ptp_header_l4(skb, iphdr, udphdr);
}

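/* Build, in software, the same 16-byte signature the hardware stores with
 * each egress timestamp (sequence ID, domain, message type and destination
 * MAC, padded with the message type byte).
 */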
static int get_sig(struct sk_buff *skb, u8 *sig)
{
	struct vsc85xx_ptphdr *ptphdr = get_ptp_header_tx(skb);
	struct ethhdr *ethhdr = eth_hdr(skb);
	unsigned int i;

	if (!ptphdr)
		return -EOPNOTSUPP;

	sig[0] = (__force u16)ptphdr->seq_id >> 8;
	sig[1] = (__force u16)ptphdr->seq_id & GENMASK(7, 0);
	sig[2] = ptphdr->domain;
	sig[3] = ptphdr->tsmt & GENMASK(3, 0);

	memcpy(&sig[4], ethhdr->h_dest, ETH_ALEN);

	/* Pad the remaining bytes, mirroring the FSB configuration */
	for (i = 10; i < 16; i++)
		sig[i] = ptphdr->tsmt & GENMASK(3, 0);

	return 0;
}

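/* Pop one entry from the egress timestamp FIFO and try to match it, by
 * signature, against the skbs waiting in the TX queue.
 */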
static void vsc85xx_dequeue_skb(struct vsc85xx_ptp *ptp)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct vsc85xx_ts_fifo fifo;
	struct sk_buff *skb;
	u8 skb_sig[16], *p;
	int i, len;
	u32 reg;

	memset(&fifo, 0, sizeof(fifo));
	p = (u8 *)&fifo;

	reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_TS_FIFO(0));
	if (reg & PTP_EGR_TS_FIFO_EMPTY)
		return;

	*p++ = reg & 0xff;
	*p++ = (reg >> 8) & 0xff;

	/* Read the rest of the FIFO entry (6 more 32-bit words) */
	for (i = 1; i < 7; i++) {
		reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
					  MSCC_PHY_PTP_EGR_TS_FIFO(i));
		*p++ = reg & 0xff;
		*p++ = (reg >> 8) & 0xff;
		*p++ = (reg >> 16) & 0xff;
		*p++ = (reg >> 24) & 0xff;
	}

	len = skb_queue_len(&ptp->tx_queue);
	if (len < 1)
		return;

	while (len--) {
		skb = __skb_dequeue(&ptp->tx_queue);
		if (!skb)
			return;

		/* An skb we cannot compute a signature for will never match
		 * any FIFO entry, drop it.
		 */
		if (get_sig(skb, skb_sig) < 0) {
			kfree_skb(skb);
			continue;
		}

		/* Signature matches this FIFO entry: deliver the timestamp */
		if (!memcmp(skb_sig, fifo.sig, sizeof(fifo.sig))) {
			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ktime_set(fifo.secs, fifo.ns);
			skb_complete_tx_timestamp(skb, &shhwtstamps);

			return;
		}

		/* Valid signature but not the one in the FIFO right now: put
		 * the skb back at the end of the queue and keep looking.
		 */
		__skb_queue_tail(&ptp->tx_queue, skb);
	}
}

static void vsc85xx_get_tx_ts(struct vsc85xx_ptp *ptp)
{
	u32 reg;

	do {
		vsc85xx_dequeue_skb(ptp);

		/* More timestamps pending in the FIFO? */
		reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
					  MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
	} while (PTP_EGR_FIFO_LEVEL_LAST_READ(reg) > 1);
}

static int vsc85xx_ptp_cmp_init(struct phy_device *phydev, enum ts_blk blk)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
	static const u8 msgs[] = {
		PTP_MSGTYPE_SYNC,
		PTP_MSGTYPE_DELAY_REQ
	};
	u32 val;
	u8 i;

	for (i = 0; i < ARRAY_SIZE(msgs); i++) {
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i),
				     base ? PTP_FLOW_VALID_CH0 :
				     PTP_FLOW_VALID_CH1);

		val = vsc85xx_ts_read_csr(phydev, blk,
					  MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i));
		val &= ~PTP_FLOW_DOMAIN_RANGE_ENA;
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i), val);

		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MATCH_UPPER(i),
				     msgs[i] << 24);

		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MASK_UPPER(i),
				     PTP_FLOW_MSG_TYPE_MASK);
	}

	return 0;
}

static int vsc85xx_eth_cmp1_init(struct phy_device *phydev, enum ts_blk blk)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
	u32 val;

	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NXT_PROT_TAG, 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT_VLAN_TPID,
			     ANA_ETH1_NTX_PROT_VLAN_TPID(ETH_P_8021AD));

	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0),
			     base ? ETH1_FLOW_VALID_CH0 : ETH1_FLOW_VALID_CH1);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_MATCH_MODE(0),
			     ANA_ETH1_FLOW_MATCH_VLAN_TAG2);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0), 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), 0);
	vsc85xx_ts_write_csr(phydev, blk,
			     MSCC_ANA_ETH1_FLOW_VLAN_RANGE_I_TAG(0), 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_VLAN_TAG1(0), 0);
	vsc85xx_ts_write_csr(phydev, blk,
			     MSCC_ANA_ETH1_FLOW_VLAN_TAG2_I_TAG(0), 0);

	val = vsc85xx_ts_read_csr(phydev, blk,
				  MSCC_ANA_ETH1_FLOW_MATCH_MODE(0));
	val &= ~ANA_ETH1_FLOW_MATCH_VLAN_TAG_MASK;
	val |= ANA_ETH1_FLOW_MATCH_VLAN_VERIFY;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_MATCH_MODE(0),
			     val);

	return 0;
}

static int vsc85xx_ip_cmp1_init(struct phy_device *phydev, enum ts_blk blk)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
	u32 val;

	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MATCH2_UPPER,
			     PTP_EV_PORT);
	/* Match the PTP event UDP port; the other port field is ignored */
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MASK2_UPPER,
			     0xffff);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MATCH2_LOWER,
			     0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MASK2_LOWER, 0);

	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0));
	val &= ~IP1_FLOW_ENA_CHANNEL_MASK_MASK;
	val |= base ? IP1_FLOW_VALID_CH0 : IP1_FLOW_VALID_CH1;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0), val);

	/* Accept any source and destination IP address */
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_UPPER(0), 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_UPPER(0), 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_UPPER_MID(0),
			     0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_UPPER_MID(0),
			     0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_LOWER_MID(0),
			     0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_LOWER_MID(0),
			     0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_LOWER(0), 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_LOWER(0), 0);

	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_IP_CHKSUM_SEL, 0);

	return 0;
}

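/* Frequency adjustment: the LTC adds (or subtracts) one extra nanosecond
 * every "adj" nanoseconds, where adj is derived from the requested
 * scaled-ppm offset.
 */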
static int vsc85xx_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc8531_private *priv = phydev->priv;
	u64 adj = 0;
	u32 val;

	if (abs(scaled_ppm) < 66 || abs(scaled_ppm) > 65536UL * 1000000UL)
		return 0;

	adj = div64_u64(1000000ULL * 65536ULL, abs(scaled_ppm));
	if (adj > 1000000000L)
		adj = 1000000000L;

	val = PTP_AUTO_ADJ_NS_ROLLOVER(adj);
	val |= scaled_ppm > 0 ? PTP_AUTO_ADJ_ADD_1NS : PTP_AUTO_ADJ_SUB_1NS;

	mutex_lock(&priv->phc_lock);

	/* Program the rollover period and the add/subtract direction */
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_AUTO_ADJ,
			     val);

	/* Apply the new auto-adjustment settings */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
	val |= PTP_LTC_CTRL_AUTO_ADJ_UPDATE;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);

	mutex_unlock(&priv->phc_lock);

	return 0;
}

static int __vsc85xx_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc85xx_shared_private *shared =
		(struct vsc85xx_shared_private *)phydev->shared->priv;
	struct vsc8531_private *priv = phydev->priv;
	u32 val;

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
	val |= PTP_LTC_CTRL_SAVE_ENA;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);

	/* Toggling the shared load/save pin latches the current LTC time into
	 * the saved-time registers read below.
	 */
	mutex_lock(&shared->gpio_lock);
	gpiod_set_value(priv->load_save, 1);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_LTC_SAVED_SEC_MSB);

	ts->tv_sec = ((time64_t)val) << 32;

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_LTC_SAVED_SEC_LSB);
	ts->tv_sec += val;

	ts->tv_nsec = vsc85xx_ts_read_csr(phydev, PROCESSOR,
					  MSCC_PHY_PTP_LTC_SAVED_NS);

	gpiod_set_value(priv->load_save, 0);
	mutex_unlock(&shared->gpio_lock);

	return 0;
}

static int vsc85xx_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc8531_private *priv = phydev->priv;

	mutex_lock(&priv->phc_lock);
	__vsc85xx_gettime(info, ts);
	mutex_unlock(&priv->phc_lock);

	return 0;
}

static int __vsc85xx_settime(struct ptp_clock_info *info,
			     const struct timespec64 *ts)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc85xx_shared_private *shared =
		(struct vsc85xx_shared_private *)phydev->shared->priv;
	struct vsc8531_private *priv = phydev->priv;
	u32 val;

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_MSB,
			     PTP_LTC_LOAD_SEC_MSB(ts->tv_sec));
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_LSB,
			     PTP_LTC_LOAD_SEC_LSB(ts->tv_sec));
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_NS,
			     PTP_LTC_LOAD_NS(ts->tv_nsec));

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
	val |= PTP_LTC_CTRL_LOAD_ENA;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);

	/* Toggling the shared load/save pin loads the staged time into the
	 * LTC.
	 */
	mutex_lock(&shared->gpio_lock);
	gpiod_set_value(priv->load_save, 1);

	val &= ~PTP_LTC_CTRL_LOAD_ENA;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);

	gpiod_set_value(priv->load_save, 0);
	mutex_unlock(&shared->gpio_lock);

	return 0;
}

static int vsc85xx_settime(struct ptp_clock_info *info,
			   const struct timespec64 *ts)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc8531_private *priv = phydev->priv;

	mutex_lock(&priv->phc_lock);
	__vsc85xx_settime(info, ts);
	mutex_unlock(&priv->phc_lock);

	return 0;
}

static int vsc85xx_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc8531_private *priv = phydev->priv;
	u32 val;

	/* The hardware offset adjustment only covers deltas below one second;
	 * fall back to a full get/set of the clock for anything larger.
	 */
	if (abs(delta) >= NSEC_PER_SEC) {
		struct timespec64 ts;
		u64 now;

		mutex_lock(&priv->phc_lock);

		__vsc85xx_gettime(info, &ts);
		now = ktime_to_ns(timespec64_to_ktime(ts));
		ts = ns_to_timespec64(now + delta);
		__vsc85xx_settime(info, &ts);

		mutex_unlock(&priv->phc_lock);

		return 0;
	}

	mutex_lock(&priv->phc_lock);

	val = PTP_LTC_OFFSET_VAL(abs(delta)) | PTP_LTC_OFFSET_ADJ;
	if (delta > 0)
		val |= PTP_LTC_OFFSET_ADD;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_OFFSET, val);

	mutex_unlock(&priv->phc_lock);

	return 0;
}

static int vsc85xx_eth1_next_comp(struct phy_device *phydev, enum ts_blk blk,
				  u32 next_comp, u32 etype)
{
	u32 val;

	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT);
	val &= ~ANA_ETH1_NTX_PROT_COMPARATOR_MASK;
	val |= next_comp;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT, val);

	val = ANA_ETH1_NXT_PROT_ETYPE_MATCH(etype) |
	      ANA_ETH1_NXT_PROT_ETYPE_MATCH_ENA;
	vsc85xx_ts_write_csr(phydev, blk,
			     MSCC_PHY_ANA_ETH1_NXT_PROT_ETYPE_MATCH, val);

	return 0;
}

static int vsc85xx_ip1_next_comp(struct phy_device *phydev, enum ts_blk blk,
				 u32 next_comp, u32 header)
{
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_NXT_COMP,
			     ANA_IP1_NXT_PROT_NXT_COMP_BYTES_HDR(header) |
			     next_comp);

	return 0;
}

static int vsc85xx_ts_ptp_action_flow(struct phy_device *phydev,
				      enum ts_blk blk, u8 flow,
				      enum ptp_cmd cmd)
{
	u32 val;

	/* Only act on PTP frames; check the reserved field as well */
	val = PTP_FLOW_PTP_0_FIELD_PTP_FRAME | PTP_FLOW_PTP_0_FIELD_RSVRD_CHECK;
	vsc85xx_ts_write_csr(phydev, blk,
			     MSCC_ANA_PTP_FLOW_PTP_0_FIELD(flow), val);

	val = PTP_FLOW_PTP_ACTION_CORR_OFFSET(8) |
	      PTP_FLOW_PTP_ACTION_TIME_OFFSET(8) |
	      PTP_FLOW_PTP_ACTION_PTP_CMD(cmd == PTP_SAVE_IN_TS_FIFO ?
					  PTP_NOP : cmd);
	if (cmd == PTP_SAVE_IN_TS_FIFO)
		val |= PTP_FLOW_PTP_ACTION_SAVE_LOCAL_TIME;
	else if (cmd == PTP_WRITE_NS)
		val |= PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_UPDATE |
		       PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_BYTE_OFFSET(6);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_PTP_ACTION(flow),
			     val);

	if (cmd == PTP_WRITE_1588)
		/* Rewrite the 10-byte originTimestamp field (offset 34) */
		val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(34) |
		      PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(10);
	else if (cmd == PTP_SAVE_IN_TS_FIFO)
		/* No frame rewrite, only save the timestamp in the TS FIFO */
		val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(0) |
		      PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(0);
	else
		/* Write the 4-byte nanoseconds value into the reserved field
		 * at offset 16 of the PTP header.
		 */
		val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(16) |
		      PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(4);
	vsc85xx_ts_write_csr(phydev, blk,
			     MSCC_ANA_PTP_FLOW_PTP_ACTION2(flow), val);

	return 0;
}

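/* Per-direction PTP actions: ingress flows write the nanoseconds into the
 * reserved field of the frame (picked up by vsc85xx_rxtstamp()), egress
 * one-step Sync frames get their originTimestamp rewritten in place, and
 * every other timestamped egress frame only pushes an entry into the TS
 * FIFO for two-step handling.
 */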
static int vsc85xx_ptp_conf(struct phy_device *phydev, enum ts_blk blk,
			    bool one_step, bool enable)
{
	static const u8 msgs[] = {
		PTP_MSGTYPE_SYNC,
		PTP_MSGTYPE_DELAY_REQ
	};
	u32 val;
	u8 i;

	for (i = 0; i < ARRAY_SIZE(msgs); i++) {
		if (blk == INGRESS)
			vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
						   PTP_WRITE_NS);
		else if (msgs[i] == PTP_MSGTYPE_SYNC && one_step)
			/* One-step: patch the timestamp into the frame */
			vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
						   PTP_WRITE_1588);
		else
			vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
						   PTP_SAVE_IN_TS_FIFO);

		val = vsc85xx_ts_read_csr(phydev, blk,
					  MSCC_ANA_PTP_FLOW_ENA(i));
		val &= ~PTP_FLOW_ENA;
		if (enable)
			val |= PTP_FLOW_ENA;
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i),
				     val);
	}

	return 0;
}

static int vsc85xx_eth1_conf(struct phy_device *phydev, enum ts_blk blk,
			     bool enable)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	u32 val = ANA_ETH1_FLOW_ADDR_MATCH2_DEST;

	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT) {
		/* PTP over Ethernet multicast address 01-1B-19-00-00-00 */
		u8 ptp_multicast[6] = {0x01, 0x1b, 0x19, 0x00, 0x00, 0x00};

		val |= ANA_ETH1_FLOW_ADDR_MATCH2_FULL_ADDR |
		       get_unaligned_be16(&ptp_multicast[4]);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0),
				     get_unaligned_be32(ptp_multicast));
	} else {
		val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST;
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0), 0);
	}

	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0));
	val &= ~ETH1_FLOW_ENA;
	if (enable)
		val |= ETH1_FLOW_ENA;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0), val);

	return 0;
}

static int vsc85xx_ip1_conf(struct phy_device *phydev, enum ts_blk blk,
			    bool enable)
{
	u32 val;

	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_IP1_MODE,
			     ANA_IP1_NXT_PROT_IPV4 |
			     ANA_IP1_NXT_PROT_FLOW_OFFSET_IPV4);

	/* Match the UDP protocol number (offset 9 in the IPv4 header) */
	val = ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MASK(0xff) |
	      ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MATCH(IPPROTO_UDP) |
	      ANA_IP1_NXT_PROT_IP_MATCH1_PROT_OFF(9);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_IP_MATCH1,
			     val);

	/* The UDP header starts after the 20-byte IPv4 header */
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_OFFSET2,
			     ANA_IP1_NXT_PROT_OFFSET2(20));

	val = vsc85xx_ts_read_csr(phydev, blk,
				  MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM);
	val &= ~(IP1_NXT_PROT_UDP_CHKSUM_OFF_MASK |
		 IP1_NXT_PROT_UDP_CHKSUM_WIDTH_MASK);
	val |= IP1_NXT_PROT_UDP_CHKSUM_WIDTH(2);

	val &= ~(IP1_NXT_PROT_UDP_CHKSUM_UPDATE |
		 IP1_NXT_PROT_UDP_CHKSUM_CLEAR);

	/* Clear the UDP checksum (offset 26: 20-byte IPv4 header plus 6 bytes
	 * into the UDP header) since the frame contents may be modified.
	 */
	val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26) | IP1_NXT_PROT_UDP_CHKSUM_CLEAR;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM,
			     val);

	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0));
	val &= ~(IP1_FLOW_MATCH_ADDR_MASK | IP1_FLOW_ENA);
	val |= IP1_FLOW_MATCH_DEST_SRC_ADDR;
	if (enable)
		val |= IP1_FLOW_ENA;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0), val);

	return 0;
}

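/* Configure the analyzer engine attached to this port (engine 0 for the
 * package base port, engine 1 otherwise) according to the current RX filter
 * and TX timestamping type.
 */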
static int vsc85xx_ts_engine_init(struct phy_device *phydev, bool one_step)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	bool ptp_l4, base = phydev->mdio.addr == vsc8531->ts_base_addr;
	u8 eng_id = base ? 0 : 1;
	u32 val;

	ptp_l4 = vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L4_EVENT;

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ANALYZER_MODE);
	/* Disable both directions of this engine while reconfiguring it */
	val &= ~(PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id)) |
		 PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id)));
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
			     val);

	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT) {
		vsc85xx_eth1_next_comp(phydev, INGRESS,
				       ANA_ETH1_NTX_PROT_PTP_OAM, ETH_P_1588);
		vsc85xx_eth1_next_comp(phydev, EGRESS,
				       ANA_ETH1_NTX_PROT_PTP_OAM, ETH_P_1588);
	} else {
		vsc85xx_eth1_next_comp(phydev, INGRESS,
				       ANA_ETH1_NTX_PROT_IP_UDP_ACH_1,
				       ETH_P_IP);
		vsc85xx_eth1_next_comp(phydev, EGRESS,
				       ANA_ETH1_NTX_PROT_IP_UDP_ACH_1,
				       ETH_P_IP);

		/* IPv4 + UDP headers are 28 bytes before the PTP payload */
		vsc85xx_ip1_next_comp(phydev, INGRESS,
				      ANA_ETH1_NTX_PROT_PTP_OAM, 28);
		vsc85xx_ip1_next_comp(phydev, EGRESS,
				      ANA_ETH1_NTX_PROT_PTP_OAM, 28);
	}

	vsc85xx_eth1_conf(phydev, INGRESS,
			  vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);
	vsc85xx_ip1_conf(phydev, INGRESS,
			 ptp_l4 && vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);
	vsc85xx_ptp_conf(phydev, INGRESS, one_step,
			 vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);

	vsc85xx_eth1_conf(phydev, EGRESS,
			  vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);
	vsc85xx_ip1_conf(phydev, EGRESS,
			 ptp_l4 && vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);
	vsc85xx_ptp_conf(phydev, EGRESS, one_step,
			 vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);

	val &= ~PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id));
	if (vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF)
		val |= PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id));

	val &= ~PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id));
	if (vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE)
		val |= PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id));

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
			     val);

	return 0;
}

void vsc85xx_link_change_notify(struct phy_device *phydev)
{
	struct vsc8531_private *priv = phydev->priv;

	mutex_lock(&priv->ts_lock);
	vsc85xx_ts_set_latencies(phydev);
	mutex_unlock(&priv->ts_lock);
}

static void vsc85xx_ts_reset_fifo(struct phy_device *phydev)
{
	u32 val;

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
	val |= PTP_EGR_TS_FIFO_RESET;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
			     val);

	val &= ~PTP_EGR_TS_FIFO_RESET;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
			     val);
}

static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
{
	struct vsc8531_private *vsc8531 =
		container_of(mii_ts, struct vsc8531_private, mii_ts);
	struct phy_device *phydev = vsc8531->ptp->phydev;
	struct hwtstamp_config cfg;
	bool one_step = false;
	u32 val;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	switch (cfg.tx_type) {
	case HWTSTAMP_TX_ONESTEP_SYNC:
		one_step = true;
		break;
	case HWTSTAMP_TX_ON:
		break;
	case HWTSTAMP_TX_OFF:
		break;
	default:
		return -ERANGE;
	}

	vsc8531->ptp->tx_type = cfg.tx_type;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		break;
	default:
		return -ERANGE;
	}

	vsc8531->ptp->rx_filter = cfg.rx_filter;

	mutex_lock(&vsc8531->ts_lock);

	__skb_queue_purge(&vsc8531->ptp->tx_queue);
	__skb_queue_head_init(&vsc8531->ptp->tx_queue);

	/* Disable the predictors while the engines are reconfigured */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_PREDICTOR);
	val &= ~PTP_INGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
			     val);
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_PREDICTOR);
	val &= ~PTP_EGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
			     val);

	/* Bypass the timestamping block in directions that are not used */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL);
	val &= ~(PTP_IFACE_CTRL_EGR_BYPASS | PTP_IFACE_CTRL_INGR_BYPASS);
	if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF)
		val |= PTP_IFACE_CTRL_EGR_BYPASS;
	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_NONE)
		val |= PTP_IFACE_CTRL_INGR_BYPASS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);

	/* Drop any stale timestamps */
	vsc85xx_ts_reset_fifo(phydev);

	vsc85xx_ts_engine_init(phydev, one_step);

	/* Re-enable the predictors */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_PREDICTOR);
	val |= PTP_INGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
			     val);
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_PREDICTOR);
	val |= PTP_EGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
			     val);

	vsc8531->ptp->configured = 1;
	mutex_unlock(&vsc8531->ts_lock);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int vsc85xx_ts_info(struct mii_timestamper *mii_ts,
			   struct ethtool_ts_info *info)
{
	struct vsc8531_private *vsc8531 =
		container_of(mii_ts, struct vsc8531_private, mii_ts);

	info->phc_index = ptp_clock_index(vsc8531->ptp->ptp_clock);
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON) |
		(1 << HWTSTAMP_TX_ONESTEP_SYNC);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);

	return 0;
}

static void vsc85xx_txtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct vsc8531_private *vsc8531 =
		container_of(mii_ts, struct vsc8531_private, mii_ts);

	if (!vsc8531->ptp->configured)
		return;

	if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF) {
		kfree_skb(skb);
		return;
	}

	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	mutex_lock(&vsc8531->ts_lock);
	__skb_queue_tail(&vsc8531->ptp->tx_queue, skb);
	mutex_unlock(&vsc8531->ts_lock);
}

static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct vsc8531_private *vsc8531 =
		container_of(mii_ts, struct vsc8531_private, mii_ts);
	struct skb_shared_hwtstamps *shhwtstamps = NULL;
	struct vsc85xx_ptphdr *ptphdr;
	struct timespec64 ts;
	unsigned long ns;

	if (!vsc8531->ptp->configured)
		return false;

	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_NONE ||
	    type == PTP_CLASS_NONE)
		return false;

	vsc85xx_gettime(&vsc8531->ptp->caps, &ts);

	ptphdr = get_ptp_header_rx(skb, vsc8531->ptp->rx_filter);
	if (!ptphdr)
		return false;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));

	ns = ntohl(ptphdr->rsrvd2);

	/* Only the nanoseconds are written into the frame (reserved field);
	 * take the seconds from the PHC and correct for a possible rollover
	 * between the frame timestamp and the PHC read above.
	 */
	if (ts.tv_nsec < ns)
		ts.tv_sec--;

	shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns);
	netif_rx(skb);

	return true;
}

static const struct ptp_clock_info vsc85xx_clk_caps = {
	.owner		= THIS_MODULE,
	.name		= "VSC85xx timer",
	.max_adj	= S32_MAX,
	.n_alarm	= 0,
	.n_pins		= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.pps		= 0,
	.adjtime	= &vsc85xx_adjtime,
	.adjfine	= &vsc85xx_adjfine,
	.gettime64	= &vsc85xx_gettime,
	.settime64	= &vsc85xx_settime,
};

static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev)
{
	struct vsc8531_private *vsc8531 = phydev->priv;

	if (vsc8531->ts_base_addr != phydev->mdio.addr) {
		struct mdio_device *dev;

		dev = phydev->mdio.bus->mdio_map[vsc8531->ts_base_addr];
		phydev = container_of(dev, struct phy_device, mdio);

		return phydev->priv;
	}

	return vsc8531;
}

static bool vsc8584_is_1588_input_clk_configured(struct phy_device *phydev)
{
	struct vsc8531_private *vsc8531 = vsc8584_base_priv(phydev);

	return vsc8531->input_clk_init;
}

static void vsc8584_set_input_clk_configured(struct phy_device *phydev)
{
	struct vsc8531_private *vsc8531 = vsc8584_base_priv(phydev);

	vsc8531->input_clk_init = true;
}

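/* One-time setup of the per-port 1588 processor: reference clock, local
 * time counter, delay FIFOs, rewriter, timestamp FIFO and the default
 * (disabled) analyzer configuration, before registering the PTP clock.
 */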
static int __vsc8584_init_ptp(struct phy_device *phydev)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	static const u32 ltc_seq_e[] = { 0, 400000, 0, 0, 0 };
	static const u8 ltc_seq_a[] = { 8, 6, 5, 4, 2 };
	u32 val;

	if (!vsc8584_is_1588_input_clk_configured(phydev)) {
		phy_lock_mdio_bus(phydev);

		/* One-time configuration of the 1588 block input clock,
		 * shared by the PHYs of the package.
		 */
		phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
				  MSCC_PHY_PAGE_1588);
		phy_ts_base_write(phydev, 29, 0x7ae0);
		phy_ts_base_write(phydev, 30, 0xb71c);
		phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
				  MSCC_PHY_PAGE_STANDARD);

		phy_unlock_mdio_bus(phydev);

		vsc8584_set_input_clk_configured(phydev);
	}

	/* Keep the predictors disabled while configuring */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_PREDICTOR);
	val &= ~PTP_INGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
			     val);
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_PREDICTOR);
	val &= ~PTP_EGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
			     val);

	/* Run the LTC from the internal 250 MHz clock and program the
	 * matching sequence values.
	 */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
	val &= ~PTP_LTC_CTRL_CLK_SEL_MASK;
	val |= PTP_LTC_CTRL_CLK_SEL_INTERNAL_250;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQUENCE);
	val &= ~PTP_LTC_SEQUENCE_A_MASK;
	val |= PTP_LTC_SEQUENCE_A(ltc_seq_a[PHC_CLK_250MHZ]);
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQUENCE, val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQ);
	val &= ~(PTP_LTC_SEQ_ERR_MASK | PTP_LTC_SEQ_ADD_SUB);
	if (ltc_seq_e[PHC_CLK_250MHZ])
		val |= PTP_LTC_SEQ_ADD_SUB;
	val |= PTP_LTC_SEQ_ERR(ltc_seq_e[PHC_CLK_250MHZ]);
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQ, val);

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_1PPS_WIDTH_ADJ,
			     PPS_WIDTH_ADJ);

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_DELAY_FIFO,
			     IS_ENABLED(CONFIG_MACSEC) ?
			     PTP_INGR_DELAY_FIFO_DEPTH_MACSEC :
			     PTP_INGR_DELAY_FIFO_DEPTH_DEFAULT);

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_DELAY_FIFO,
			     IS_ENABLED(CONFIG_MACSEC) ?
			     PTP_EGR_DELAY_FIFO_DEPTH_MACSEC :
			     PTP_EGR_DELAY_FIFO_DEPTH_DEFAULT);

	/* Calibrate the PPS, load/save and start-of-frame detection paths:
	 * clear the bypasses and any stale calibration status, then trigger
	 * the calibration twice.
	 */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
	val &= ~(PTP_ACCUR_PPS_OUT_BYPASS | PTP_ACCUR_PPS_IN_BYPASS |
		 PTP_ACCUR_EGR_SOF_BYPASS | PTP_ACCUR_INGR_SOF_BYPASS |
		 PTP_ACCUR_LOAD_SAVE_BYPASS);
	val |= PTP_ACCUR_PPS_OUT_CALIB_ERR | PTP_ACCUR_PPS_OUT_CALIB_DONE |
	       PTP_ACCUR_PPS_IN_CALIB_ERR | PTP_ACCUR_PPS_IN_CALIB_DONE |
	       PTP_ACCUR_EGR_SOF_CALIB_ERR | PTP_ACCUR_EGR_SOF_CALIB_DONE |
	       PTP_ACCUR_INGR_SOF_CALIB_ERR | PTP_ACCUR_INGR_SOF_CALIB_DONE |
	       PTP_ACCUR_LOAD_SAVE_CALIB_ERR | PTP_ACCUR_LOAD_SAVE_CALIB_DONE;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
	val |= PTP_ACCUR_CALIB_TRIGG;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
	val &= ~PTP_ACCUR_CALIB_TRIGG;
	val |= PTP_ACCUR_PPS_OUT_CALIB_ERR | PTP_ACCUR_PPS_OUT_CALIB_DONE |
	       PTP_ACCUR_PPS_IN_CALIB_ERR | PTP_ACCUR_PPS_IN_CALIB_DONE |
	       PTP_ACCUR_EGR_SOF_CALIB_ERR | PTP_ACCUR_EGR_SOF_CALIB_DONE |
	       PTP_ACCUR_INGR_SOF_CALIB_ERR | PTP_ACCUR_INGR_SOF_CALIB_DONE |
	       PTP_ACCUR_LOAD_SAVE_CALIB_ERR | PTP_ACCUR_LOAD_SAVE_CALIB_DONE;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
	val |= PTP_ACCUR_CALIB_TRIGG;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
	val &= ~PTP_ACCUR_CALIB_TRIGG;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
			     val);

	/* Disable the timestamp FIFO serial interface */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_TSTAMP_FIFO_SI);
	val &= ~PTP_TSTAMP_FIFO_SI_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_TSTAMP_FIFO_SI,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_REWRITER_CTRL);
	val &= ~PTP_INGR_REWRITER_REDUCE_PREAMBLE;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_REWRITER_CTRL,
			     val);
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_REWRITER_CTRL);
	val &= ~PTP_EGR_REWRITER_REDUCE_PREAMBLE;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_REWRITER_CTRL,
			     val);

	/* Use bit 7 of the preamble as the rewriter flag bit: the flag value
	 * is set on ingress and cleared on egress.
	 */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_REWRITER_CTRL);
	val |= PTP_INGR_REWRITER_FLAG_BIT_OFF(7) | PTP_INGR_REWRITER_FLAG_VAL;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_REWRITER_CTRL,
			     val);
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_REWRITER_CTRL);
	val |= PTP_EGR_REWRITER_FLAG_BIT_OFF(7);
	val &= ~PTP_EGR_REWRITER_FLAG_VAL;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_REWRITER_CTRL,
			     val);

	/* Enable fractional nanosecond handling in both timestamp
	 * processors.
	 */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_TSP_CTRL);
	val |= PHY_PTP_INGR_TSP_CTRL_FRACT_NS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_TSP_CTRL,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL);
	val |= PHY_PTP_EGR_TSP_CTRL_FRACT_NS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL, val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_SERIAL_TOD_IFACE);
	val |= PTP_SERIAL_TOD_IFACE_LS_AUTO_CLR;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_SERIAL_TOD_IFACE,
			     val);

	vsc85xx_ts_fsb_init(phydev);

	/* The egress TS FIFO stores a 16-byte signature with each timestamp;
	 * the FIFO threshold is set at 7 entries.
	 */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
	val &= ~(PTP_EGR_TS_FIFO_SIG_BYTES_MASK | PTP_EGR_TS_FIFO_THRESH_MASK);
	val |= PTP_EGR_TS_FIFO_SIG_BYTES(16) | PTP_EGR_TS_FIFO_THRESH(7);
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
			     val);

	vsc85xx_ts_reset_fifo(phydev);

	val = PTP_IFACE_CTRL_CLK_ENA;
	if (!IS_ENABLED(CONFIG_MACSEC))
		val |= PTP_IFACE_CTRL_GMII_PROT;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);

	vsc85xx_ts_set_latencies(phydev);

	/* Read (and discard) the PTP version code */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_VERSION_CODE);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL);
	val |= PTP_IFACE_CTRL_EGR_BYPASS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);

	vsc85xx_ts_disable_flows(phydev, EGRESS);
	vsc85xx_ts_disable_flows(phydev, INGRESS);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ANALYZER_MODE);
	/* Disable all engines for now and use the split encapsulation flow
	 * mode in both directions.
	 */
	val &= ~(PTP_ANALYZER_MODE_EGR_ENA_MASK |
		 PTP_ANALYZER_MODE_INGR_ENA_MASK |
		 PTP_ANA_INGR_ENCAP_FLOW_MODE_MASK |
		 PTP_ANA_EGR_ENCAP_FLOW_MODE_MASK);
	val |= PTP_ANA_SPLIT_ENCAP_FLOW | PTP_ANA_INGR_ENCAP_FLOW_MODE(0x7) |
	       PTP_ANA_EGR_ENCAP_FLOW_MODE(0x7);
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
			     val);

	/* Initialize the Ethernet, IP and PTP comparators for both the
	 * ingress and egress directions.
	 */
	vsc85xx_eth_cmp1_init(phydev, INGRESS);
	vsc85xx_ip_cmp1_init(phydev, INGRESS);
	vsc85xx_ptp_cmp_init(phydev, INGRESS);
	vsc85xx_eth_cmp1_init(phydev, EGRESS);
	vsc85xx_ip_cmp1_init(phydev, EGRESS);
	vsc85xx_ptp_cmp_init(phydev, EGRESS);

	vsc85xx_ts_eth_cmp1_sig(phydev);

	vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp;
	vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp;
	vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp;
	vsc8531->mii_ts.ts_info = vsc85xx_ts_info;
	phydev->mii_ts = &vsc8531->mii_ts;

	memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, sizeof(vsc85xx_clk_caps));

	vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps,
						     &phydev->mdio.dev);
	return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock);
}

void vsc8584_config_ts_intr(struct phy_device *phydev)
{
	struct vsc8531_private *priv = phydev->priv;

	mutex_lock(&priv->ts_lock);
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_1588_VSC85XX_INT_MASK,
			     VSC85XX_1588_INT_MASK_MASK);
	mutex_unlock(&priv->ts_lock);
}

int vsc8584_ptp_init(struct phy_device *phydev)
{
	switch (phydev->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_VSC8572:
	case PHY_ID_VSC8574:
	case PHY_ID_VSC8575:
	case PHY_ID_VSC8582:
	case PHY_ID_VSC8584:
		return __vsc8584_init_ptp(phydev);
	}

	return 0;
}

irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev)
{
	struct vsc8531_private *priv = phydev->priv;
	int rc;

	mutex_lock(&priv->ts_lock);
	rc = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				 MSCC_PHY_1588_VSC85XX_INT_STATUS);
	/* Ack the interrupt sources */
	vsc85xx_ts_write_csr(phydev, PROCESSOR,
			     MSCC_PHY_1588_VSC85XX_INT_STATUS, rc);

	if (!(rc & VSC85XX_1588_INT_MASK_MASK)) {
		mutex_unlock(&priv->ts_lock);
		return IRQ_NONE;
	}

	if (rc & VSC85XX_1588_INT_FIFO_ADD) {
		vsc85xx_get_tx_ts(priv->ptp);
	} else if (rc & VSC85XX_1588_INT_FIFO_OVERFLOW) {
		__skb_queue_purge(&priv->ptp->tx_queue);
		vsc85xx_ts_reset_fifo(phydev);
	}

	mutex_unlock(&priv->ts_lock);
	return IRQ_HANDLED;
}

int vsc8584_ptp_probe(struct phy_device *phydev)
{
	struct vsc8531_private *vsc8531 = phydev->priv;

	vsc8531->ptp = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531->ptp),
				    GFP_KERNEL);
	if (!vsc8531->ptp)
		return -ENOMEM;

	mutex_init(&vsc8531->phc_lock);
	mutex_init(&vsc8531->ts_lock);

	/* The load/save GPIO is shared by all the PHYs of the package, so
	 * request it as non-exclusive. It must only be driven with the shared
	 * gpio_lock held.
	 */
	vsc8531->load_save = devm_gpiod_get_optional(&phydev->mdio.dev, "load-save",
						     GPIOD_FLAGS_BIT_NONEXCLUSIVE |
						     GPIOD_OUT_LOW);
	if (IS_ERR(vsc8531->load_save)) {
		phydev_err(phydev, "Can't get load-save GPIO (%ld)\n",
			   PTR_ERR(vsc8531->load_save));
		return PTR_ERR(vsc8531->load_save);
	}

	vsc8531->ptp->phydev = phydev;

	return 0;
}

int vsc8584_ptp_probe_once(struct phy_device *phydev)
{
	struct vsc85xx_shared_private *shared =
		(struct vsc85xx_shared_private *)phydev->shared->priv;

	/* Initialize the GPIO lock shared by the PHYs of the package */
	mutex_init(&shared->gpio_lock);

	return 0;
}