0001
0002
0003
0004
0005
0006
0007 #include "sparx5_main_regs.h"
0008 #include "sparx5_main.h"
0009
0010 #define XTR_EOF_0 ntohl((__force __be32)0x80000000u)
0011 #define XTR_EOF_1 ntohl((__force __be32)0x80000001u)
0012 #define XTR_EOF_2 ntohl((__force __be32)0x80000002u)
0013 #define XTR_EOF_3 ntohl((__force __be32)0x80000003u)
0014 #define XTR_PRUNED ntohl((__force __be32)0x80000004u)
0015 #define XTR_ABORT ntohl((__force __be32)0x80000005u)
0016 #define XTR_ESCAPE ntohl((__force __be32)0x80000006u)
0017 #define XTR_NOT_READY ntohl((__force __be32)0x80000007u)
0018
0019 #define XTR_VALID_BYTES(x) (4 - ((x) & 3))
0020
0021 #define INJ_TIMEOUT_NS 50000
0022
0023 void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp)
0024 {
0025
0026 spx5_wr(QS_XTR_FLUSH_FLUSH_SET(BIT(grp)), sparx5, QS_XTR_FLUSH);
0027
0028
0029 mdelay(1);
0030
0031
0032 spx5_wr(0, sparx5, QS_XTR_FLUSH);
0033 }
0034
0035 void sparx5_ifh_parse(u32 *ifh, struct frame_info *info)
0036 {
0037 u8 *xtr_hdr = (u8 *)ifh;
0038
0039
0040 u32 fwd =
0041 ((u32)xtr_hdr[27] << 24) |
0042 ((u32)xtr_hdr[28] << 16) |
0043 ((u32)xtr_hdr[29] << 8) |
0044 ((u32)xtr_hdr[30] << 0);
0045 fwd = (fwd >> 5);
0046 info->src_port = FIELD_GET(GENMASK(7, 1), fwd);
0047
0048 info->timestamp =
0049 ((u64)xtr_hdr[2] << 24) |
0050 ((u64)xtr_hdr[3] << 16) |
0051 ((u64)xtr_hdr[4] << 8) |
0052 ((u64)xtr_hdr[5] << 0);
0053 }
0054
/* Pull one complete frame from extraction group @grp and hand it to the
 * network stack.  The IFH is read first, then data words are read until a
 * status word signals end of frame (or abort).
 * @byte_swap: whether data words arrive byte-swapped relative to the
 *             status-word encoding (set per group in QS_XTR_GRP_CFG).
 */
static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
{
	bool eof_flag = false, pruned_flag = false, abort_flag = false;
	struct net_device *netdev;
	struct sparx5_port *port;
	struct frame_info fi;
	int i, byte_cnt = 0;
	struct sk_buff *skb;
	u32 ifh[IFH_LEN];
	u32 *rxbuf;

	/* Get IFH */
	for (i = 0; i < IFH_LEN; i++)
		ifh[i] = spx5_rd(sparx5, QS_XTR_RD(grp));

	/* Decode IFH (only what is needed: source port and timestamp) */
	sparx5_ifh_parse(ifh, &fi);

	/* Map the source port to its netdev; flush and drop the frame if
	 * the port index is out of range or has no netdev attached.
	 */
	port = fi.src_port < SPX5_PORTS ?
		sparx5->ports[fi.src_port] : NULL;
	if (!port || !port->ndev) {
		dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
		sparx5_xtr_flush(sparx5, grp);
		return;
	}

	/* Have netdev, allocate an rx buffer large enough for MTU + header */
	netdev = port->ndev;
	skb = netdev_alloc_skb(netdev, netdev->mtu + ETH_HLEN);
	if (!skb) {
		sparx5_xtr_flush(sparx5, grp);
		dev_err(sparx5->dev, "No skb allocated\n");
		netdev->stats.rx_dropped++;
		return;
	}
	rxbuf = (u32 *)skb->data;

	/* Read frame data word by word until a status word ends the frame */
	while (!eof_flag) {
		u32 val = spx5_rd(sparx5, QS_XTR_RD(grp));
		u32 cmp = val;

		/* Status codes are matched against their native encoding,
		 * so undo the byte swap on the comparison copy only.
		 */
		if (byte_swap)
			cmp = ntohl((__force __be32)val);

		switch (cmp) {
		case XTR_NOT_READY:
			break; /* Data not available yet — read again */
		case XTR_ABORT:
			/* No accompanying data; frame is dropped below */
			abort_flag = true;
			eof_flag = true;
			break;
		case XTR_EOF_0:
		case XTR_EOF_1:
		case XTR_EOF_2:
		case XTR_EOF_3:
			/* The low two bits of the EOF code give the number
			 * of unused bytes in the last data word; subtract
			 * them from the running byte count.
			 */
			if (!byte_swap)
				val = ntohl((__force __be32)val);
			byte_cnt -= (4 - XTR_VALID_BYTES(val));
			eof_flag = true;
			break;
		case XTR_PRUNED:
			/* Frame was truncated by HW; still consume the last
			 * data word, then discard the frame below.
			 */
			eof_flag = true;
			pruned_flag = true;
			fallthrough;
		case XTR_ESCAPE:
			/* The following word is literal frame data whose
			 * value collides with a status code.
			 */
			*rxbuf = spx5_rd(sparx5, QS_XTR_RD(grp));
			byte_cnt += 4;
			rxbuf++;
			break;
		default:
			*rxbuf = val;
			byte_cnt += 4;
			rxbuf++;
		}
	}

	/* Discard aborted or pruned frames */
	if (abort_flag || pruned_flag || !eof_flag) {
		netdev_err(netdev, "Discarded frame: abort:%d pruned:%d eof:%d\n",
			   abort_flag, pruned_flag, eof_flag);
		kfree_skb(skb);
		netdev->stats.rx_dropped++;
		return;
	}

	/* Mark frames from bridged ports as already forwarded in HW so the
	 * SW bridge does not forward them a second time.
	 */
	if (test_bit(port->portno, sparx5->bridge_mask))
		skb->offload_fwd_mark = 1;

	/* Finish up skb: strip the trailing FCS, pad to the minimum frame
	 * size, attach the RX timestamp, and pass the frame up the stack.
	 */
	skb_put(skb, byte_cnt - ETH_FCS_LEN);
	eth_skb_pad(skb);
	sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
	skb->protocol = eth_type_trans(skb, netdev);
	netdev->stats.rx_bytes += skb->len;
	netdev->stats.rx_packets++;
	netif_rx(skb);
}
0161
/* Transmit one frame via manual (register based) injection.
 * Writes SOF, the IFH, the frame data (padded to the 60-byte minimum),
 * then EOF with the valid-byte count of the last data word.
 * Returns NETDEV_TX_OK on success, or -EBUSY if the injection FIFO is
 * not ready to accept a new frame.
 */
static int sparx5_inject(struct sparx5 *sparx5,
			 u32 *ifh,
			 struct sk_buff *skb,
			 struct net_device *ndev)
{
	int grp = INJ_QUEUE;
	u32 val, w, count;
	u8 *buf;

	/* The FIFO must signal ready before a new frame may be written */
	val = spx5_rd(sparx5, QS_INJ_STATUS);
	if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp))) {
		pr_err_ratelimited("Injection: Queue not ready: 0x%lx\n",
				   QS_INJ_STATUS_FIFO_RDY_GET(val));
		return -EBUSY;
	}

	/* Indicate SOF */
	spx5_wr(QS_INJ_CTRL_SOF_SET(1) |
		QS_INJ_CTRL_GAP_SIZE_SET(1),
		sparx5, QS_INJ_CTRL(grp));

	/* Write the IFH to the chip */
	for (w = 0; w < IFH_LEN; w++)
		spx5_wr(ifh[w], sparx5, QS_INJ_WR(grp));

	/* Write frame data, whole words, rounding up */
	count = DIV_ROUND_UP(skb->len, 4);
	buf = skb->data;
	for (w = 0; w < count; w++, buf += 4) {
		/* skb data is not guaranteed to be word aligned */
		val = get_unaligned((const u32 *)buf);
		spx5_wr(val, sparx5, QS_INJ_WR(grp));
	}

	/* Pad with zero words up to the 60-byte minimum Ethernet frame
	 * size (excluding FCS).
	 */
	while (w < (60 / 4)) {
		spx5_wr(0, sparx5, QS_INJ_WR(grp));
		w++;
	}

	/* Indicate EOF and the number of valid bytes in the last word
	 * (0 means "all 4" and is used for padded frames).
	 */
	spx5_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
		QS_INJ_CTRL_VLD_BYTES_SET(skb->len < 60 ? 0 : skb->len % 4) |
		QS_INJ_CTRL_EOF_SET(1),
		sparx5, QS_INJ_CTRL(grp));

	/* Add a dummy CRC word (assumed recomputed by HW — TODO confirm) */
	spx5_wr(0, sparx5, QS_INJ_WR(grp));
	w++;

	/* If the FIFO watermark has been reached, stop the TX queue and
	 * arm the injection timer, which wakes the queue again (see
	 * sparx5_injection_timeout()).
	 */
	val = spx5_rd(sparx5, QS_INJ_STATUS);
	if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) {
		struct sparx5_port *port = netdev_priv(ndev);

		pr_err_ratelimited("Injection: Watermark reached: 0x%lx\n",
				   QS_INJ_STATUS_WMARK_REACHED_GET(val));
		netif_stop_queue(ndev);
		hrtimer_start(&port->inj_timer, INJ_TIMEOUT_NS,
			      HRTIMER_MODE_REL);
	}

	return NETDEV_TX_OK;
}
0224
/* Common transmit entry point.
 * Builds the IFH for the egress port, handles PTP TX timestamp requests,
 * and dispatches the frame via FDMA when an FDMA interrupt is configured,
 * otherwise via manual register injection.
 */
int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;
	u32 ifh[IFH_LEN];
	int ret;

	/* Start from a zeroed IFH and set the egress port field */
	memset(ifh, 0, IFH_LEN * 4);
	sparx5_set_port_ifh(ifh, port->portno);

	if (sparx5->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		ret = sparx5_ptp_txtstamp_request(port, skb);
		if (ret)
			return ret;

		/* Propagate the rewriter op and PTP metadata in the IFH */
		sparx5_set_port_ifh_rew_op(ifh, SPARX5_SKB_CB(skb)->rew_op);
		sparx5_set_port_ifh_pdu_type(ifh, SPARX5_SKB_CB(skb)->pdu_type);
		sparx5_set_port_ifh_pdu_w16_offset(ifh, SPARX5_SKB_CB(skb)->pdu_w16_offset);
		sparx5_set_port_ifh_timestamp(ifh, SPARX5_SKB_CB(skb)->ts_id);
	}

	skb_tx_timestamp(skb);
	if (sparx5->fdma_irq > 0)
		ret = sparx5_fdma_xmit(sparx5, ifh, skb);
	else
		ret = sparx5_inject(sparx5, ifh, skb, dev);

	if (ret == NETDEV_TX_OK) {
		stats->tx_bytes += skb->len;
		stats->tx_packets++;

		/* Two-step PTP frames must not be freed here — presumably
		 * the skb is kept until the TX timestamp is read back and
		 * released by the PTP code; verify against sparx5_ptp.
		 */
		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
		    SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
			return ret;

		dev_kfree_skb_any(skb);
	} else {
		stats->tx_dropped++;

		/* On failure, give back the timestamp slot reserved by
		 * sparx5_ptp_txtstamp_request() above.
		 */
		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
		    SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
			sparx5_ptp_txtstamp_release(port, skb);
	}
	return ret;
}
0271
0272 static enum hrtimer_restart sparx5_injection_timeout(struct hrtimer *tmr)
0273 {
0274 struct sparx5_port *port = container_of(tmr, struct sparx5_port,
0275 inj_timer);
0276 int grp = INJ_QUEUE;
0277 u32 val;
0278
0279 val = spx5_rd(port->sparx5, QS_INJ_STATUS);
0280 if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) {
0281 pr_err_ratelimited("Injection: Reset watermark count\n");
0282
0283 spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
0284 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
0285 port->sparx5,
0286 DSM_DEV_TX_STOP_WM_CFG(port->portno));
0287 }
0288 netif_wake_queue(port->ndev);
0289 return HRTIMER_NORESTART;
0290 }
0291
/* Configure the chip for manual (register based) frame injection and
 * extraction on the CPU port queues.  Always returns 0.
 */
int sparx5_manual_injection_mode(struct sparx5 *sparx5)
{
	const int byte_swap = 1;
	int portno;

	/* Change mode to manual extraction and injection */
	spx5_wr(QS_XTR_GRP_CFG_MODE_SET(1) |
		QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) |
		QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap),
		sparx5, QS_XTR_GRP_CFG(XTR_QUEUE));
	spx5_wr(QS_INJ_GRP_CFG_MODE_SET(1) |
		QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap),
		sparx5, QS_INJ_GRP_CFG(INJ_QUEUE));

	/* CPU ports capture setup */
	for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) {
		/* ASM CPU port: no preamble, IFH prepended, enable padding */
		spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) |
			ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) |
			ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1),
			sparx5, ASM_PORT_CFG(portno));

		/* Reset the watermark counter to unclog queued frames */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(portno));

		/* Set the disassembler TX stop watermark level to 0 */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(0),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(portno));

		/* Clear the underflow-watchdog disable bit, i.e. keep the
		 * disassembler buffer underflow watchdog enabled.
		 */
		spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(0),
			 DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS,
			 sparx5,
			 DSM_BUF_CFG(portno));
	}
	return 0;
}
0335
0336 irqreturn_t sparx5_xtr_handler(int irq, void *_sparx5)
0337 {
0338 struct sparx5 *s5 = _sparx5;
0339 int poll = 64;
0340
0341
0342 while (spx5_rd(s5, QS_XTR_DATA_PRESENT) & BIT(XTR_QUEUE) && poll-- > 0)
0343 sparx5_xtr_grp(s5, XTR_QUEUE, false);
0344
0345 return IRQ_HANDLED;
0346 }
0347
/* Initialize the per-port injection backpressure timer.  The timer is
 * armed by sparx5_inject() when the FIFO watermark is reached and wakes
 * the TX queue again in sparx5_injection_timeout().
 */
void sparx5_port_inj_timer_setup(struct sparx5_port *port)
{
	hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->inj_timer.function = sparx5_injection_timeout;
}