// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) Netronome Systems, Inc. */

#include <linux/bpf_trace.h>
#include <linux/netdevice.h>
#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/bitfield.h>

#include "../nfp_app.h"
#include "../nfp_net.h"
#include "../nfp_net_dp.h"
#include "../crypto/crypto.h"
#include "../crypto/fw.h"
#include "nfdk.h"

static int nfp_nfdk_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
{
	return !nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT * 2);
}

static int nfp_nfdk_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
{
	return nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT);
}

static void nfp_nfdk_tx_ring_stop(struct netdev_queue *nd_q,
				  struct nfp_net_tx_ring *tx_ring)
{
	netif_tx_stop_queue(nd_q);

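	/* We can race with the TX completion out of NAPI so recheck */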
	smp_mb();
	if (unlikely(nfp_nfdk_tx_ring_should_wake(tx_ring)))
		netif_tx_start_queue(nd_q);
}

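/* Fill in the LSO fields of a TX descriptor for the given skb and stash the
 * segment accounting in @txbuf; returns the raw descriptor word.
 */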
static __le64
nfp_nfdk_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfdk_tx_buf *txbuf,
		struct sk_buff *skb)
{
	u32 segs, hdrlen, l3_offset, l4_offset;
	struct nfp_nfdk_tx_desc txd;
	u16 mss;

	if (!skb->encapsulation) {
		l3_offset = skb_network_offset(skb);
		l4_offset = skb_transport_offset(skb);
		hdrlen = skb_tcp_all_headers(skb);
	} else {
		l3_offset = skb_inner_network_offset(skb);
		l4_offset = skb_inner_transport_offset(skb);
		hdrlen = skb_inner_tcp_all_headers(skb);
	}

	segs = skb_shinfo(skb)->gso_segs;
	mss = skb_shinfo(skb)->gso_size & NFDK_DESC_TX_MSS_MASK;

	txd.l3_offset = l3_offset;
	txd.l4_offset = l4_offset;
	txd.lso_meta_res = 0;
	txd.mss = cpu_to_le16(mss);
	txd.lso_hdrlen = hdrlen;
	txd.lso_totsegs = segs;

	txbuf->pkt_cnt = segs;
	txbuf->real_len = skb->len + hdrlen * (txbuf->pkt_cnt - 1);

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_lso++;
	u64_stats_update_end(&r_vec->tx_sync);

	return txd.raw;
}

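/* Set checksum offload flags in the descriptor metadata word and update
 * the per-vector checksum counters.
 */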
static u64
nfp_nfdk_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
		 unsigned int pkt_cnt, struct sk_buff *skb, u64 flags)
{
	struct ipv6hdr *ipv6h;
	struct iphdr *iph;

	if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
		return flags;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return flags;

	flags |= NFDK_DESC_TX_L4_CSUM;

	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);

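	/* L3 checksum offloading flag is only needed for IPv4,
	 * IPv6 has no header checksum.
	 */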
	if (iph->version == 4) {
		flags |= NFDK_DESC_TX_L3_CSUM;
	} else if (ipv6h->version != 6) {
		nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
		return flags;
	}

	u64_stats_update_begin(&r_vec->tx_sync);
	if (!skb->encapsulation) {
		r_vec->hw_csum_tx += pkt_cnt;
	} else {
		flags |= NFDK_DESC_TX_ENCAP;
		r_vec->hw_csum_tx_inner += pkt_cnt;
	}
	u64_stats_update_end(&r_vec->tx_sync);

	return flags;
}

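/* Descriptors for one packet must not cross a block boundary and the total
 * data queued in a block is capped.  If the skb won't fit in the remainder
 * of the current block, pad it out with NOP slots so the packet starts on a
 * fresh block; linearize the skb first if it needs more descriptors than a
 * gather can take.
 */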
static int
nfp_nfdk_tx_maybe_close_block(struct nfp_net_tx_ring *tx_ring,
			      struct sk_buff *skb)
{
	unsigned int n_descs, wr_p, nop_slots;
	const skb_frag_t *frag, *fend;
	struct nfp_nfdk_tx_desc *txd;
	unsigned int nr_frags;
	unsigned int wr_idx;
	int err;

recount_descs:
	n_descs = nfp_nfdk_headlen_to_segs(skb_headlen(skb));
	nr_frags = skb_shinfo(skb)->nr_frags;
	frag = skb_shinfo(skb)->frags;
	fend = frag + nr_frags;
	for (; frag < fend; frag++)
		n_descs += DIV_ROUND_UP(skb_frag_size(frag),
					NFDK_TX_MAX_DATA_PER_DESC);

	if (unlikely(n_descs > NFDK_TX_DESC_GATHER_MAX)) {
		if (skb_is_nonlinear(skb)) {
			err = skb_linearize(skb);
			if (err)
				return err;
			goto recount_descs;
		}
		return -EINVAL;
	}

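	/* Under count by 1 (don't count meta) for the round down to work out */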
	n_descs += !!skb_is_gso(skb);

	if (round_down(tx_ring->wr_p, NFDK_TX_DESC_BLOCK_CNT) !=
	    round_down(tx_ring->wr_p + n_descs, NFDK_TX_DESC_BLOCK_CNT))
		goto close_block;

	if ((u32)tx_ring->data_pending + skb->len > NFDK_TX_MAX_DATA_PER_BLOCK)
		goto close_block;

	return 0;

close_block:
	wr_p = tx_ring->wr_p;
	nop_slots = D_BLOCK_CPL(wr_p);

	wr_idx = D_IDX(tx_ring, wr_p);
	tx_ring->ktxbufs[wr_idx].skb = NULL;
	txd = &tx_ring->ktxds[wr_idx];

	memset(txd, 0, array_size(nop_slots, sizeof(struct nfp_nfdk_tx_desc)));

	tx_ring->data_pending = 0;
	tx_ring->wr_p += nop_slots;
	tx_ring->wr_ptr_add += nop_slots;

	return 0;
}

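/* Prepend TX metadata (port id mux and/or VLAN insertion) to the packet.
 * Returns 0 if no metadata is needed, NFDK_DESC_TX_CHAIN_META if metadata
 * was prepended, or -ENOMEM if headroom could not be made.
 */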
static int
nfp_nfdk_prep_tx_meta(struct nfp_net_dp *dp, struct nfp_app *app,
		      struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	unsigned char *data;
	bool vlan_insert;
	u32 meta_id = 0;
	int md_bytes;

	if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX))
		md_dst = NULL;

	vlan_insert = skb_vlan_tag_present(skb) && (dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN_V2);

	if (!(md_dst || vlan_insert))
		return 0;

	md_bytes = sizeof(meta_id) +
		   !!md_dst * NFP_NET_META_PORTID_SIZE +
		   vlan_insert * NFP_NET_META_VLAN_SIZE;

	if (unlikely(skb_cow_head(skb, md_bytes)))
		return -ENOMEM;

	data = skb_push(skb, md_bytes) + md_bytes;
	if (md_dst) {
		data -= NFP_NET_META_PORTID_SIZE;
		put_unaligned_be32(md_dst->u.port_info.port_id, data);
		meta_id = NFP_NET_META_PORTID;
	}
	if (vlan_insert) {
		data -= NFP_NET_META_VLAN_SIZE;
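		/* skb->vlan_proto is already __be16, so memcpy stores it in
		 * network order without put_unaligned_be16()
		 */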
		memcpy(data, &skb->vlan_proto, sizeof(skb->vlan_proto));
		put_unaligned_be16(skb_vlan_tag_get(skb), data + sizeof(skb->vlan_proto));
		meta_id <<= NFP_NET_META_FIELD_SIZE;
		meta_id |= NFP_NET_META_VLAN;
	}

	meta_id = FIELD_PREP(NFDK_META_LEN, md_bytes) |
		  FIELD_PREP(NFDK_META_FIELDS, meta_id);

	data -= sizeof(meta_id);
	put_unaligned_be32(meta_id, data);

	return NFDK_DESC_TX_CHAIN_META;
}

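/**
 * nfp_nfdk_tx() - Main transmit entry point
 * @skb:    SKB to transmit
 * @netdev: netdev structure
 *
 * Return: NETDEV_TX_OK on success.
 */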
netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_nfdk_tx_buf *txbuf, *etxbuf;
	u32 cnt, tmp_dlen, dlen_type = 0;
	struct nfp_net_tx_ring *tx_ring;
	struct nfp_net_r_vector *r_vec;
	const skb_frag_t *frag, *fend;
	struct nfp_nfdk_tx_desc *txd;
	unsigned int real_len, qidx;
	unsigned int dma_len, type;
	struct netdev_queue *nd_q;
	struct nfp_net_dp *dp;
	int nr_frags, wr_idx;
	dma_addr_t dma_addr;
	u64 metadata;

	dp = &nn->dp;
	qidx = skb_get_queue_mapping(skb);
	tx_ring = &dp->tx_rings[qidx];
	r_vec = tx_ring->r_vec;
	nd_q = netdev_get_tx_queue(dp->netdev, qidx);

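	/* Don't bother counting frags, assume the worst */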
	if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
		nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
			   qidx, tx_ring->wr_p, tx_ring->rd_p);
		netif_tx_stop_queue(nd_q);
		nfp_net_tx_xmit_more_flush(tx_ring);
		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tx_busy++;
		u64_stats_update_end(&r_vec->tx_sync);
		return NETDEV_TX_BUSY;
	}

	metadata = nfp_nfdk_prep_tx_meta(dp, nn->app, skb);
	if (unlikely((int)metadata < 0))
		goto err_flush;

	if (nfp_nfdk_tx_maybe_close_block(tx_ring, skb))
		goto err_flush;

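	/* nr_frags will change after skb_linearize so we get nr_frags after
	 * nfp_nfdk_tx_maybe_close_block function
	 */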
	nr_frags = skb_shinfo(skb)->nr_frags;

	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
	txd = &tx_ring->ktxds[wr_idx];
	txbuf = &tx_ring->ktxbufs[wr_idx];

	dma_len = skb_headlen(skb);
	if (skb_is_gso(skb))
		type = NFDK_DESC_TX_TYPE_TSO;
	else if (!nr_frags && dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
		type = NFDK_DESC_TX_TYPE_SIMPLE;
	else
		type = NFDK_DESC_TX_TYPE_GATHER;

	dma_addr = dma_map_single(dp->dev, skb->data, dma_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dp->dev, dma_addr))
		goto err_warn_dma;

	txbuf->skb = skb;
	txbuf++;

	txbuf->dma_addr = dma_addr;
	txbuf++;

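	/* FIELD_PREP() implicitly truncates to chunk */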
	dma_len -= 1;

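	/* Pack as much data as possible into each descriptor, but the first
	 * descriptor must cover the whole head since the firmware requires
	 * it; dma_len masked with NFDK_DESC_TX_DMA_LEN_HEAD can come out
	 * smaller than headlen.
	 */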
	dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD,
			       dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ?
			       NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) |
		    FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);

	txd->dma_len_type = cpu_to_le16(dlen_type);
	nfp_desc_set_dma_addr_48b(txd, dma_addr);

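	/* starts at bit 0 */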
	BUILD_BUG_ON(!(NFDK_DESC_TX_DMA_LEN_HEAD & 1));

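	/* Preserve the original dlen_type, this way below the EOP logic
	 * can use dlen_type.
	 */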
	tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
	dma_len -= tmp_dlen;
	dma_addr += tmp_dlen + 1;
	txd++;

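	/* The rest of the data (if any) will be in larger DMA descriptors
	 * and is handled with the dma_len loop.
	 */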
	frag = skb_shinfo(skb)->frags;
	fend = frag + nr_frags;

	while (true) {
		while (dma_len > 0) {
			dma_len -= 1;
			dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);

			txd->dma_len_type = cpu_to_le16(dlen_type);
			nfp_desc_set_dma_addr_48b(txd, dma_addr);

			dma_len -= dlen_type;
			dma_addr += dlen_type + 1;
			txd++;
		}

		if (frag >= fend)
			break;

		dma_len = skb_frag_size(frag);
		dma_addr = skb_frag_dma_map(dp->dev, frag, 0, dma_len,
					    DMA_TO_DEVICE);
		if (dma_mapping_error(dp->dev, dma_addr))
			goto err_unmap;

		txbuf->dma_addr = dma_addr;
		txbuf++;

		frag++;
	}

	(txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);

	if (!skb_is_gso(skb)) {
		real_len = skb->len;

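		/* Metadata desc */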
		metadata = nfp_nfdk_tx_csum(dp, r_vec, 1, skb, metadata);
		txd->raw = cpu_to_le64(metadata);
		txd++;
	} else {
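		/* lso desc should be placed after metadata desc */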
		(txd + 1)->raw = nfp_nfdk_tx_tso(r_vec, txbuf, skb);
		real_len = txbuf->real_len;
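		/* Metadata desc */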
		metadata = nfp_nfdk_tx_csum(dp, r_vec, txbuf->pkt_cnt, skb, metadata);
		txd->raw = cpu_to_le64(metadata);
		txd += 2;
		txbuf++;
	}

	cnt = txd - tx_ring->ktxds - wr_idx;
	if (unlikely(round_down(wr_idx, NFDK_TX_DESC_BLOCK_CNT) !=
		     round_down(wr_idx + cnt - 1, NFDK_TX_DESC_BLOCK_CNT)))
		goto err_warn_overflow;

	skb_tx_timestamp(skb);

	tx_ring->wr_p += cnt;
	if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT)
		tx_ring->data_pending += skb->len;
	else
		tx_ring->data_pending = 0;

	if (nfp_nfdk_tx_ring_should_stop(tx_ring))
		nfp_nfdk_tx_ring_stop(nd_q, tx_ring);

	tx_ring->wr_ptr_add += cnt;
	if (__netdev_tx_sent_queue(nd_q, real_len, netdev_xmit_more()))
		nfp_net_tx_xmit_more_flush(tx_ring);

	return NETDEV_TX_OK;

err_warn_overflow:
	WARN_ONCE(1, "unable to fit packet into a descriptor wr_idx:%d head:%d frags:%d cnt:%d",
		  wr_idx, skb_headlen(skb), nr_frags, cnt);
	if (skb_is_gso(skb))
		txbuf--;
err_unmap:
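	/* txbuf pointed to the next-to-use */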
	etxbuf = txbuf;
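	/* first txbuf holds the skb */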
	txbuf = &tx_ring->ktxbufs[wr_idx + 1];
	if (txbuf < etxbuf) {
		dma_unmap_single(dp->dev, txbuf->dma_addr,
				 skb_headlen(skb), DMA_TO_DEVICE);
		txbuf->raw = 0;
		txbuf++;
	}
	frag = skb_shinfo(skb)->frags;
	while (txbuf < etxbuf) {
		dma_unmap_page(dp->dev, txbuf->dma_addr,
			       skb_frag_size(frag), DMA_TO_DEVICE);
		txbuf->raw = 0;
		frag++;
		txbuf++;
	}
err_warn_dma:
	nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
err_flush:
	nfp_net_tx_xmit_more_flush(tx_ring);
	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_errors++;
	u64_stats_update_end(&r_vec->tx_sync);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

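/**
 * nfp_nfdk_tx_complete() - Handled completed TX packets
 * @tx_ring: TX ring structure
 * @budget: NAPI budget (only used as bool to determine if in NAPI context)
 */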
static void nfp_nfdk_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	u32 done_pkts = 0, done_bytes = 0;
	struct nfp_nfdk_tx_buf *ktxbufs;
	struct device *dev = dp->dev;
	struct netdev_queue *nd_q;
	u32 rd_p, qcp_rd_p;
	int todo;

	rd_p = tx_ring->rd_p;
	if (tx_ring->wr_p == rd_p)
		return;

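	/* Work out how many descriptors have been transmitted */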
	qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);

	if (qcp_rd_p == tx_ring->qcp_rd_p)
		return;

	todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
	ktxbufs = tx_ring->ktxbufs;

	while (todo > 0) {
		const skb_frag_t *frag, *fend;
		unsigned int size, n_descs = 1;
		struct nfp_nfdk_tx_buf *txbuf;
		struct sk_buff *skb;

		txbuf = &ktxbufs[D_IDX(tx_ring, rd_p)];
		skb = txbuf->skb;
		txbuf++;

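		/* Closed block */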
		if (!skb) {
			n_descs = D_BLOCK_CPL(rd_p);
			goto next;
		}

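		/* Unmap head */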
		size = skb_headlen(skb);
		n_descs += nfp_nfdk_headlen_to_segs(size);
		dma_unmap_single(dev, txbuf->dma_addr, size, DMA_TO_DEVICE);
		txbuf++;

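		/* Unmap frags */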
		frag = skb_shinfo(skb)->frags;
		fend = frag + skb_shinfo(skb)->nr_frags;
		for (; frag < fend; frag++) {
			size = skb_frag_size(frag);
			n_descs += DIV_ROUND_UP(size,
						NFDK_TX_MAX_DATA_PER_DESC);
			dma_unmap_page(dev, txbuf->dma_addr,
				       skb_frag_size(frag), DMA_TO_DEVICE);
			txbuf++;
		}

		if (!skb_is_gso(skb)) {
			done_bytes += skb->len;
			done_pkts++;
		} else {
			done_bytes += txbuf->real_len;
			done_pkts += txbuf->pkt_cnt;
			n_descs++;
		}

		napi_consume_skb(skb, budget);
next:
		rd_p += n_descs;
		todo -= n_descs;
	}

	tx_ring->rd_p = rd_p;
	tx_ring->qcp_rd_p = qcp_rd_p;

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_bytes += done_bytes;
	r_vec->tx_pkts += done_pkts;
	u64_stats_update_end(&r_vec->tx_sync);

	if (!dp->netdev)
		return;

	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
	netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
	if (nfp_nfdk_tx_ring_should_wake(tx_ring)) {
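		/* Make sure TX thread will see updated tx_ring->rd_p */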
		smp_mb();

		if (unlikely(netif_tx_queue_stopped(nd_q)))
			netif_tx_wake_queue(nd_q);
	}

	WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
		  "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
		  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
}

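/* Allocate a receive buffer (a full page when an XDP program is attached,
 * a page frag otherwise) and map it for device DMA.
 */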
static void *
nfp_nfdk_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
	void *frag;

	if (!dp->xdp_prog) {
		frag = napi_alloc_frag(dp->fl_bufsz);
		if (unlikely(!frag))
			return NULL;
	} else {
		struct page *page;

		page = dev_alloc_page();
		if (unlikely(!page))
			return NULL;
		frag = page_address(page);
	}

	*dma_addr = nfp_net_dma_map_rx(dp, frag);
	if (dma_mapping_error(dp->dev, *dma_addr)) {
		nfp_net_free_frag(frag, dp->xdp_prog);
		nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
		return NULL;
	}

	return frag;
}

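/**
 * nfp_nfdk_rx_give_one() - Put mapped skb on the software and hardware rings
 * @dp:		NFP Net data path struct
 * @rx_ring:	RX ring structure
 * @frag:	page fragment buffer
 * @dma_addr:	DMA address of skb mapping
 */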
static void
nfp_nfdk_rx_give_one(const struct nfp_net_dp *dp,
		     struct nfp_net_rx_ring *rx_ring,
		     void *frag, dma_addr_t dma_addr)
{
	unsigned int wr_idx;

	wr_idx = D_IDX(rx_ring, rx_ring->wr_p);

	nfp_net_dma_sync_dev_rx(dp, dma_addr);

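	/* Stash SKB and DMA address away */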
	rx_ring->rxbufs[wr_idx].frag = frag;
	rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;

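	/* Fill freelist descriptor */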
	rx_ring->rxds[wr_idx].fld.reserved = 0;
	rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
	nfp_desc_set_dma_addr_48b(&rx_ring->rxds[wr_idx].fld,
				  dma_addr + dp->rx_dma_off);

	rx_ring->wr_p++;
	if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
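		/* Update write pointer of the freelist queue. Make
		 * sure all writes are flushed before telling the hardware.
		 */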
		wmb();
		nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
	}
}

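/**
 * nfp_nfdk_rx_ring_fill_freelist() - Give buffers from the ring to FW
 * @dp:	     NFP Net data path struct
 * @rx_ring: RX ring to fill
 */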
void nfp_nfdk_rx_ring_fill_freelist(struct nfp_net_dp *dp,
				    struct nfp_net_rx_ring *rx_ring)
{
	unsigned int i;

	for (i = 0; i < rx_ring->cnt - 1; i++)
		nfp_nfdk_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
				     rx_ring->rxbufs[i].dma_addr);
}

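/**
 * nfp_nfdk_rx_csum_has_errors() - group check if rxd has any csum errors
 * @flags: RX descriptor flags field in CPU byte order
 */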
static int nfp_nfdk_rx_csum_has_errors(u16 flags)
{
	u16 csum_all_checked, csum_all_ok;

	csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
	csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;

	return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
}

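/**
 * nfp_nfdk_rx_csum() - set SKB checksum field based on RX descriptor flags
 * @dp:	 NFP Net data path struct
 * @r_vec: per-ring structure
 * @rxd: Pointer to RX descriptor
 * @meta: Parsed metadata prepend
 * @skb: Pointer to SKB
 */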
static void
nfp_nfdk_rx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
		 struct nfp_net_rx_desc *rxd, struct nfp_meta_parsed *meta,
		 struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	if (!(dp->netdev->features & NETIF_F_RXCSUM))
		return;

	if (meta->csum_type) {
		skb->ip_summed = meta->csum_type;
		skb->csum = meta->csum;
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_complete++;
		u64_stats_update_end(&r_vec->rx_sync);
		return;
	}

	if (nfp_nfdk_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_error++;
		u64_stats_update_end(&r_vec->rx_sync);
		return;
	}

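	/* Assume that the firmware will never report inner csum ok unless
	 * outer L4 headers were successfully parsed.
	 */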
	if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
	    rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
		__skb_incr_checksum_unnecessary(skb);
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_ok++;
		u64_stats_update_end(&r_vec->rx_sync);
	}

	if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
	    rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
		__skb_incr_checksum_unnecessary(skb);
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_inner_ok++;
		u64_stats_update_end(&r_vec->rx_sync);
	}
}

static void
nfp_nfdk_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
		  unsigned int type, __be32 *hash)
{
	if (!(netdev->features & NETIF_F_RXHASH))
		return;

	switch (type) {
	case NFP_NET_RSS_IPV4:
	case NFP_NET_RSS_IPV6:
	case NFP_NET_RSS_IPV6_EX:
		meta->hash_type = PKT_HASH_TYPE_L3;
		break;
	default:
		meta->hash_type = PKT_HASH_TYPE_L4;
		break;
	}

	meta->hash = get_unaligned_be32(hash);
}

static bool
nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
		    void *data, void *pkt, unsigned int pkt_len, int meta_len)
{
	u32 meta_info, vlan_info;

	meta_info = get_unaligned_be32(data);
	data += 4;

	while (meta_info) {
		switch (meta_info & NFP_NET_META_FIELD_MASK) {
		case NFP_NET_META_HASH:
			meta_info >>= NFP_NET_META_FIELD_SIZE;
			nfp_nfdk_set_hash(netdev, meta,
					  meta_info & NFP_NET_META_FIELD_MASK,
					  (__be32 *)data);
			data += 4;
			break;
		case NFP_NET_META_MARK:
			meta->mark = get_unaligned_be32(data);
			data += 4;
			break;
		case NFP_NET_META_VLAN:
			vlan_info = get_unaligned_be32(data);
			if (FIELD_GET(NFP_NET_META_VLAN_STRIP, vlan_info)) {
				meta->vlan.stripped = true;
				meta->vlan.tpid = FIELD_GET(NFP_NET_META_VLAN_TPID_MASK,
							    vlan_info);
				meta->vlan.tci = FIELD_GET(NFP_NET_META_VLAN_TCI_MASK,
							   vlan_info);
			}
			data += 4;
			break;
		case NFP_NET_META_PORTID:
			meta->portid = get_unaligned_be32(data);
			data += 4;
			break;
		case NFP_NET_META_CSUM:
			meta->csum_type = CHECKSUM_COMPLETE;
			meta->csum =
				(__force __wsum)__get_unaligned_cpu32(data);
			data += 4;
			break;
		case NFP_NET_META_RESYNC_INFO:
			if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
						      pkt_len))
				return false;
			data += sizeof(struct nfp_net_tls_resync_req);
			break;
		default:
			return true;
		}

		meta_info >>= NFP_NET_META_FIELD_SIZE;
	}

	return data != pkt;
}

static void
nfp_nfdk_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
		 struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
		 struct sk_buff *skb)
{
	u64_stats_update_begin(&r_vec->rx_sync);
	r_vec->rx_drops++;
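	/* If we have both skb and rxbuf the replacement buffer allocation
	 * must have failed, count this as an alloc failure.
	 */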
	if (skb && rxbuf)
		r_vec->rx_replace_buf_alloc_fail++;
	u64_stats_update_end(&r_vec->rx_sync);

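	/* skb is built based on the frag, free_skb() would free the frag so
	 * to be able to reuse it we need an extra ref.
	 */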
	if (skb && rxbuf && skb->head == rxbuf->frag)
		page_ref_inc(virt_to_head_page(rxbuf->frag));
	if (rxbuf)
		nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
	if (skb)
		dev_kfree_skb_any(skb);
}

static bool nfp_nfdk_xdp_complete(struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	struct nfp_net_rx_ring *rx_ring;
	u32 qcp_rd_p, done = 0;
	bool done_all;
	int todo;

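	/* Work out how many descriptors have been transmitted */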
	qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);
	if (qcp_rd_p == tx_ring->qcp_rd_p)
		return true;

	todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);

	done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
	todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);

	rx_ring = r_vec->rx_ring;
	while (todo > 0) {
		int idx = D_IDX(tx_ring, tx_ring->rd_p + done);
		struct nfp_nfdk_tx_buf *txbuf;
		unsigned int step = 1;

		txbuf = &tx_ring->ktxbufs[idx];
		if (!txbuf->raw)
			goto next;

		if (NFDK_TX_BUF_INFO(txbuf->val) != NFDK_TX_BUF_INFO_SOP) {
			WARN_ONCE(1, "Unexpected TX buffer in XDP TX ring\n");
			goto next;
		}

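		/* Two successive txbufs are used to stash virtual and dma
		 * address respectively, give the buffer back to the RX ring.
		 */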
		nfp_nfdk_rx_give_one(dp, rx_ring,
				     (void *)NFDK_TX_BUF_PTR(txbuf[0].val),
				     txbuf[1].dma_addr);
		txbuf[0].raw = 0;
		txbuf[1].raw = 0;
		step = 2;

		u64_stats_update_begin(&r_vec->tx_sync);
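		/* Note: tx_bytes not accumulated. */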
		r_vec->tx_pkts++;
		u64_stats_update_end(&r_vec->tx_sync);
next:
		todo -= step;
		done += step;
	}

	tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + done);
	tx_ring->rd_p += done;

	WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
		  "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
		  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);

	return done_all;
}

static bool
nfp_nfdk_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
		    struct nfp_net_tx_ring *tx_ring,
		    struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
		    unsigned int pkt_len, bool *completed)
{
	unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
	unsigned int dma_len, type, cnt, dlen_type, tmp_dlen;
	struct nfp_nfdk_tx_buf *txbuf;
	struct nfp_nfdk_tx_desc *txd;
	unsigned int n_descs;
	dma_addr_t dma_addr;
	int wr_idx;

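	/* Reject if xdp_adjust_tail grow packet beyond DMA area */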
	if (pkt_len + dma_off > dma_map_sz)
		return false;

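	/* Make sure there's still at least one block available after
	 * aligning to block boundary, so that the txds used below
	 * won't wrap around the tx_ring.
	 */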
	if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
		if (!*completed) {
			nfp_nfdk_xdp_complete(tx_ring);
			*completed = true;
		}

		if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
			nfp_nfdk_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
					 NULL);
			return false;
		}
	}

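	/* Check if cross block boundary */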
	n_descs = nfp_nfdk_headlen_to_segs(pkt_len);
	if ((round_down(tx_ring->wr_p, NFDK_TX_DESC_BLOCK_CNT) !=
	     round_down(tx_ring->wr_p + n_descs, NFDK_TX_DESC_BLOCK_CNT)) ||
	    ((u32)tx_ring->data_pending + pkt_len >
	     NFDK_TX_MAX_DATA_PER_BLOCK)) {
		unsigned int nop_slots = D_BLOCK_CPL(tx_ring->wr_p);

		wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
		txd = &tx_ring->ktxds[wr_idx];
		memset(txd, 0,
		       array_size(nop_slots, sizeof(struct nfp_nfdk_tx_desc)));

		tx_ring->data_pending = 0;
		tx_ring->wr_p += nop_slots;
		tx_ring->wr_ptr_add += nop_slots;
	}

	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);

	txbuf = &tx_ring->ktxbufs[wr_idx];

	txbuf[0].val = (unsigned long)rxbuf->frag | NFDK_TX_BUF_INFO_SOP;
	txbuf[1].dma_addr = rxbuf->dma_addr;
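	/* Note: pkt len not stored */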

	dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
				   pkt_len, DMA_BIDIRECTIONAL);

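	/* Build TX descriptor */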
	txd = &tx_ring->ktxds[wr_idx];
	dma_len = pkt_len;
	dma_addr = rxbuf->dma_addr + dma_off;

	if (dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
		type = NFDK_DESC_TX_TYPE_SIMPLE;
	else
		type = NFDK_DESC_TX_TYPE_GATHER;

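	/* FIELD_PREP() implicitly truncates to chunk */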
	dma_len -= 1;
	dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD,
			       dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ?
			       NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) |
		    FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);

	txd->dma_len_type = cpu_to_le16(dlen_type);
	nfp_desc_set_dma_addr_48b(txd, dma_addr);

	tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
	dma_len -= tmp_dlen;
	dma_addr += tmp_dlen + 1;
	txd++;

	while (dma_len > 0) {
		dma_len -= 1;
		dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
		txd->dma_len_type = cpu_to_le16(dlen_type);
		nfp_desc_set_dma_addr_48b(txd, dma_addr);

		dlen_type &= NFDK_DESC_TX_DMA_LEN;
		dma_len -= dlen_type;
		dma_addr += dlen_type + 1;
		txd++;
	}

	(txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);

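	/* Metadata desc */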
	txd->raw = 0;
	txd++;

	cnt = txd - tx_ring->ktxds - wr_idx;
	tx_ring->wr_p += cnt;
	if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT)
		tx_ring->data_pending += pkt_len;
	else
		tx_ring->data_pending = 0;

	tx_ring->wr_ptr_add += cnt;
	return true;
}

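/**
 * nfp_nfdk_rx() - receive up to @budget packets on @rx_ring
 * @rx_ring:   RX ring to receive from
 * @budget:    NAPI budget
 *
 * Note, this function is separated out from the napi poll function to
 * more cleanly separate packet receive code from other bookkeeping
 * functions performed in the napi poll function.
 *
 * Return: Number of packets received.
 */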
static int nfp_nfdk_rx(struct nfp_net_rx_ring *rx_ring, int budget)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	struct nfp_net_tx_ring *tx_ring;
	struct bpf_prog *xdp_prog;
	bool xdp_tx_cmpl = false;
	unsigned int true_bufsz;
	struct sk_buff *skb;
	int pkts_polled = 0;
	struct xdp_buff xdp;
	int idx;

	xdp_prog = READ_ONCE(dp->xdp_prog);
	true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
	xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
		      &rx_ring->xdp_rxq);
	tx_ring = r_vec->xdp_ring;

	while (pkts_polled < budget) {
		unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
		struct nfp_net_rx_buf *rxbuf;
		struct nfp_net_rx_desc *rxd;
		struct nfp_meta_parsed meta;
		bool redir_egress = false;
		struct net_device *netdev;
		dma_addr_t new_dma_addr;
		u32 meta_len_xdp = 0;
		void *new_frag;

		idx = D_IDX(rx_ring, rx_ring->rd_p);

		rxd = &rx_ring->rxds[idx];
		if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
			break;

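		/* Memory barrier to ensure that we won't do other reads
		 * before the DD bit.
		 */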
		dma_rmb();

		memset(&meta, 0, sizeof(meta));

		rx_ring->rd_p++;
		pkts_polled++;

		rxbuf = &rx_ring->rxbufs[idx];
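		/*         < meta_len >
		 *  <-- [rx_offset] -->
		 *  ---------------------------------------------------------
		 * | [XX] |  metadata  |             packet           | XXXX |
		 *  ---------------------------------------------------------
		 *         <---------------- data_len --------------->
		 *
		 * The rx_offset is fixed for all packets, the meta_len can vary
		 * on a packet by packet basis. If rx_offset is set to zero
		 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
		 * buffer and is immediately followed by the packet (no [XX]).
		 */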
		meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
		data_len = le16_to_cpu(rxd->rxd.data_len);
		pkt_len = data_len - meta_len;

		pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
		if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
			pkt_off += meta_len;
		else
			pkt_off += dp->rx_offset;
		meta_off = pkt_off - meta_len;

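		/* Stats update */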
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->rx_pkts++;
		r_vec->rx_bytes += pkt_len;
		u64_stats_update_end(&r_vec->rx_sync);

		if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
			     (dp->rx_offset && meta_len > dp->rx_offset))) {
			nn_dp_warn(dp, "oversized RX packet metadata %u\n",
				   meta_len);
			nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
			continue;
		}

		nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
					data_len);

		if (meta_len) {
			if (unlikely(nfp_nfdk_parse_meta(dp->netdev, &meta,
							 rxbuf->frag + meta_off,
							 rxbuf->frag + pkt_off,
							 pkt_len, meta_len))) {
				nn_dp_warn(dp, "invalid RX packet metadata\n");
				nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf,
						 NULL);
				continue;
			}
		}

		if (xdp_prog && !meta.portid) {
			void *orig_data = rxbuf->frag + pkt_off;
			unsigned int dma_off;
			int act;

			xdp_prepare_buff(&xdp,
					 rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
					 pkt_off - NFP_NET_RX_BUF_HEADROOM,
					 pkt_len, true);

			act = bpf_prog_run_xdp(xdp_prog, &xdp);

			pkt_len = xdp.data_end - xdp.data;
			pkt_off += xdp.data - orig_data;

			switch (act) {
			case XDP_PASS:
				meta_len_xdp = xdp.data - xdp.data_meta;
				break;
			case XDP_TX:
				dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
				if (unlikely(!nfp_nfdk_tx_xdp_buf(dp, rx_ring,
								  tx_ring,
								  rxbuf,
								  dma_off,
								  pkt_len,
								  &xdp_tx_cmpl)))
					trace_xdp_exception(dp->netdev,
							    xdp_prog, act);
				continue;
			default:
				bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
				fallthrough;
			case XDP_ABORTED:
				trace_xdp_exception(dp->netdev, xdp_prog, act);
				fallthrough;
			case XDP_DROP:
				nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag,
						     rxbuf->dma_addr);
				continue;
			}
		}

		if (likely(!meta.portid)) {
			netdev = dp->netdev;
		} else if (meta.portid == NFP_META_PORT_ID_CTRL) {
			struct nfp_net *nn = netdev_priv(dp->netdev);

			nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
					    pkt_len);
			nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag,
					     rxbuf->dma_addr);
			continue;
		} else {
			struct nfp_net *nn;

			nn = netdev_priv(dp->netdev);
			netdev = nfp_app_dev_get(nn->app, meta.portid,
						 &redir_egress);
			if (unlikely(!netdev)) {
				nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf,
						 NULL);
				continue;
			}

			if (nfp_netdev_is_nfp_repr(netdev))
				nfp_repr_inc_rx_stats(netdev, pkt_len);
		}

		skb = build_skb(rxbuf->frag, true_bufsz);
		if (unlikely(!skb)) {
			nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
			continue;
		}
		new_frag = nfp_nfdk_napi_alloc_one(dp, &new_dma_addr);
		if (unlikely(!new_frag)) {
			nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
			continue;
		}

		nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);

		nfp_nfdk_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);

		skb_reserve(skb, pkt_off);
		skb_put(skb, pkt_len);

		skb->mark = meta.mark;
		skb_set_hash(skb, meta.hash, meta.hash_type);

		skb_record_rx_queue(skb, rx_ring->idx);
		skb->protocol = eth_type_trans(skb, netdev);

		nfp_nfdk_rx_csum(dp, r_vec, rxd, &meta, skb);

		if (unlikely(!nfp_net_vlan_strip(skb, rxd, &meta))) {
			nfp_nfdk_rx_drop(dp, r_vec, rx_ring, NULL, skb);
			continue;
		}

		if (meta_len_xdp)
			skb_metadata_set(skb, meta_len_xdp);

		if (likely(!redir_egress)) {
			napi_gro_receive(&rx_ring->r_vec->napi, skb);
		} else {
			skb->dev = netdev;
			skb_reset_network_header(skb);
			__skb_push(skb, ETH_HLEN);
			dev_queue_xmit(skb);
		}
	}

	if (xdp_prog) {
		if (tx_ring->wr_ptr_add)
			nfp_net_tx_xmit_more_flush(tx_ring);
		else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
			 !xdp_tx_cmpl)
			if (!nfp_nfdk_xdp_complete(tx_ring))
				pkts_polled = budget;
	}

	return pkts_polled;
}

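/**
 * nfp_nfdk_poll() - napi poll function
 * @napi:    NAPI structure
 * @budget:  NAPI budget
 *
 * Return: number of packets polled.
 */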
int nfp_nfdk_poll(struct napi_struct *napi, int budget)
{
	struct nfp_net_r_vector *r_vec =
		container_of(napi, struct nfp_net_r_vector, napi);
	unsigned int pkts_polled = 0;

	if (r_vec->tx_ring)
		nfp_nfdk_tx_complete(r_vec->tx_ring, budget);
	if (r_vec->rx_ring)
		pkts_polled = nfp_nfdk_rx(r_vec->rx_ring, budget);

	if (pkts_polled < budget)
		if (napi_complete_done(napi, pkts_polled))
			nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);

	if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
		struct dim_sample dim_sample = {};
		unsigned int start;
		u64 pkts, bytes;

		do {
			start = u64_stats_fetch_begin(&r_vec->rx_sync);
			pkts = r_vec->rx_pkts;
			bytes = r_vec->rx_bytes;
		} while (u64_stats_fetch_retry(&r_vec->rx_sync, start));

		dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
		net_dim(&r_vec->rx_dim, dim_sample);
	}

	if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
		struct dim_sample dim_sample = {};
		unsigned int start;
		u64 pkts, bytes;

		do {
			start = u64_stats_fetch_begin(&r_vec->tx_sync);
			pkts = r_vec->tx_pkts;
			bytes = r_vec->tx_bytes;
		} while (u64_stats_fetch_retry(&r_vec->tx_sync, start));

		dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
		net_dim(&r_vec->tx_dim, dim_sample);
	}

	return pkts_polled;
}

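/* Control device data path
 */
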
bool
nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
		     struct sk_buff *skb, bool old)
{
	u32 cnt, tmp_dlen, dlen_type = 0;
	struct nfp_net_tx_ring *tx_ring;
	struct nfp_nfdk_tx_buf *txbuf;
	struct nfp_nfdk_tx_desc *txd;
	unsigned int dma_len, type;
	struct nfp_net_dp *dp;
	dma_addr_t dma_addr;
	u64 metadata = 0;
	int wr_idx;

	dp = &r_vec->nfp_net->dp;
	tx_ring = r_vec->tx_ring;

	if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
		nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
		goto err_free;
	}

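	/* Don't bother counting frags, assume the worst */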
	if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tx_busy++;
		u64_stats_update_end(&r_vec->tx_sync);
		if (!old)
			__skb_queue_tail(&r_vec->queue, skb);
		else
			__skb_queue_head(&r_vec->queue, skb);
		return NETDEV_TX_BUSY;
	}

	if (nfp_app_ctrl_has_meta(nn->app)) {
		if (unlikely(skb_headroom(skb) < 8)) {
			nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
			goto err_free;
		}
		metadata = NFDK_DESC_TX_CHAIN_META;
		put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
		put_unaligned_be32(FIELD_PREP(NFDK_META_LEN, 8) |
				   FIELD_PREP(NFDK_META_FIELDS,
					      NFP_NET_META_PORTID),
				   skb_push(skb, 4));
	}

	if (nfp_nfdk_tx_maybe_close_block(tx_ring, skb))
		goto err_free;

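	/* DMA map all */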
	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
	txd = &tx_ring->ktxds[wr_idx];
	txbuf = &tx_ring->ktxbufs[wr_idx];

	dma_len = skb_headlen(skb);
	if (dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
		type = NFDK_DESC_TX_TYPE_SIMPLE;
	else
		type = NFDK_DESC_TX_TYPE_GATHER;

	dma_addr = dma_map_single(dp->dev, skb->data, dma_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dp->dev, dma_addr))
		goto err_warn_dma;

	txbuf->skb = skb;
	txbuf++;

	txbuf->dma_addr = dma_addr;
	txbuf++;

	dma_len -= 1;
	dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD,
			       dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ?
			       NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) |
		    FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);

	txd->dma_len_type = cpu_to_le16(dlen_type);
	nfp_desc_set_dma_addr_48b(txd, dma_addr);

	tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
	dma_len -= tmp_dlen;
	dma_addr += tmp_dlen + 1;
	txd++;

	while (dma_len > 0) {
		dma_len -= 1;
		dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
		txd->dma_len_type = cpu_to_le16(dlen_type);
		nfp_desc_set_dma_addr_48b(txd, dma_addr);

		dlen_type &= NFDK_DESC_TX_DMA_LEN;
		dma_len -= dlen_type;
		dma_addr += dlen_type + 1;
		txd++;
	}

	(txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);

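	/* Metadata desc */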
	txd->raw = cpu_to_le64(metadata);
	txd++;

	cnt = txd - tx_ring->ktxds - wr_idx;
	if (unlikely(round_down(wr_idx, NFDK_TX_DESC_BLOCK_CNT) !=
		     round_down(wr_idx + cnt - 1, NFDK_TX_DESC_BLOCK_CNT)))
		goto err_warn_overflow;

	tx_ring->wr_p += cnt;
	if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT)
		tx_ring->data_pending += skb->len;
	else
		tx_ring->data_pending = 0;

	tx_ring->wr_ptr_add += cnt;
	nfp_net_tx_xmit_more_flush(tx_ring);

	return NETDEV_TX_OK;

err_warn_overflow:
	WARN_ONCE(1, "unable to fit packet into a descriptor wr_idx:%d head:%d frags:%d cnt:%d",
		  wr_idx, skb_headlen(skb), 0, cnt);
	txbuf--;
	dma_unmap_single(dp->dev, txbuf->dma_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);
	txbuf->raw = 0;
err_warn_dma:
	nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
err_free:
	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_errors++;
	u64_stats_update_end(&r_vec->tx_sync);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&r_vec->queue)))
		if (nfp_nfdk_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
			return;
}

static bool
nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
{
	u32 meta_type, meta_tag;

	if (!nfp_app_ctrl_has_meta(nn->app))
		return !meta_len;

	if (meta_len != 8)
		return false;

	meta_type = get_unaligned_be32(data);
	meta_tag = get_unaligned_be32(data + 4);

	return (meta_type == NFP_NET_META_PORTID &&
		meta_tag == NFP_META_PORT_ID_CTRL);
}

static bool
nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
		struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
{
	unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
	struct nfp_net_rx_buf *rxbuf;
	struct nfp_net_rx_desc *rxd;
	dma_addr_t new_dma_addr;
	struct sk_buff *skb;
	void *new_frag;
	int idx;

	idx = D_IDX(rx_ring, rx_ring->rd_p);

	rxd = &rx_ring->rxds[idx];
	if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
		return false;

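	/* Memory barrier to ensure that we won't do other reads
	 * before the DD bit.
	 */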
	dma_rmb();

	rx_ring->rd_p++;

	rxbuf = &rx_ring->rxbufs[idx];
	meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
	data_len = le16_to_cpu(rxd->rxd.data_len);
	pkt_len = data_len - meta_len;

	pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
	if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
		pkt_off += meta_len;
	else
		pkt_off += dp->rx_offset;
	meta_off = pkt_off - meta_len;

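	/* Stats update */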
	u64_stats_update_begin(&r_vec->rx_sync);
	r_vec->rx_pkts++;
	r_vec->rx_bytes += pkt_len;
	u64_stats_update_end(&r_vec->rx_sync);

	nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);

	if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
		nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
			   meta_len);
		nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
		return true;
	}

	skb = build_skb(rxbuf->frag, dp->fl_bufsz);
	if (unlikely(!skb)) {
		nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
		return true;
	}
	new_frag = nfp_nfdk_napi_alloc_one(dp, &new_dma_addr);
	if (unlikely(!new_frag)) {
		nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
		return true;
	}

	nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);

	nfp_nfdk_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);

	skb_reserve(skb, pkt_off);
	skb_put(skb, pkt_len);

	nfp_app_ctrl_rx(nn->app, skb);

	return true;
}

static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
{
	struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
	struct nfp_net *nn = r_vec->nfp_net;
	struct nfp_net_dp *dp = &nn->dp;
	unsigned int budget = 512;

	while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
		continue;

	return budget;
}

void nfp_nfdk_ctrl_poll(struct tasklet_struct *t)
{
	struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);

	spin_lock(&r_vec->lock);
	nfp_nfdk_tx_complete(r_vec->tx_ring, 0);
	__nfp_ctrl_tx_queued(r_vec);
	spin_unlock(&r_vec->lock);

	if (nfp_ctrl_rx(r_vec)) {
		nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
	} else {
		tasklet_schedule(&r_vec->tasklet);
		nn_dp_warn(&r_vec->nfp_net->dp,
			   "control message budget exceeded!\n");
	}
}