#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_type.h"
#include "ice_xsk.h"
#include "ice_txrx.h"
#include "ice_txrx_lib.h"
#include "ice_lib.h"

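/* Editorial note: the descriptive comments below summarize what the code in
 * this file does; they are added documentation, not vendor text.
 */

/**
 * ice_xdp_buf - return address of the XDP buffer slot for a given index
 * @rx_ring: Rx ring that owns the xdp_buf array
 * @idx: index into the xdp_buf array
 */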
static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
{
	return &rx_ring->xdp_buf[idx];
}

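/**
 * ice_qp_reset_stats - Reset all stats for a given queue pair
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */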
static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
{
	memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
	       sizeof(vsi->rx_rings[q_idx]->rx_stats));
	memset(&vsi->tx_rings[q_idx]->stats, 0,
	       sizeof(vsi->tx_rings[q_idx]->stats));
	if (ice_is_xdp_ena_vsi(vsi))
		memset(&vsi->xdp_rings[q_idx]->stats, 0,
		       sizeof(vsi->xdp_rings[q_idx]->stats));
}

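/**
 * ice_qp_clean_rings - Clean all rings of a given queue pair
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */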
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
	if (ice_is_xdp_ena_vsi(vsi)) {
		synchronize_rcu();
		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
	}
	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}

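/**
 * ice_qvec_toggle_napi - Enable or disable NAPI for a given queue vector
 * @vsi: VSI that has the netdev
 * @q_vector: queue vector that owns the NAPI context
 * @enable: true to enable, false to disable
 */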
static void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
		     bool enable)
{
	if (!vsi->netdev || !q_vector)
		return;

	if (enable)
		napi_enable(&q_vector->napi);
	else
		napi_disable(&q_vector->napi);
}

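/**
 * ice_qvec_dis_irq - Mask off queue interrupt generation on a given ring
 * @vsi: the VSI that contains the queue vector being un-configured
 * @rx_ring: Rx ring that will have its interrupt cause disabled
 * @q_vector: queue vector to disable and synchronize against
 */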
static void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
		 struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u16 reg;
	u32 val;

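	/* Clear the CAUSE_ENA bit in QINT_RQCTL so this Rx queue stops
	 * generating interrupt causes while the queue pair is torn down.
	 */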
	reg = rx_ring->reg_idx;
	val = rd32(hw, QINT_RQCTL(reg));
	val &= ~QINT_RQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_RQCTL(reg), val);

	if (q_vector) {
		u16 v_idx = q_vector->v_idx;

		wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
		ice_flush(hw);
		synchronize_irq(pf->msix_entries[v_idx + base].vector);
	}
}

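/**
 * ice_qvec_cfg_msix - Configure ITR and MSI-X mapping for a queue vector
 * @vsi: the VSI that contains the queue vector
 * @q_vector: queue vector whose Tx/Rx rings get mapped to its interrupt
 */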
static void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	u16 reg_idx = q_vector->reg_idx;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;

	ice_cfg_itr(hw, q_vector);

	ice_for_each_tx_ring(tx_ring, q_vector->tx)
		ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx,
				      q_vector->tx.itr_idx);

	ice_for_each_rx_ring(rx_ring, q_vector->rx)
		ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx,
				      q_vector->rx.itr_idx);

	ice_flush(hw);
}

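/**
 * ice_qvec_ena_irq - Enable IRQ for a given queue vector
 * @vsi: the VSI that contains the queue vector
 * @q_vector: queue vector to enable
 */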
static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

	ice_irq_dynamic_ena(hw, vsi, q_vector);

	ice_flush(hw);
}

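/**
 * ice_qp_dis - Disable a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */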
static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_txq_meta txq_meta = { };
	struct ice_q_vector *q_vector;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	int timeout = 50;
	int err;

	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
		return -EINVAL;

	tx_ring = vsi->tx_rings[q_idx];
	rx_ring = vsi->rx_rings[q_idx];
	q_vector = rx_ring->q_vector;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}
	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));

	ice_qvec_dis_irq(vsi, rx_ring, q_vector);

	ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
	if (err)
		return err;
	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];

		memset(&txq_meta, 0, sizeof(txq_meta));
		ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
		err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
					   &txq_meta);
		if (err)
			return err;
	}
	err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
	if (err)
		return err;
	ice_clean_rx_ring(rx_ring);

	ice_qvec_toggle_napi(vsi, q_vector, false);
	ice_qp_clean_rings(vsi, q_idx);
	ice_qp_reset_stats(vsi, q_idx);

	return 0;
}

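/**
 * ice_qp_ena - Enable a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */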
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_aqc_add_tx_qgrp *qg_buf;
	struct ice_q_vector *q_vector;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	u16 size;
	int err;

	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
		return -EINVAL;

	size = struct_size(qg_buf, txqs, 1);
	qg_buf = kzalloc(size, GFP_KERNEL);
	if (!qg_buf)
		return -ENOMEM;

	qg_buf->num_txqs = 1;

	tx_ring = vsi->tx_rings[q_idx];
	rx_ring = vsi->rx_rings[q_idx];
	q_vector = rx_ring->q_vector;

	err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
	if (err)
		goto free_buf;

	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];

		memset(qg_buf, 0, size);
		qg_buf->num_txqs = 1;
		err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
		if (err)
			goto free_buf;
		ice_set_ring_xdp(xdp_ring);
		ice_tx_xsk_pool(vsi, q_idx);
	}

	err = ice_vsi_cfg_rxq(rx_ring);
	if (err)
		goto free_buf;

	ice_qvec_cfg_msix(vsi, q_vector);

	err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
	if (err)
		goto free_buf;

	clear_bit(ICE_CFG_BUSY, vsi->state);
	ice_qvec_toggle_napi(vsi, q_vector, true);
	ice_qvec_ena_irq(vsi, q_vector);

	netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
free_buf:
	kfree(qg_buf);
	return err;
}

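/**
 * ice_xsk_pool_disable - detach a buffer pool from its queue
 * @vsi: Current VSI
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure.
 */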
static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);

	if (!pool)
		return -EINVAL;

	clear_bit(qid, vsi->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);

	return 0;
}

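/**
 * ice_xsk_pool_enable - DMA map a buffer pool and mark the queue as zero-copy
 * @vsi: Current VSI
 * @pool: pointer to the requested buffer pool
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure.
 */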
static int
ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
	int err;

	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	if (qid >= vsi->netdev->real_num_rx_queues ||
	    qid >= vsi->netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back),
			       ICE_RX_DMA_ATTR);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	return 0;
}

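/**
 * ice_realloc_rx_xdp_bufs - reallocate the Rx software ring for XSK or normal buffers
 * @rx_ring: Rx ring
 * @pool_present: true when an XSK pool is being attached, false when detached
 *
 * Allocate the new software ring first and only then free and swap the old
 * one, so the ring is left untouched on allocation failure.
 *
 * Returns 0 on success, -ENOMEM on failure.
 */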
static int
ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
{
	size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) :
					  sizeof(*rx_ring->rx_buf);
	void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);

	if (!sw_ring)
		return -ENOMEM;

	if (pool_present) {
		kfree(rx_ring->rx_buf);
		rx_ring->rx_buf = NULL;
		rx_ring->xdp_buf = sw_ring;
	} else {
		kfree(rx_ring->xdp_buf);
		rx_ring->xdp_buf = NULL;
		rx_ring->rx_buf = sw_ring;
	}

	return 0;
}

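/**
 * ice_realloc_zc_buf - reallocate software rings for all zero-copy queue pairs
 * @vsi: Current VSI
 * @zc: true to switch to the xdp_buf layout, false to go back to rx_buf
 *
 * Returns 0 on success, -ENOMEM on failure.
 */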
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
{
	struct ice_rx_ring *rx_ring;
	unsigned long q;

	for_each_set_bit(q, vsi->af_xdp_zc_qps,
			 max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
		rx_ring = vsi->rx_rings[q];
		if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
			return -ENOMEM;
	}

	return 0;
}

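/**
 * ice_xsk_pool_setup - enable or disable a buffer pool on a given queue pair
 * @vsi: Current VSI
 * @pool: buffer pool to attach, NULL to detach
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure.
 */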
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
	bool if_running, pool_present = !!pool;
	int ret = 0, pool_failure = 0;

	if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
		netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
		pool_failure = -EINVAL;
		goto failure;
	}

	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);

	if (if_running) {
		struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];

		ret = ice_qp_dis(vsi, qid);
		if (ret) {
			netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
			goto xsk_pool_if_up;
		}

		ret = ice_realloc_rx_xdp_bufs(rx_ring, pool_present);
		if (ret)
			goto xsk_pool_if_up;
	}

	pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
				      ice_xsk_pool_disable(vsi, qid);

xsk_pool_if_up:
	if (if_running) {
		ret = ice_qp_ena(vsi, qid);
		if (!ret && pool_present)
			napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);
		else if (ret)
			netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
	}

failure:
	if (pool_failure) {
		netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
			   pool_present ? "en" : "dis", pool_failure);
		return pool_failure;
	}

	return ret;
}

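/**
 * ice_fill_rx_descs - pick buffers from the XSK buffer pool and put them on the Rx ring
 * @pool: XSK buffer pool to pull the buffers from
 * @xdp: software ring slots that will hold the allocated xdp_buff pointers
 * @rx_desc: first Rx descriptor to be filled
 * @count: number of buffers to allocate
 *
 * Ring wrap must be handled by the caller.
 *
 * Returns the number of descriptors actually filled.
 */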
static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
			     union ice_32b_rx_flex_desc *rx_desc, u16 count)
{
	dma_addr_t dma;
	u16 buffs;
	int i;

	buffs = xsk_buff_alloc_batch(pool, xdp, count);
	for (i = 0; i < buffs; i++) {
		dma = xsk_buff_xdp_get_dma(*xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->wb.status_error0 = 0;

		rx_desc++;
		xdp++;
	}

	return buffs;
}

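/**
 * __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: number of buffers to allocate
 *
 * Place @count descriptors onto the Rx ring, handling the wrap when the
 * space from next_to_use to the end of the ring is smaller than @count,
 * and bump the tail when next_to_use moved.
 *
 * Returns true if all allocations succeeded, false otherwise.
 */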
static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
	u32 nb_buffs_extra = 0, nb_buffs = 0;
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	u16 total_count = count;
	struct xdp_buff **xdp;

	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	xdp = ice_xdp_buf(rx_ring, ntu);

	if (ntu + count >= rx_ring->count) {
		nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
						   rx_desc,
						   rx_ring->count - ntu);
		if (nb_buffs_extra != rx_ring->count - ntu) {
			ntu += nb_buffs_extra;
			goto exit;
		}
		rx_desc = ICE_RX_DESC(rx_ring, 0);
		xdp = ice_xdp_buf(rx_ring, 0);
		ntu = 0;
		count -= nb_buffs_extra;
		ice_release_rx_desc(rx_ring, 0);
	}

	nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count);

	ntu += nb_buffs;
	if (ntu == rx_ring->count)
		ntu = 0;

exit:
	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return total_count == (nb_buffs_extra + nb_buffs);
}

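/**
 * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: number of buffers to allocate
 *
 * Wrapper around the internal allocation routine; splits the request into
 * quarter-ring chunks so the tail is bumped at the configured threshold.
 *
 * Returns true if every call to the internal routine succeeded.
 */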
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
	u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
	u16 leftover, i, tail_bumps;

	tail_bumps = count / rx_thresh;
	leftover = count - (tail_bumps * rx_thresh);

	for (i = 0; i < tail_bumps; i++)
		if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
			return false;
	return __ice_alloc_rx_bufs_zc(rx_ring, leftover);
}

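/**
 * ice_bump_ntc - bump the next_to_clean counter of an Rx ring
 * @rx_ring: Rx ring
 */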
static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
{
	int ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(ICE_RX_DESC(rx_ring, ntc));
}

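/**
 * ice_construct_skb_zc - create an skb from a zero-copy buffer
 * @rx_ring: Rx ring
 * @xdp: pointer to the XDP buffer
 *
 * Copies the packet (and any metadata) out of the zero-copy buffer into a
 * freshly allocated skb and returns the buffer to the pool.
 *
 * Returns the skb on success, NULL on failure.
 */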
static struct sk_buff *
ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;

	net_prefetch(xdp->data_meta);

	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
	       ALIGN(totalsize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	xsk_buff_free(xdp);
	return skb;
}

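/**
 * ice_run_xdp_zc - execute the XDP program on the zero-copy path
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring used for the XDP_TX action
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR, EXIT}.
 */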
static int
ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
	       struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
	int err, result = ICE_XDP_PASS;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);

	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (!err)
			return ICE_XDP_REDIR;
		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
			result = ICE_XDP_EXIT;
		else
			result = ICE_XDP_CONSUMED;
		goto out_failure;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		result = ice_xmit_xdp_buff(xdp, xdp_ring);
		if (result == ICE_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_DROP:
		result = ICE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		result = ICE_XDP_CONSUMED;
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		break;
	}

	return result;
}

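/**
 * ice_clean_rx_irq_zc - consume packets from the hardware ring
 * @rx_ring: AF_XDP Rx ring
 * @budget: NAPI budget
 *
 * Returns the number of processed packets, or the remaining budget on failure.
 */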
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct ice_tx_ring *xdp_ring;
	unsigned int xdp_xmit = 0;
	struct bpf_prog *xdp_prog;
	bool failure = false;
	int entries_to_alloc;

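	/* The zero-copy path is only exercised while an XDP program is
	 * attached, so xdp_prog cannot be NULL here.
	 */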
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	xdp_ring = rx_ring->xdp_ring;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		unsigned int size, xdp_res = 0;
		struct xdp_buff *xdp;
		struct sk_buff *skb;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u16 rx_ptype;

		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
			break;

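		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */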
		dma_rmb();

		if (unlikely(rx_ring->next_to_clean == rx_ring->next_to_use))
			break;

		xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean);

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;
		if (!size) {
			xdp->data = NULL;
			xdp->data_end = NULL;
			xdp->data_hard_start = NULL;
			xdp->data_meta = NULL;
			goto construct_skb;
		}

		xsk_buff_set_size(xdp, size);
		xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);

		xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
		if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
			xdp_xmit |= xdp_res;
		} else if (xdp_res == ICE_XDP_EXIT) {
			failure = true;
			break;
		} else if (xdp_res == ICE_XDP_CONSUMED) {
			xsk_buff_free(xdp);
		} else if (xdp_res == ICE_XDP_PASS) {
			goto construct_skb;
		}

		total_rx_bytes += size;
		total_rx_packets++;

		ice_bump_ntc(rx_ring);
		continue;

construct_skb:
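		/* ICE_XDP_PASS path (also taken for zero-sized frames):
		 * hand the packet to the network stack as an skb.
		 */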
		skb = ice_construct_skb_zc(rx_ring, xdp);
		if (!skb) {
			rx_ring->rx_stats.alloc_buf_failed++;
			break;
		}

		ice_bump_ntc(rx_ring);

		if (eth_skb_pad(skb)) {
			skb = NULL;
			continue;
		}

		total_rx_bytes += skb->len;
		total_rx_packets++;

		vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);

		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
			   ICE_RX_FLEX_DESC_PTYPE_M;

		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
		ice_receive_skb(rx_ring, skb, vlan_tag);
	}

	entries_to_alloc = ICE_DESC_UNUSED(rx_ring);
	if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
		failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);

	ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
	ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}

	return failure ? budget : (int)total_rx_packets;
}

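/**
 * ice_clean_xdp_tx_buf - free and unmap an XDP Tx buffer
 * @xdp_ring: XDP Tx ring
 * @tx_buf: Tx buffer to clean
 */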
static void
ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
{
	xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
	xdp_ring->xdp_tx_active--;
	dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
			 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_buf, len, 0);
}

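/**
 * ice_clean_xdp_irq_zc - clean completed descriptors on the XDP Tx ring
 * @xdp_ring: XDP Tx ring
 *
 * Walk the descriptors up to the last one marked done by hardware, free any
 * XDP_TX buffers among them, and report the completed count to the XSK
 * completion queue.
 */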
static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
{
	u16 ntc = xdp_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	u16 cnt = xdp_ring->count;
	struct ice_tx_buf *tx_buf;
	u16 xsk_frames = 0;
	u16 last_rs;
	int i;

	last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
	tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
	if ((tx_desc->cmd_type_offset_bsz &
	    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
		if (last_rs >= ntc)
			xsk_frames = last_rs - ntc + 1;
		else
			xsk_frames = last_rs + cnt - ntc + 1;
	}

	if (!xsk_frames)
		return;

	if (likely(!xdp_ring->xdp_tx_active))
		goto skip;

	ntc = xdp_ring->next_to_clean;
	for (i = 0; i < xsk_frames; i++) {
		tx_buf = &xdp_ring->tx_buf[ntc];

		if (tx_buf->raw_buf) {
			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
			tx_buf->raw_buf = NULL;
		} else {
			xsk_frames++;
		}

		ntc++;
		if (ntc >= xdp_ring->count)
			ntc = 0;
	}
skip:
	tx_desc->cmd_type_offset_bsz = 0;
	xdp_ring->next_to_clean += xsk_frames;
	if (xdp_ring->next_to_clean >= cnt)
		xdp_ring->next_to_clean -= cnt;
	if (xsk_frames)
		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}

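/**
 * ice_xmit_pkt - produce a single HW Tx descriptor from an AF_XDP descriptor
 * @xdp_ring: XDP ring to produce the HW Tx descriptor on
 * @desc: AF_XDP descriptor providing the DMA address and length
 * @total_bytes: bytes accumulator used for the stats update
 */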
static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
			 unsigned int *total_bytes)
{
	struct ice_tx_desc *tx_desc;
	dma_addr_t dma;

	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);

	tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
						      0, desc->len, 0);

	*total_bytes += desc->len;
}

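/**
 * ice_xmit_pkt_batch - produce a batch of HW Tx descriptors from AF_XDP descriptors
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @descs: AF_XDP descriptors providing the DMA addresses and lengths
 * @total_bytes: bytes accumulator used for the stats update
 */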
static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
			       unsigned int *total_bytes)
{
	u16 ntu = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	u32 i;

	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
		dma_addr_t dma;

		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len);

		tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
		tx_desc->buf_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
							      0, descs[i].len, 0);

		*total_bytes += descs[i].len;
	}

	xdp_ring->next_to_use = ntu;
}

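/**
 * ice_fill_tx_hw_ring - produce the requested number of Tx descriptors onto the ring
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @descs: AF_XDP descriptors providing the DMA addresses and lengths
 * @nb_pkts: count of packets to be produced
 * @total_bytes: bytes accumulator used for the stats update
 */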
static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
				u32 nb_pkts, unsigned int *total_bytes)
{
	u32 batched, leftover, i;

	batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
	for (i = 0; i < batched; i += PKTS_PER_BATCH)
		ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
	for (; i < batched + leftover; i++)
		ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
}

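/**
 * ice_set_rs_bit - set the RS bit on the last produced descriptor (one behind NTU)
 * @xdp_ring: XDP ring the HW Tx descriptors were produced on
 */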
static void ice_set_rs_bit(struct ice_tx_ring *xdp_ring)
{
	u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
	struct ice_tx_desc *tx_desc;

	tx_desc = ICE_TX_DESC(xdp_ring, ntu);
	tx_desc->cmd_type_offset_bsz |=
		cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
}

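/**
 * ice_xmit_zc - take entries from the XSK Tx ring and place them onto the HW Tx ring
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 *
 * Returns true if there is no more work that needs to be done, false otherwise.
 */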
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
{
	struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
	u32 nb_pkts, nb_processed = 0;
	unsigned int total_bytes = 0;
	int budget;

	ice_clean_xdp_irq_zc(xdp_ring);

	budget = ICE_DESC_UNUSED(xdp_ring);
	budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));

	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
	if (!nb_pkts)
		return true;

	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
		ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
		xdp_ring->next_to_use = 0;
	}

	ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
			    &total_bytes);

	ice_set_rs_bit(xdp_ring);
	ice_xdp_ring_update_tail(xdp_ring);
	ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);

	if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
		xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);

	return nb_pkts < budget;
}

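/**
 * ice_xsk_wakeup - implements ndo_xsk_wakeup
 * @netdev: net_device
 * @queue_id: queue to wake up
 * @flags: ignored; Rx and Tx share the same NAPI context
 *
 * Returns negative on error, zero otherwise.
 */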
int
ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
	       u32 __always_unused flags)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_q_vector *q_vector;
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *ring;

	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi))
		return -EINVAL;

	if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)
		return -EINVAL;

	ring = vsi->rx_rings[queue_id]->xdp_ring;

	if (!ring->xsk_pool)
		return -EINVAL;

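	/* If NAPI is already scheduled, mark it as missed so it runs again;
	 * otherwise trigger a software interrupt so the NAPI poll is
	 * scheduled from interrupt context and interrupt affinity is honored.
	 */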
	q_vector = ring->q_vector;
	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
		ice_trigger_sw_intr(&vsi->back->hw, q_vector);

	return 0;
}

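/**
 * ice_xsk_any_rx_ring_ena - check if any Rx ring has an AF_XDP buffer pool attached
 * @vsi: VSI to be checked
 *
 * Returns true if any Rx ring of the VSI has an AF_XDP buffer pool attached.
 */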
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
	int i;

	ice_for_each_rxq(vsi, i) {
		if (xsk_get_pool_from_qid(vsi->netdev, i))
			return true;
	}

	return false;
}

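/**
 * ice_xsk_clean_rx_ring - return outstanding zero-copy buffers of an Rx ring to the pool
 * @rx_ring: ring to be cleaned
 */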
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
	u16 ntc = rx_ring->next_to_clean;
	u16 ntu = rx_ring->next_to_use;

	while (ntc != ntu) {
		struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);

		xsk_buff_free(xdp);
		ntc++;
		if (ntc >= rx_ring->count)
			ntc = 0;
	}
}

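/**
 * ice_xsk_clean_xdp_ring - clean the XDP Tx ring and report completions to the pool
 * @xdp_ring: XDP Tx ring
 */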
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)
{
	u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];

		if (tx_buf->raw_buf)
			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
		else
			xsk_frames++;

		tx_buf->raw_buf = NULL;

		ntc++;
		if (ntc >= xdp_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}