// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "ixgbe.h"
#include "ixgbe_txrx_common.h"

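/* Return the AF_XDP buffer pool bound to @ring's queue index, or NULL when
 * no XDP program is loaded or the queue has not been enabled for zero-copy.
 */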
struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
				     struct ixgbe_ring *ring)
{
	bool xdp_on = READ_ONCE(adapter->xdp_prog);
	int qid = ring->ring_idx;

	if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(adapter->netdev, qid);
}

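/* Bind @pool to RX/TX queue pair @qid and DMA-map its buffers. The queue
 * pair is quiesced while the pool is installed, then re-enabled and kicked
 * so the NAPI context starts receiving into the new buffers.
 */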
static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
				 struct xsk_buff_pool *pool,
				 u16 qid)
{
	struct net_device *netdev = adapter->netdev;
	bool if_running;
	int err;

	if (qid >= adapter->num_rx_queues)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
	if (err)
		return err;

	if_running = netif_running(adapter->netdev) &&
		     ixgbe_enabled_xdp_adapter(adapter);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	set_bit(qid, adapter->af_xdp_zc_qps);

	if (if_running) {
		ixgbe_txrx_ring_enable(adapter, qid);

		/* Kick start the NAPI context so that receiving will start */
		err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
		if (err) {
			clear_bit(qid, adapter->af_xdp_zc_qps);
			xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
			return err;
		}
	}

	return 0;
}

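/* Tear down the zero-copy binding for queue pair @qid: quiesce the rings,
 * clear the zero-copy bit and unmap the pool's DMA mappings.
 */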
static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid)
{
	struct xsk_buff_pool *pool;
	bool if_running;

	pool = xsk_get_pool_from_qid(adapter->netdev, qid);
	if (!pool)
		return -EINVAL;

	if_running = netif_running(adapter->netdev) &&
		     ixgbe_enabled_xdp_adapter(adapter);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	clear_bit(qid, adapter->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);

	if (if_running)
		ixgbe_txrx_ring_enable(adapter, qid);

	return 0;
}

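/* Entry point for XDP_SETUP_XSK_POOL from the driver's ndo_bpf handler:
 * a non-NULL @pool enables zero-copy on @qid, a NULL @pool disables it.
 */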
int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
			 struct xsk_buff_pool *pool,
			 u16 qid)
{
	return pool ? ixgbe_xsk_pool_enable(adapter, pool, qid) :
		      ixgbe_xsk_pool_disable(adapter, qid);
}

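/* Run the attached XDP program on @xdp and map its verdict onto the
 * IXGBE_XDP_* result codes: PASS hands the frame to the stack, TX/REDIR
 * leave the tail bump/flush to the caller, EXIT tells the caller to stop
 * polling (need_wakeup path), and CONSUMED means the buffer can be freed.
 */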
static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
			    struct ixgbe_ring *rx_ring,
			    struct xdp_buff *xdp)
{
	int err, result = IXGBE_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct ixgbe_ring *ring;
	struct xdp_frame *xdpf;
	u32 act;

	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);

	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (!err)
			return IXGBE_XDP_REDIR;
		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
			result = IXGBE_XDP_EXIT;
		else
			result = IXGBE_XDP_CONSUMED;
		goto out_failure;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf))
			goto out_failure;
		ring = ixgbe_determine_xdp_ring(adapter);
		if (static_branch_unlikely(&ixgbe_xdp_locking_key))
			spin_lock(&ring->tx_lock);
		result = ixgbe_xmit_xdp_ring(ring, xdpf);
		if (static_branch_unlikely(&ixgbe_xdp_locking_key))
			spin_unlock(&ring->tx_lock);
		if (result == IXGBE_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_DROP:
		result = IXGBE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_ABORTED:
		result = IXGBE_XDP_CONSUMED;
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
	}
	return result;
}

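/* Allocate up to @count buffers from the XSK pool and post them on the RX
 * ring. Returns true if all @count buffers were posted, false if the pool
 * ran dry first.
 */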
bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	dma_addr_t dma;
	bool ok = true;

	/* nothing to do */
	if (!count)
		return true;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
		if (!bi->xdp) {
			ok = false;
			break;
		}

		dma = xsk_buff_xdp_get_dma(bi->xdp);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		count--;
	} while (count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}

	return ok;
}

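/* Copy the frame, including any XDP metadata, out of the XSK buffer into a
 * freshly allocated skb so the zero-copy buffer can be returned to the pool
 * immediately.
 */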
static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
					      const struct xdp_buff *xdp)
{
	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;

	net_prefetch(xdp->data_meta);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
	       ALIGN(totalsize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	return skb;
}

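/* Advance next_to_clean, wrapping at the ring size, and prefetch the next
 * descriptor.
 */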
static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(IXGBE_RX_DESC(rx_ring, ntc));
}

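/* Main zero-copy RX poll loop: refill the ring, run XDP on each completed
 * descriptor and either recycle the buffer, defer a TX/redirect flush, or
 * build an skb for the stack. Returns the number of packets processed,
 * capped at @budget.
 */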
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
			  struct ixgbe_ring *rx_ring,
			  const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;
		struct ixgbe_rx_buffer *bi;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			failure = failure ||
				  !ixgbe_alloc_rx_buffers_zc(rx_ring,
							     cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];

		if (unlikely(!ixgbe_test_staterr(rx_desc,
						 IXGBE_RXD_STAT_EOP))) {
			struct ixgbe_rx_buffer *next_bi;

			xsk_buff_free(bi->xdp);
			bi->xdp = NULL;
			ixgbe_inc_ntc(rx_ring);
			next_bi =
				&rx_ring->rx_buffer_info[rx_ring->next_to_clean];
			next_bi->discard = true;
			continue;
		}

		if (unlikely(bi->discard)) {
			xsk_buff_free(bi->xdp);
			bi->xdp = NULL;
			bi->discard = false;
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		bi->xdp->data_end = bi->xdp->data + size;
		xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
		xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);

		if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) {
			xdp_xmit |= xdp_res;
		} else if (xdp_res == IXGBE_XDP_EXIT) {
			failure = true;
			break;
		} else if (xdp_res == IXGBE_XDP_CONSUMED) {
			xsk_buff_free(bi->xdp);
		} else if (xdp_res == IXGBE_XDP_PASS) {
			goto construct_skb;
		}

		bi->xdp = NULL;
		total_rx_packets++;
		total_rx_bytes += size;

		cleaned_count++;
		ixgbe_inc_ntc(rx_ring);
		continue;

construct_skb:
		/* XDP_PASS path */
		skb = ixgbe_construct_skb_zc(rx_ring, bi->xdp);
		if (!skb) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			break;
		}

		xsk_buff_free(bi->xdp);
		bi->xdp = NULL;

		cleaned_count++;
		ixgbe_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
		ixgbe_rx_skb(q_vector, skb);
	}

	if (xdp_xmit & IXGBE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_xmit & IXGBE_XDP_TX) {
		struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);

		ixgbe_xdp_ring_update_tail_locked(ring);
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}

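/* Release any XSK buffers still posted on the RX ring back to the pool. */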
void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
	struct ixgbe_rx_buffer *bi;
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		bi = &rx_ring->rx_buffer_info[i];

		if (!bi->xdp)
			continue;

		xsk_buff_free(bi->xdp);
		bi->xdp = NULL;
	}
}

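/* Pull descriptors off the XSK TX queue and post them on the XDP ring, at
 * most @budget frames. Returns true when TX is fully caught up: the budget
 * was not exhausted and the ring never ran out of free descriptors.
 */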
static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
{
	struct xsk_buff_pool *pool = xdp_ring->xsk_pool;
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_bi;
	bool work_done = true;
	struct xdp_desc desc;
	dma_addr_t dma;
	u32 cmd_type;

	while (budget-- > 0) {
		if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
			work_done = false;
			break;
		}

		if (!netif_carrier_ok(xdp_ring->netdev))
			break;

		if (!xsk_tx_peek_desc(pool, &desc))
			break;

		dma = xsk_buff_raw_get_dma(pool, desc.addr);
		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);

		tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
		tx_bi->bytecount = desc.len;
		tx_bi->xdpf = NULL;
		tx_bi->gso_segs = 1;

		tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		/* put descriptor type bits */
		cmd_type = IXGBE_ADVTXD_DTYP_DATA |
			   IXGBE_ADVTXD_DCMD_DEXT |
			   IXGBE_ADVTXD_DCMD_IFCS;
		cmd_type |= desc.len | IXGBE_TXD_CMD;
		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
		tx_desc->read.olinfo_status =
			cpu_to_le32(desc.len << IXGBE_ADVTXD_PAYLEN_SHIFT);

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

	if (tx_desc) {
		ixgbe_xdp_ring_update_tail(xdp_ring);
		xsk_tx_release(pool);
	}

	return !!budget && work_done;
}

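/* Unmap and return a completed XDP_TX/XDP_REDIRECT frame. */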
static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
				      struct ixgbe_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

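/* Reclaim descriptors the hardware has completed (DD set), distinguishing
 * XDP frames (unmapped and returned) from XSK frames (completed back to
 * the pool), then refill the ring via ixgbe_xmit_zc().
 */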
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
			    struct ixgbe_ring *tx_ring, int napi_budget)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	unsigned int total_packets = 0, total_bytes = 0;
	struct xsk_buff_pool *pool = tx_ring->xsk_pool;
	union ixgbe_adv_tx_desc *tx_desc;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	tx_bi = &tx_ring->tx_buffer_info[ntc];
	tx_desc = IXGBE_TX_DESC(tx_ring, ntc);

	while (ntc != ntu) {
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		total_bytes += tx_bi->bytecount;
		total_packets += tx_bi->gso_segs;

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		tx_bi++;
		tx_desc++;
		ntc++;
		if (unlikely(ntc == tx_ring->count)) {
			ntc = 0;
			tx_bi = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);
	}

	tx_ring->next_to_clean = ntc;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (xsk_frames)
		xsk_tx_completed(pool, xsk_frames);

	if (xsk_uses_need_wakeup(pool))
		xsk_set_tx_need_wakeup(pool);

	return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
}

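/* ndo_xsk_wakeup implementation: if NAPI is already scheduled, mark it
 * missed so it reruns; otherwise rearm the queue vector's interrupt to
 * kick the NAPI context.
 */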
int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ring *ring;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return -ENETDOWN;

	if (!READ_ONCE(adapter->xdp_prog))
		return -EINVAL;

	if (qid >= adapter->num_xdp_queues)
		return -EINVAL;

	ring = adapter->xdp_ring[qid];

	if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
		return -ENETDOWN;

	if (!ring->xsk_pool)
		return -EINVAL;

	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
		u64 eics = BIT_ULL(ring->q_vector->v_idx);

		ixgbe_irq_rearm_queues(adapter, eics);
	}

	return 0;
}

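/* Drop every frame still pending on the XDP ring at teardown: XDP frames
 * are unmapped and returned, XSK frames are completed back to the pool.
 */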
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xsk_buff_pool *pool = tx_ring->xsk_pool;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_buffer_info[ntc];

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc == tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(pool, xsk_frames);
}