// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_trace.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"
#include "ice_eswitch.h"

#define ICE_RX_HDR_SIZE 256

#define FDIR_DESC_RXDID 0x40
#define ICE_FDIR_CLEAN_DELAY 10
0025
/**
 * ice_prgm_fdir_fltr - Program a Flow Director filter
 * @vsi: VSI to send dummy packet
 * @fdir_desc: flow director descriptor
 * @raw_packet: allocated buffer for flow director
 */
0032 int
0033 ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
0034 u8 *raw_packet)
0035 {
0036 struct ice_tx_buf *tx_buf, *first;
0037 struct ice_fltr_desc *f_desc;
0038 struct ice_tx_desc *tx_desc;
0039 struct ice_tx_ring *tx_ring;
0040 struct device *dev;
0041 dma_addr_t dma;
0042 u32 td_cmd;
0043 u16 i;
0044
0045
0046 if (!vsi)
0047 return -ENOENT;
0048 tx_ring = vsi->tx_rings[0];
0049 if (!tx_ring || !tx_ring->desc)
0050 return -ENOENT;
0051 dev = tx_ring->dev;
0052
0053
0054 for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
0055 if (!i)
0056 return -EAGAIN;
0057 msleep_interruptible(1);
0058 }
0059
0060 dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
0061 DMA_TO_DEVICE);
0062
0063 if (dma_mapping_error(dev, dma))
0064 return -EINVAL;
0065
0066
0067 i = tx_ring->next_to_use;
0068 first = &tx_ring->tx_buf[i];
0069 f_desc = ICE_TX_FDIRDESC(tx_ring, i);
0070 memcpy(f_desc, fdir_desc, sizeof(*f_desc));
0071
0072 i++;
0073 i = (i < tx_ring->count) ? i : 0;
0074 tx_desc = ICE_TX_DESC(tx_ring, i);
0075 tx_buf = &tx_ring->tx_buf[i];
0076
0077 i++;
0078 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
0079
0080 memset(tx_buf, 0, sizeof(*tx_buf));
0081 dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
0082 dma_unmap_addr_set(tx_buf, dma, dma);
0083
0084 tx_desc->buf_addr = cpu_to_le64(dma);
0085 td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
0086 ICE_TX_DESC_CMD_RE;
0087
0088 tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
0089 tx_buf->raw_buf = raw_packet;
0090
0091 tx_desc->cmd_type_offset_bsz =
0092 ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);
0093
0094
0095
0096
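/* Force memory writes to complete before letting h/w know there
 * are new descriptors to fetch.
 */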
0097 wmb();
0098
0099
0100 first->next_to_watch = tx_desc;
0101
0102 writel(tx_ring->next_to_use, tx_ring->tail);
0103
0104 return 0;
0105 }
0106
/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
0112 static void
0113 ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
0114 {
0115 if (tx_buf->skb) {
0116 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
0117 devm_kfree(ring->dev, tx_buf->raw_buf);
0118 else if (ice_ring_is_xdp(ring))
0119 page_frag_free(tx_buf->raw_buf);
0120 else
0121 dev_kfree_skb_any(tx_buf->skb);
0122 if (dma_unmap_len(tx_buf, len))
0123 dma_unmap_single(ring->dev,
0124 dma_unmap_addr(tx_buf, dma),
0125 dma_unmap_len(tx_buf, len),
0126 DMA_TO_DEVICE);
0127 } else if (dma_unmap_len(tx_buf, len)) {
0128 dma_unmap_page(ring->dev,
0129 dma_unmap_addr(tx_buf, dma),
0130 dma_unmap_len(tx_buf, len),
0131 DMA_TO_DEVICE);
0132 }
0133
0134 tx_buf->next_to_watch = NULL;
0135 tx_buf->skb = NULL;
0136 dma_unmap_len_set(tx_buf, len, 0);
0137
0138 }
0139
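/**
 * txring_txq - Find the netdev Tx ring based on the ice Tx ring
 * @ring: ice Tx ring
 */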
0140 static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
0141 {
0142 return netdev_get_tx_queue(ring->netdev, ring->q_index);
0143 }
0144
/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
0149 void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
0150 {
0151 u32 size;
0152 u16 i;
0153
0154 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
0155 ice_xsk_clean_xdp_ring(tx_ring);
0156 goto tx_skip_free;
0157 }
0158
0159
0160 if (!tx_ring->tx_buf)
0161 return;
0162
0163
0164 for (i = 0; i < tx_ring->count; i++)
0165 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
0166
0167 tx_skip_free:
0168 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
0169
0170 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
0171 PAGE_SIZE);
0172
0173 memset(tx_ring->desc, 0, size);
0174
0175 tx_ring->next_to_use = 0;
0176 tx_ring->next_to_clean = 0;
0177 tx_ring->next_dd = ICE_RING_QUARTER(tx_ring) - 1;
0178 tx_ring->next_rs = ICE_RING_QUARTER(tx_ring) - 1;
0179
0180 if (!tx_ring->netdev)
0181 return;
0182
0183
0184 netdev_tx_reset_queue(txring_txq(tx_ring));
0185 }
0186
/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
0193 void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
0194 {
0195 u32 size;
0196
0197 ice_clean_tx_ring(tx_ring);
0198 devm_kfree(tx_ring->dev, tx_ring->tx_buf);
0199 tx_ring->tx_buf = NULL;
0200
0201 if (tx_ring->desc) {
0202 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
0203 PAGE_SIZE);
0204 dmam_free_coherent(tx_ring->dev, size,
0205 tx_ring->desc, tx_ring->dma);
0206 tx_ring->desc = NULL;
0207 }
0208 }
0209
/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 */
0217 static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
0218 {
0219 unsigned int total_bytes = 0, total_pkts = 0;
0220 unsigned int budget = ICE_DFLT_IRQ_WORK;
0221 struct ice_vsi *vsi = tx_ring->vsi;
0222 s16 i = tx_ring->next_to_clean;
0223 struct ice_tx_desc *tx_desc;
0224 struct ice_tx_buf *tx_buf;
0225
0226
0227 netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
0228
0229 tx_buf = &tx_ring->tx_buf[i];
0230 tx_desc = ICE_TX_DESC(tx_ring, i);
0231 i -= tx_ring->count;
0232
0233 prefetch(&vsi->state);
0234
0235 do {
0236 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
0237
0238
0239 if (!eop_desc)
0240 break;
0241
0242
0243 prefetchw(&tx_buf->skb->users);
0244
0245 smp_rmb();
0246
0247 ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
0248
0249 if (!(eop_desc->cmd_type_offset_bsz &
0250 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
0251 break;
0252
0253
0254 tx_buf->next_to_watch = NULL;
0255
0256
0257 total_bytes += tx_buf->bytecount;
0258 total_pkts += tx_buf->gso_segs;
0259
0260
0261 napi_consume_skb(tx_buf->skb, napi_budget);
0262
0263
0264 dma_unmap_single(tx_ring->dev,
0265 dma_unmap_addr(tx_buf, dma),
0266 dma_unmap_len(tx_buf, len),
0267 DMA_TO_DEVICE);
0268
0269
0270 tx_buf->skb = NULL;
0271 dma_unmap_len_set(tx_buf, len, 0);
0272
0273
0274 while (tx_desc != eop_desc) {
0275 ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
0276 tx_buf++;
0277 tx_desc++;
0278 i++;
0279 if (unlikely(!i)) {
0280 i -= tx_ring->count;
0281 tx_buf = tx_ring->tx_buf;
0282 tx_desc = ICE_TX_DESC(tx_ring, 0);
0283 }
0284
0285
0286 if (dma_unmap_len(tx_buf, len)) {
0287 dma_unmap_page(tx_ring->dev,
0288 dma_unmap_addr(tx_buf, dma),
0289 dma_unmap_len(tx_buf, len),
0290 DMA_TO_DEVICE);
0291 dma_unmap_len_set(tx_buf, len, 0);
0292 }
0293 }
0294 ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);
0295
0296
0297 tx_buf++;
0298 tx_desc++;
0299 i++;
0300 if (unlikely(!i)) {
0301 i -= tx_ring->count;
0302 tx_buf = tx_ring->tx_buf;
0303 tx_desc = ICE_TX_DESC(tx_ring, 0);
0304 }
0305
0306 prefetch(tx_desc);
0307
0308
0309 budget--;
0310 } while (likely(budget));
0311
0312 i += tx_ring->count;
0313 tx_ring->next_to_clean = i;
0314
0315 ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
0316 netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);
0317
0318 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
0319 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
0320 (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
0321
0322
0323
0324 smp_mb();
0325 if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
0326 !test_bit(ICE_VSI_DOWN, vsi->state)) {
0327 netif_tx_wake_queue(txring_txq(tx_ring));
0328 ++tx_ring->tx_stats.restart_q;
0329 }
0330 }
0331
0332 return !!budget;
0333 }
0334
/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
0341 int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
0342 {
0343 struct device *dev = tx_ring->dev;
0344 u32 size;
0345
0346 if (!dev)
0347 return -ENOMEM;
0348
0349
0350 WARN_ON(tx_ring->tx_buf);
0351 tx_ring->tx_buf =
0352 devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count,
0353 GFP_KERNEL);
0354 if (!tx_ring->tx_buf)
0355 return -ENOMEM;
0356
0357
0358 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
0359 PAGE_SIZE);
0360 tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
0361 GFP_KERNEL);
0362 if (!tx_ring->desc) {
0363 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
0364 size);
0365 goto err;
0366 }
0367
0368 tx_ring->next_to_use = 0;
0369 tx_ring->next_to_clean = 0;
0370 tx_ring->tx_stats.prev_pkt = -1;
0371 return 0;
0372
0373 err:
0374 devm_kfree(dev, tx_ring->tx_buf);
0375 tx_ring->tx_buf = NULL;
0376 return -ENOMEM;
0377 }
0378
/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
0383 void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
0384 {
0385 struct device *dev = rx_ring->dev;
0386 u32 size;
0387 u16 i;
0388
0389
0390 if (!rx_ring->rx_buf)
0391 return;
0392
0393 if (rx_ring->skb) {
0394 dev_kfree_skb(rx_ring->skb);
0395 rx_ring->skb = NULL;
0396 }
0397
0398 if (rx_ring->xsk_pool) {
0399 ice_xsk_clean_rx_ring(rx_ring);
0400 goto rx_skip_free;
0401 }
0402
0403
0404 for (i = 0; i < rx_ring->count; i++) {
0405 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
0406
0407 if (!rx_buf->page)
0408 continue;
0409
0410
0411
0412
0413 dma_sync_single_range_for_cpu(dev, rx_buf->dma,
0414 rx_buf->page_offset,
0415 rx_ring->rx_buf_len,
0416 DMA_FROM_DEVICE);
0417
0418
0419 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
0420 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
0421 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
0422
0423 rx_buf->page = NULL;
0424 rx_buf->page_offset = 0;
0425 }
0426
0427 rx_skip_free:
0428 if (rx_ring->xsk_pool)
0429 memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
0430 else
0431 memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));
0432
0433
0434 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
0435 PAGE_SIZE);
0436 memset(rx_ring->desc, 0, size);
0437
0438 rx_ring->next_to_alloc = 0;
0439 rx_ring->next_to_clean = 0;
0440 rx_ring->next_to_use = 0;
0441 }
0442
/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
0449 void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
0450 {
0451 u32 size;
0452
0453 ice_clean_rx_ring(rx_ring);
0454 if (rx_ring->vsi->type == ICE_VSI_PF)
0455 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
0456 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
0457 rx_ring->xdp_prog = NULL;
0458 if (rx_ring->xsk_pool) {
0459 kfree(rx_ring->xdp_buf);
0460 rx_ring->xdp_buf = NULL;
0461 } else {
0462 kfree(rx_ring->rx_buf);
0463 rx_ring->rx_buf = NULL;
0464 }
0465
0466 if (rx_ring->desc) {
0467 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
0468 PAGE_SIZE);
0469 dmam_free_coherent(rx_ring->dev, size,
0470 rx_ring->desc, rx_ring->dma);
0471 rx_ring->desc = NULL;
0472 }
0473 }
0474
/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
0481 int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
0482 {
0483 struct device *dev = rx_ring->dev;
0484 u32 size;
0485
0486 if (!dev)
0487 return -ENOMEM;
0488
0489
0490 WARN_ON(rx_ring->rx_buf);
0491 rx_ring->rx_buf =
0492 kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
0493 if (!rx_ring->rx_buf)
0494 return -ENOMEM;
0495
0496
0497 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
0498 PAGE_SIZE);
0499 rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
0500 GFP_KERNEL);
0501 if (!rx_ring->desc) {
0502 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
0503 size);
0504 goto err;
0505 }
0506
0507 rx_ring->next_to_use = 0;
0508 rx_ring->next_to_clean = 0;
0509
0510 if (ice_is_xdp_ena_vsi(rx_ring->vsi))
0511 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
0512
0513 if (rx_ring->vsi->type == ICE_VSI_PF &&
0514 !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
0515 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
0516 rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
0517 goto err;
0518 return 0;
0519
0520 err:
0521 kfree(rx_ring->rx_buf);
0522 rx_ring->rx_buf = NULL;
0523 return -ENOMEM;
0524 }
0525
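/**
 * ice_rx_frame_truesize - Returns an actual size of Rx frame in memory
 * @rx_ring: Rx ring we are requesting the frame size of
 * @size: Packet length from rx_desc
 *
 * Returns an actual size of Rx frame in memory, considering page size
 * and SKB data alignment.
 */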
0526 static unsigned int
0527 ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size)
0528 {
0529 unsigned int truesize;
0530
0531 #if (PAGE_SIZE < 8192)
0532 truesize = ice_rx_pg_size(rx_ring) / 2;
0533 #else
0534 truesize = rx_ring->rx_offset ?
0535 SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
0536 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
0537 SKB_DATA_ALIGN(size);
0538 #endif
0539 return truesize;
0540 }
0541
/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
0551 static int
0552 ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
0553 struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
0554 {
0555 int err;
0556 u32 act;
0557
0558 act = bpf_prog_run_xdp(xdp_prog, xdp);
0559 switch (act) {
0560 case XDP_PASS:
0561 return ICE_XDP_PASS;
0562 case XDP_TX:
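/* XDP Tx rings can be shared between CPUs when there are fewer
 * rings than cores; in that case the static key is enabled and
 * the per-ring spinlock serializes access.
 */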
0563 if (static_branch_unlikely(&ice_xdp_locking_key))
0564 spin_lock(&xdp_ring->tx_lock);
0565 err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
0566 if (static_branch_unlikely(&ice_xdp_locking_key))
0567 spin_unlock(&xdp_ring->tx_lock);
0568 if (err == ICE_XDP_CONSUMED)
0569 goto out_failure;
0570 return err;
0571 case XDP_REDIRECT:
0572 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
0573 if (err)
0574 goto out_failure;
0575 return ICE_XDP_REDIR;
0576 default:
0577 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
0578 fallthrough;
0579 case XDP_ABORTED:
0580 out_failure:
0581 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
0582 fallthrough;
0583 case XDP_DROP:
0584 return ICE_XDP_CONSUMED;
0585 }
0586 }
0587
/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns number of frames successfully sent. Failed frames
 * will be free'ed by XDP core.
 * For error cases, a negative errno code is returned and no-frames
 * are transmitted (caller must handle freeing frames).
 */
0600 int
0601 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
0602 u32 flags)
0603 {
0604 struct ice_netdev_priv *np = netdev_priv(dev);
0605 unsigned int queue_index = smp_processor_id();
0606 struct ice_vsi *vsi = np->vsi;
0607 struct ice_tx_ring *xdp_ring;
0608 int nxmit = 0, i;
0609
0610 if (test_bit(ICE_VSI_DOWN, vsi->state))
0611 return -ENETDOWN;
0612
0613 if (!ice_is_xdp_ena_vsi(vsi))
0614 return -ENXIO;
0615
0616 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
0617 return -EINVAL;
0618
0619 if (static_branch_unlikely(&ice_xdp_locking_key)) {
0620 queue_index %= vsi->num_xdp_txq;
0621 xdp_ring = vsi->xdp_rings[queue_index];
0622 spin_lock(&xdp_ring->tx_lock);
0623 } else {
0624
0625 if (unlikely(queue_index >= vsi->num_xdp_txq))
0626 return -ENXIO;
0627 xdp_ring = vsi->xdp_rings[queue_index];
0628 }
0629
0630 for (i = 0; i < n; i++) {
0631 struct xdp_frame *xdpf = frames[i];
0632 int err;
0633
0634 err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
0635 if (err != ICE_XDP_TX)
0636 break;
0637 nxmit++;
0638 }
0639
0640 if (unlikely(flags & XDP_XMIT_FLUSH))
0641 ice_xdp_ring_update_tail(xdp_ring);
0642
0643 if (static_branch_unlikely(&ice_xdp_locking_key))
0644 spin_unlock(&xdp_ring->tx_lock);
0645
0646 return nxmit;
0647 }
0648
/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
0657 static bool
0658 ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
0659 {
0660 struct page *page = bi->page;
0661 dma_addr_t dma;
0662
0663
0664 if (likely(page))
0665 return true;
0666
0667
0668 page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
0669 if (unlikely(!page)) {
0670 rx_ring->rx_stats.alloc_page_failed++;
0671 return false;
0672 }
0673
0674
0675 dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
0676 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
0677
0678
0679
0680
0681 if (dma_mapping_error(rx_ring->dev, dma)) {
0682 __free_pages(page, ice_rx_pg_order(rx_ring));
0683 rx_ring->rx_stats.alloc_page_failed++;
0684 return false;
0685 }
0686
0687 bi->dma = dma;
0688 bi->page = page;
0689 bi->page_offset = rx_ring->rx_offset;
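/* take a large page reference up front; pagecnt_bias then tracks
 * buffer reuse locally so recycling does not need an atomic page
 * refcount update per buffer
 */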
0690 page_ref_add(page, USHRT_MAX - 1);
0691 bi->pagecnt_bias = USHRT_MAX;
0692
0693 return true;
0694 }
0695
/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple tail writes per call.
 */
0709 bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
0710 {
0711 union ice_32b_rx_flex_desc *rx_desc;
0712 u16 ntu = rx_ring->next_to_use;
0713 struct ice_rx_buf *bi;
0714
0715
0716 if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
0717 !cleaned_count)
0718 return false;
0719
0720
0721 rx_desc = ICE_RX_DESC(rx_ring, ntu);
0722 bi = &rx_ring->rx_buf[ntu];
0723
0724 do {
0725
0726 if (!ice_alloc_mapped_page(rx_ring, bi))
0727 break;
0728
0729
0730 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
0731 bi->page_offset,
0732 rx_ring->rx_buf_len,
0733 DMA_FROM_DEVICE);
0734
0735
0736
0737
0738 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
0739
0740 rx_desc++;
0741 bi++;
0742 ntu++;
0743 if (unlikely(ntu == rx_ring->count)) {
0744 rx_desc = ICE_RX_DESC(rx_ring, 0);
0745 bi = rx_ring->rx_buf;
0746 ntu = 0;
0747 }
0748
0749
0750 rx_desc->wb.status_error0 = 0;
0751
0752 cleaned_count--;
0753 } while (cleaned_count);
0754
0755 if (rx_ring->next_to_use != ntu)
0756 ice_release_rx_desc(rx_ring, ntu);
0757
0758 return !!cleaned_count;
0759 }
0760
/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by "size" bytes.
 */
0771 static void
0772 ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
0773 {
0774 #if (PAGE_SIZE < 8192)
0775
0776 rx_buf->page_offset ^= size;
0777 #else
0778
0779 rx_buf->page_offset += size;
0780 #endif
0781 }
0782
/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed.
 */
0793 static bool
0794 ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
0795 {
0796 unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
0797 struct page *page = rx_buf->page;
0798
0799
0800 if (!dev_page_is_reusable(page))
0801 return false;
0802
0803 #if (PAGE_SIZE < 8192)
0804
0805 if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
0806 return false;
0807 #else
0808 #define ICE_LAST_OFFSET \
0809 (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
0810 if (rx_buf->page_offset > ICE_LAST_OFFSET)
0811 return false;
0812 #endif
0813
0814
0815
0816
0817
0818 if (unlikely(pagecnt_bias == 1)) {
0819 page_ref_add(page, USHRT_MAX - 1);
0820 rx_buf->pagecnt_bias = USHRT_MAX;
0821 }
0822
0823 return true;
0824 }
0825
/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */
0837 static void
0838 ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
0839 struct sk_buff *skb, unsigned int size)
0840 {
0841 #if (PAGE_SIZE >= 8192)
0842 unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
0843 #else
0844 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
0845 #endif
0846
0847 if (!size)
0848 return;
0849 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
0850 rx_buf->page_offset, size, truesize);
0851
0852
0853 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
0854 }
0855
/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
0863 static void
0864 ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
0865 {
0866 u16 nta = rx_ring->next_to_alloc;
0867 struct ice_rx_buf *new_buf;
0868
0869 new_buf = &rx_ring->rx_buf[nta];
0870
0871
0872 nta++;
0873 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
0874
0875
0876
0877
0878
0879 new_buf->dma = old_buf->dma;
0880 new_buf->page = old_buf->page;
0881 new_buf->page_offset = old_buf->page_offset;
0882 new_buf->pagecnt_bias = old_buf->pagecnt_bias;
0883 }
0884
/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 * @rx_buf_pgcnt: rx_buf page refcount
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
0894 static struct ice_rx_buf *
0895 ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
0896 int *rx_buf_pgcnt)
0897 {
0898 struct ice_rx_buf *rx_buf;
0899
0900 rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
0901 *rx_buf_pgcnt =
0902 #if (PAGE_SIZE < 8192)
0903 page_count(rx_buf->page);
0904 #else
0905 0;
0906 #endif
0907 prefetchw(rx_buf->page);
0908
0909 if (!size)
0910 return rx_buf;
0911
0912 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
0913 rx_buf->page_offset, size,
0914 DMA_FROM_DEVICE);
0915
0916
0917 rx_buf->pagecnt_bias--;
0918
0919 return rx_buf;
0920 }
0921
/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */
0931 static struct sk_buff *
0932 ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
0933 struct xdp_buff *xdp)
0934 {
0935 u8 metasize = xdp->data - xdp->data_meta;
0936 #if (PAGE_SIZE < 8192)
0937 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
0938 #else
0939 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
0940 SKB_DATA_ALIGN(xdp->data_end -
0941 xdp->data_hard_start);
0942 #endif
0943 struct sk_buff *skb;
0944
0945
0946
0947
0948
0949
0950 net_prefetch(xdp->data_meta);
0951
0952 skb = napi_build_skb(xdp->data_hard_start, truesize);
0953 if (unlikely(!skb))
0954 return NULL;
0955
0956
0957
0958
0959 skb_record_rx_queue(skb, rx_ring->q_index);
0960
0961
0962 skb_reserve(skb, xdp->data - xdp->data_hard_start);
0963 __skb_put(skb, xdp->data_end - xdp->data);
0964 if (metasize)
0965 skb_metadata_set(skb, metasize);
0966
0967
0968 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
0969
0970 return skb;
0971 }
0972
/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
0983 static struct sk_buff *
0984 ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
0985 struct xdp_buff *xdp)
0986 {
0987 unsigned int metasize = xdp->data - xdp->data_meta;
0988 unsigned int size = xdp->data_end - xdp->data;
0989 unsigned int headlen;
0990 struct sk_buff *skb;
0991
0992
0993 net_prefetch(xdp->data_meta);
0994
0995
0996 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
0997 ICE_RX_HDR_SIZE + metasize,
0998 GFP_ATOMIC | __GFP_NOWARN);
0999 if (unlikely(!skb))
1000 return NULL;
1001
1002 skb_record_rx_queue(skb, rx_ring->q_index);
1003
1004 headlen = size;
1005 if (headlen > ICE_RX_HDR_SIZE)
1006 headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
1007
1008
1009 memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
1010 ALIGN(headlen + metasize, sizeof(long)));
1011
1012 if (metasize) {
1013 skb_metadata_set(skb, metasize);
1014 __skb_pull(skb, metasize);
1015 }
1016
1017
1018 size -= headlen;
1019 if (size) {
1020 #if (PAGE_SIZE >= 8192)
1021 unsigned int truesize = SKB_DATA_ALIGN(size);
1022 #else
1023 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
1024 #endif
1025 skb_add_rx_frag(skb, 0, rx_buf->page,
1026 rx_buf->page_offset + headlen, size, truesize);
1027
1028 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
1029 } else {
1030
1031
1032
1033
1034 rx_buf->pagecnt_bias++;
1035 }
1036
1037 return skb;
1038 }
1039
/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
 *
 * This function will update next_to_clean and then clean up the contents
 * of the rx_buf. It will either recycle the buffer or unmap it and free
 * the associated resources.
 */
1050 static void
1051 ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
1052 int rx_buf_pgcnt)
1053 {
1054 u16 ntc = rx_ring->next_to_clean + 1;
1055
1056
1057 ntc = (ntc < rx_ring->count) ? ntc : 0;
1058 rx_ring->next_to_clean = ntc;
1059
1060 if (!rx_buf)
1061 return;
1062
1063 if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
1064
1065 ice_reuse_rx_page(rx_ring, rx_buf);
1066 } else {
1067
1068 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
1069 ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
1070 ICE_RX_DMA_ATTR);
1071 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
1072 }
1073
1074
1075 rx_buf->page = NULL;
1076 }
1077
/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * If the buffer is an EOP buffer, this function exits returning false,
 * otherwise return true indicating that this is in fact a non-EOP buffer.
 */
1086 static bool
1087 ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
1088 {
1089
1090 #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
1091 if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
1092 return false;
1093
1094 rx_ring->rx_stats.non_eop_descs++;
1095
1096 return true;
1097 }
1098
/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
1111 int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
1112 {
1113 unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
1114 u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
1115 unsigned int offset = rx_ring->rx_offset;
1116 struct ice_tx_ring *xdp_ring = NULL;
1117 unsigned int xdp_res, xdp_xmit = 0;
1118 struct sk_buff *skb = rx_ring->skb;
1119 struct bpf_prog *xdp_prog = NULL;
1120 struct xdp_buff xdp;
1121 bool failure;
1122
1123
1124 #if (PAGE_SIZE < 8192)
1125 frame_sz = ice_rx_frame_truesize(rx_ring, 0);
1126 #endif
1127 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
1128
1129 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1130 if (xdp_prog)
1131 xdp_ring = rx_ring->xdp_ring;
1132
1133
1134 while (likely(total_rx_pkts < (unsigned int)budget)) {
1135 union ice_32b_rx_flex_desc *rx_desc;
1136 struct ice_rx_buf *rx_buf;
1137 unsigned char *hard_start;
1138 unsigned int size;
1139 u16 stat_err_bits;
1140 int rx_buf_pgcnt;
1141 u16 vlan_tag = 0;
1142 u16 rx_ptype;
1143
1144
1145 rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1146
1147
1148
1149
1150
1151
1152 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1153 if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
1154 break;
1155
1156
1157
1158
1159
1160 dma_rmb();
1161
1162 ice_trace(clean_rx_irq, rx_ring, rx_desc);
1163 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
1164 struct ice_vsi *ctrl_vsi = rx_ring->vsi;
1165
1166 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
1167 ctrl_vsi->vf)
1168 ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
1169 ice_put_rx_buf(rx_ring, NULL, 0);
1170 cleaned_count++;
1171 continue;
1172 }
1173
1174 size = le16_to_cpu(rx_desc->wb.pkt_len) &
1175 ICE_RX_FLX_DESC_PKT_LEN_M;
1176
1177
1178 rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
1179
1180 if (!size) {
1181 xdp.data = NULL;
1182 xdp.data_end = NULL;
1183 xdp.data_hard_start = NULL;
1184 xdp.data_meta = NULL;
1185 goto construct_skb;
1186 }
1187
1188 hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
1189 offset;
1190 xdp_prepare_buff(&xdp, hard_start, offset, size, true);
1191 #if (PAGE_SIZE > 4096)
1192
1193 xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
1194 #endif
1195
1196 if (!xdp_prog)
1197 goto construct_skb;
1198
1199 xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring);
1200 if (!xdp_res)
1201 goto construct_skb;
1202 if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
1203 xdp_xmit |= xdp_res;
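/* buffer was consumed by XDP_TX or XDP_REDIRECT; advance the page
 * offset so the remaining part of the page can be reused
 */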
1204 ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
1205 } else {
1206 rx_buf->pagecnt_bias++;
1207 }
1208 total_rx_bytes += size;
1209 total_rx_pkts++;
1210
1211 cleaned_count++;
1212 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1213 continue;
1214 construct_skb:
1215 if (skb) {
1216 ice_add_rx_frag(rx_ring, rx_buf, skb, size);
1217 } else if (likely(xdp.data)) {
1218 if (ice_ring_uses_build_skb(rx_ring))
1219 skb = ice_build_skb(rx_ring, rx_buf, &xdp);
1220 else
1221 skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
1222 }
1223
1224 if (!skb) {
1225 rx_ring->rx_stats.alloc_buf_failed++;
1226 if (rx_buf)
1227 rx_buf->pagecnt_bias++;
1228 break;
1229 }
1230
1231 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1232 cleaned_count++;
1233
1234
1235 if (ice_is_non_eop(rx_ring, rx_desc))
1236 continue;
1237
1238 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1239 if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
1240 stat_err_bits))) {
1241 dev_kfree_skb_any(skb);
1242 continue;
1243 }
1244
1245 vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
1246
1247
1248 if (eth_skb_pad(skb)) {
1249 skb = NULL;
1250 continue;
1251 }
1252
1253
1254 total_rx_bytes += skb->len;
1255
1256
1257 rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1258 ICE_RX_FLEX_DESC_PTYPE_M;
1259
1260 ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1261
1262 ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
1263
1264 ice_receive_skb(rx_ring, skb, vlan_tag);
1265 skb = NULL;
1266
1267
1268 total_rx_pkts++;
1269 }
1270
1271
1272 failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
1273
1274 if (xdp_prog)
1275 ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
1276 rx_ring->skb = skb;
1277
1278 ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
1279
1280
1281 return failure ? budget : (int)total_rx_pkts;
1282 }
1283
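/**
 * __ice_update_sample - update the DIM sample for a ring container
 * @q_vector: interrupt vector associated with the ring container
 * @rc: ring container to pull stats from
 * @sample: DIM sample to populate
 * @is_tx: true when the sample is for the Tx rings of the container
 */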
1284 static void __ice_update_sample(struct ice_q_vector *q_vector,
1285 struct ice_ring_container *rc,
1286 struct dim_sample *sample,
1287 bool is_tx)
1288 {
1289 u64 packets = 0, bytes = 0;
1290
1291 if (is_tx) {
1292 struct ice_tx_ring *tx_ring;
1293
1294 ice_for_each_tx_ring(tx_ring, *rc) {
1295 packets += tx_ring->stats.pkts;
1296 bytes += tx_ring->stats.bytes;
1297 }
1298 } else {
1299 struct ice_rx_ring *rx_ring;
1300
1301 ice_for_each_rx_ring(rx_ring, *rc) {
1302 packets += rx_ring->stats.pkts;
1303 bytes += rx_ring->stats.bytes;
1304 }
1305 }
1306
1307 dim_update_sample(q_vector->total_events, packets, bytes, sample);
1308 sample->comp_ctr = 0;
1309
1310
1311
1312
1313
1314
1315 if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000)
1316 rc->dim.state = DIM_START_MEASURE;
1317 }
1318
/**
 * ice_net_dim - Update net DIM algorithm
 * @q_vector: the vector associated with the interrupt
 *
 * Create a DIM sample and notify net_dim() so that it can possibly decide
 * a new ITR (interrupt throttle rate) value.
 *
 * This function is a no-op if the ring is not configured to dynamic ITR.
 */
1328 static void ice_net_dim(struct ice_q_vector *q_vector)
1329 {
1330 struct ice_ring_container *tx = &q_vector->tx;
1331 struct ice_ring_container *rx = &q_vector->rx;
1332
1333 if (ITR_IS_DYNAMIC(tx)) {
1334 struct dim_sample dim_sample;
1335
1336 __ice_update_sample(q_vector, tx, &dim_sample, true);
1337 net_dim(&tx->dim, dim_sample);
1338 }
1339
1340 if (ITR_IS_DYNAMIC(rx)) {
1341 struct dim_sample dim_sample;
1342
1343 __ice_update_sample(q_vector, rx, &dim_sample, false);
1344 net_dim(&rx->dim, dim_sample);
1345 }
1346 }
1347
/**
 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
 * @itr_idx: interrupt throttling index
 * @itr: interrupt throttling value in usecs
 */
1353 static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1354 {
1355
1356
1357
1358
1359
1360
1361
1362 itr &= ICE_ITR_MASK;
1363
1364 return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1365 (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1366 (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
1367 }
1368
/**
 * ice_enable_interrupt - re-enable MSI-X interrupt
 * @q_vector: the vector associated with the interrupt to enable
 *
 * If the VSI is down there is nothing to do. When re-enabling after
 * write-back-on-ITR was in use, also trigger a software interrupt so
 * that any pending write-backs are flushed out.
 */
1377 static void ice_enable_interrupt(struct ice_q_vector *q_vector)
1378 {
1379 struct ice_vsi *vsi = q_vector->vsi;
1380 bool wb_en = q_vector->wb_on_itr;
1381 u32 itr_val;
1382
1383 if (test_bit(ICE_DOWN, vsi->state))
1384 return;
1385
1386
1387
1388
1389
1390
1391 if (!wb_en) {
1392 itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1393 } else {
1394 q_vector->wb_on_itr = false;
1395
1396
1397
1398
1399
1400
1401
1402 itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K);
1403 itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
1404 ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S |
1405 GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
1406 }
1407 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
1408 }
1409
/**
 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
 * @q_vector: q_vector to set WB_ON_ITR on
 *
 * We need to tell hardware to write-back completed descriptors even when
 * interrupts are disabled. Descriptors will be written back on cache line
 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
 * descriptors may not be written back if they don't fill a cache line until
 * the next interrupt.
 *
 * This sets the write-back frequency to whatever was set previously for the
 * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
 * aren't meddling with the INTENA_M bit.
 */
1424 static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
1425 {
1426 struct ice_vsi *vsi = q_vector->vsi;
1427
1428
1429 if (q_vector->wb_on_itr)
1430 return;
1431
1432
1433
1434
1435
1436 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1437 ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
1438 GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
1439 GLINT_DYN_CTL_WB_ON_ITR_M);
1440
1441 q_vector->wb_on_itr = true;
1442 }
1443
/**
 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 */
1453 int ice_napi_poll(struct napi_struct *napi, int budget)
1454 {
1455 struct ice_q_vector *q_vector =
1456 container_of(napi, struct ice_q_vector, napi);
1457 struct ice_tx_ring *tx_ring;
1458 struct ice_rx_ring *rx_ring;
1459 bool clean_complete = true;
1460 int budget_per_ring;
1461 int work_done = 0;
1462
1463
1464
1465
1466 ice_for_each_tx_ring(tx_ring, q_vector->tx) {
1467 bool wd;
1468
1469 if (tx_ring->xsk_pool)
1470 wd = ice_xmit_zc(tx_ring);
1471 else if (ice_ring_is_xdp(tx_ring))
1472 wd = true;
1473 else
1474 wd = ice_clean_tx_irq(tx_ring, budget);
1475
1476 if (!wd)
1477 clean_complete = false;
1478 }
1479
1480
1481 if (unlikely(budget <= 0))
1482 return budget;
1483
1484
1485 if (unlikely(q_vector->num_ring_rx > 1))
1486
1487
1488
1489
1490 budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
1491 else
1492
1493 budget_per_ring = budget;
1494
1495 ice_for_each_rx_ring(rx_ring, q_vector->rx) {
1496 int cleaned;
1497
1498
1499
1500
1501
1502 cleaned = rx_ring->xsk_pool ?
1503 ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
1504 ice_clean_rx_irq(rx_ring, budget_per_ring);
1505 work_done += cleaned;
1506
1507 if (cleaned >= budget_per_ring)
1508 clean_complete = false;
1509 }
1510
1511
1512 if (!clean_complete) {
1513
1514
1515
1516 ice_set_wb_on_itr(q_vector);
1517 return budget;
1518 }
1519
1520
1521
1522
1523 if (napi_complete_done(napi, work_done)) {
1524 ice_net_dim(q_vector);
1525 ice_enable_interrupt(q_vector);
1526 } else {
1527 ice_set_wb_on_itr(q_vector);
1528 }
1529
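/* cap the return value at budget - 1 so a completed poll is never
 * reported back to the NAPI core as having used the full budget
 */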
1530 return min_t(int, work_done, budget - 1);
1531 }
1532
/**
 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */
1540 static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
1541 {
1542 netif_tx_stop_queue(txring_txq(tx_ring));
1543
1544 smp_mb();
1545
1546
1547 if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1548 return -EBUSY;
1549
1550
1551 netif_tx_start_queue(txring_txq(tx_ring));
1552 ++tx_ring->tx_stats.restart_q;
1553 return 0;
1554 }
1555
/**
 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 */
1563 static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
1564 {
1565 if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1566 return 0;
1567
1568 return __ice_maybe_stop_tx(tx_ring, size);
1569 }
1570
/**
 * ice_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @first: first buffer info buffer to use
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor.
 */
1581 static void
1582 ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
1583 struct ice_tx_offload_params *off)
1584 {
1585 u64 td_offset, td_tag, td_cmd;
1586 u16 i = tx_ring->next_to_use;
1587 unsigned int data_len, size;
1588 struct ice_tx_desc *tx_desc;
1589 struct ice_tx_buf *tx_buf;
1590 struct sk_buff *skb;
1591 skb_frag_t *frag;
1592 dma_addr_t dma;
1593 bool kick;
1594
1595 td_tag = off->td_l2tag1;
1596 td_cmd = off->td_cmd;
1597 td_offset = off->td_offset;
1598 skb = first->skb;
1599
1600 data_len = skb->data_len;
1601 size = skb_headlen(skb);
1602
1603 tx_desc = ICE_TX_DESC(tx_ring, i);
1604
1605 if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1606 td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1607 td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1608 ICE_TX_FLAGS_VLAN_S;
1609 }
1610
1611 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1612
1613 tx_buf = first;
1614
1615 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1616 unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1617
1618 if (dma_mapping_error(tx_ring->dev, dma))
1619 goto dma_error;
1620
1621
1622 dma_unmap_len_set(tx_buf, len, size);
1623 dma_unmap_addr_set(tx_buf, dma, dma);
1624
1625
1626 max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1627 tx_desc->buf_addr = cpu_to_le64(dma);
1628
1629
1630
1631
1632 while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1633 tx_desc->cmd_type_offset_bsz =
1634 ice_build_ctob(td_cmd, td_offset, max_data,
1635 td_tag);
1636
1637 tx_desc++;
1638 i++;
1639
1640 if (i == tx_ring->count) {
1641 tx_desc = ICE_TX_DESC(tx_ring, 0);
1642 i = 0;
1643 }
1644
1645 dma += max_data;
1646 size -= max_data;
1647
1648 max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1649 tx_desc->buf_addr = cpu_to_le64(dma);
1650 }
1651
1652 if (likely(!data_len))
1653 break;
1654
1655 tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
1656 size, td_tag);
1657
1658 tx_desc++;
1659 i++;
1660
1661 if (i == tx_ring->count) {
1662 tx_desc = ICE_TX_DESC(tx_ring, 0);
1663 i = 0;
1664 }
1665
1666 size = skb_frag_size(frag);
1667 data_len -= size;
1668
1669 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1670 DMA_TO_DEVICE);
1671
1672 tx_buf = &tx_ring->tx_buf[i];
1673 }
1674
1675
1676 skb_tx_timestamp(first->skb);
1677
1678 i++;
1679 if (i == tx_ring->count)
1680 i = 0;
1681
1682
1683 td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
1684 tx_desc->cmd_type_offset_bsz =
1685 ice_build_ctob(td_cmd, td_offset, size, td_tag);
1686
1687
1688
1689
1690
1691
1692
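/* Force memory writes to complete before letting h/w know there
 * are new descriptors to fetch.
 *
 * We also use this memory barrier to make certain all of the
 * status bits have been updated before next_to_watch is written.
 */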
1693 wmb();
1694
1695
1696 first->next_to_watch = tx_desc;
1697
1698 tx_ring->next_to_use = i;
1699
1700 ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1701
1702
1703 kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
1704 netdev_xmit_more());
1705 if (kick)
1706
1707 writel(i, tx_ring->tail);
1708
1709 return;
1710
1711 dma_error:
1712
1713 for (;;) {
1714 tx_buf = &tx_ring->tx_buf[i];
1715 ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1716 if (tx_buf == first)
1717 break;
1718 if (i == 0)
1719 i = tx_ring->count;
1720 i--;
1721 }
1722
1723 tx_ring->next_to_use = i;
1724 }
1725
/**
 * ice_tx_csum - Enable Tx checksum offloads
 * @first: pointer to the first descriptor
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
 */
1733 static
1734 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1735 {
1736 u32 l4_len = 0, l3_len = 0, l2_len = 0;
1737 struct sk_buff *skb = first->skb;
1738 union {
1739 struct iphdr *v4;
1740 struct ipv6hdr *v6;
1741 unsigned char *hdr;
1742 } ip;
1743 union {
1744 struct tcphdr *tcp;
1745 unsigned char *hdr;
1746 } l4;
1747 __be16 frag_off, protocol;
1748 unsigned char *exthdr;
1749 u32 offset, cmd = 0;
1750 u8 l4_proto = 0;
1751
1752 if (skb->ip_summed != CHECKSUM_PARTIAL)
1753 return 0;
1754
1755 protocol = vlan_get_protocol(skb);
1756
1757 if (eth_p_mpls(protocol)) {
1758 ip.hdr = skb_inner_network_header(skb);
1759 l4.hdr = skb_checksum_start(skb);
1760 } else {
1761 ip.hdr = skb_network_header(skb);
1762 l4.hdr = skb_transport_header(skb);
1763 }
1764
1765
1766 l2_len = ip.hdr - skb->data;
1767 offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1768
1769
1770
1771
1772 if (ip.v4->version == 4)
1773 first->tx_flags |= ICE_TX_FLAGS_IPV4;
1774 else if (ip.v6->version == 6)
1775 first->tx_flags |= ICE_TX_FLAGS_IPV6;
1776
1777 if (skb->encapsulation) {
1778 bool gso_ena = false;
1779 u32 tunnel = 0;
1780
1781
1782 if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1783 tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
1784 ICE_TX_CTX_EIPT_IPV4 :
1785 ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
1786 l4_proto = ip.v4->protocol;
1787 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1788 int ret;
1789
1790 tunnel |= ICE_TX_CTX_EIPT_IPV6;
1791 exthdr = ip.hdr + sizeof(*ip.v6);
1792 l4_proto = ip.v6->nexthdr;
1793 ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
1794 &l4_proto, &frag_off);
1795 if (ret < 0)
1796 return -1;
1797 }
1798
1799
1800 switch (l4_proto) {
1801 case IPPROTO_UDP:
1802 tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
1803 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1804 break;
1805 case IPPROTO_GRE:
1806 tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
1807 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1808 break;
1809 case IPPROTO_IPIP:
1810 case IPPROTO_IPV6:
1811 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1812 l4.hdr = skb_inner_network_header(skb);
1813 break;
1814 default:
1815 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1816 return -1;
1817
1818 skb_checksum_help(skb);
1819 return 0;
1820 }
1821
1822
1823 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
1824 ICE_TXD_CTX_QW0_EIPLEN_S;
1825
1826
1827 ip.hdr = skb_inner_network_header(skb);
1828
1829
1830 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
1831 ICE_TXD_CTX_QW0_NATLEN_S;
1832
1833 gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
1834
1835 if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
1836 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
1837 tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
1838
1839
1840 off->cd_tunnel_params |= tunnel;
1841
1842
1843
1844
1845 off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
1846
1847
1848 l4.hdr = skb_inner_transport_header(skb);
1849 l4_proto = 0;
1850
1851
1852 first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
1853 if (ip.v4->version == 4)
1854 first->tx_flags |= ICE_TX_FLAGS_IPV4;
1855 if (ip.v6->version == 6)
1856 first->tx_flags |= ICE_TX_FLAGS_IPV6;
1857 }
1858
1859
1860 if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1861 l4_proto = ip.v4->protocol;
1862
1863
1864
1865 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1866 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1867 else
1868 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1869
1870 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1871 cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1872 exthdr = ip.hdr + sizeof(*ip.v6);
1873 l4_proto = ip.v6->nexthdr;
1874 if (l4.hdr != exthdr)
1875 ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1876 &frag_off);
1877 } else {
1878 return -1;
1879 }
1880
1881
1882 l3_len = l4.hdr - ip.hdr;
1883 offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
1884
1885
1886 switch (l4_proto) {
1887 case IPPROTO_TCP:
1888
1889 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1890 l4_len = l4.tcp->doff;
1891 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1892 break;
1893 case IPPROTO_UDP:
1894
1895 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1896 l4_len = (sizeof(struct udphdr) >> 2);
1897 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1898 break;
1899 case IPPROTO_SCTP:
1900
1901 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1902 l4_len = sizeof(struct sctphdr) >> 2;
1903 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1904 break;
1905
1906 default:
1907 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1908 return -1;
1909 skb_checksum_help(skb);
1910 return 0;
1911 }
1912
1913 off->td_cmd |= cmd;
1914 off->td_offset |= offset;
1915 return 1;
1916 }
1917
/**
 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @tx_ring: ring to send buffer on
 * @first: pointer to struct ice_tx_buf
 *
 * Checks the skb and set up correspondingly several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 */
1926 static void
1927 ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
1928 {
1929 struct sk_buff *skb = first->skb;
1930
1931
1932 if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
1933 return;
1934
1935
1936
1937
1938
1939 if (skb_vlan_tag_present(skb)) {
1940 first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
1941 if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
1942 first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
1943 else
1944 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1945 }
1946
1947 ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
1948 }
1949
/**
 * ice_tso - computes mss and TSO length to prepare for TSO
 * @first: pointer to struct ice_tx_buf
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
 */
1957 static
1958 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1959 {
1960 struct sk_buff *skb = first->skb;
1961 union {
1962 struct iphdr *v4;
1963 struct ipv6hdr *v6;
1964 unsigned char *hdr;
1965 } ip;
1966 union {
1967 struct tcphdr *tcp;
1968 struct udphdr *udp;
1969 unsigned char *hdr;
1970 } l4;
1971 u64 cd_mss, cd_tso_len;
1972 __be16 protocol;
1973 u32 paylen;
1974 u8 l4_start;
1975 int err;
1976
1977 if (skb->ip_summed != CHECKSUM_PARTIAL)
1978 return 0;
1979
1980 if (!skb_is_gso(skb))
1981 return 0;
1982
1983 err = skb_cow_head(skb, 0);
1984 if (err < 0)
1985 return err;
1986
1987
1988 protocol = vlan_get_protocol(skb);
1989
1990 if (eth_p_mpls(protocol))
1991 ip.hdr = skb_inner_network_header(skb);
1992 else
1993 ip.hdr = skb_network_header(skb);
1994 l4.hdr = skb_checksum_start(skb);
1995
1996
1997 if (ip.v4->version == 4) {
1998 ip.v4->tot_len = 0;
1999 ip.v4->check = 0;
2000 } else {
2001 ip.v6->payload_len = 0;
2002 }
2003
2004 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2005 SKB_GSO_GRE_CSUM |
2006 SKB_GSO_IPXIP4 |
2007 SKB_GSO_IPXIP6 |
2008 SKB_GSO_UDP_TUNNEL |
2009 SKB_GSO_UDP_TUNNEL_CSUM)) {
2010 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2011 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2012 l4.udp->len = 0;
2013
2014
2015 l4_start = (u8)(l4.hdr - skb->data);
2016
2017
2018 paylen = skb->len - l4_start;
2019 csum_replace_by_diff(&l4.udp->check,
2020 (__force __wsum)htonl(paylen));
2021 }
2022
2023
2024
2025
2026 ip.hdr = skb_inner_network_header(skb);
2027 l4.hdr = skb_inner_transport_header(skb);
2028
2029
2030 if (ip.v4->version == 4) {
2031 ip.v4->tot_len = 0;
2032 ip.v4->check = 0;
2033 } else {
2034 ip.v6->payload_len = 0;
2035 }
2036 }
2037
2038
2039 l4_start = (u8)(l4.hdr - skb->data);
2040
2041
2042 paylen = skb->len - l4_start;
2043
2044 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
2045 csum_replace_by_diff(&l4.udp->check,
2046 (__force __wsum)htonl(paylen));
2047
2048 off->header_len = (u8)sizeof(l4.udp) + l4_start;
2049 } else {
2050 csum_replace_by_diff(&l4.tcp->check,
2051 (__force __wsum)htonl(paylen));
2052
2053 off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
2054 }
2055
2056
2057 first->gso_segs = skb_shinfo(skb)->gso_segs;
2058 first->bytecount += (first->gso_segs - 1) * off->header_len;
2059
2060 cd_tso_len = skb->len - off->header_len;
2061 cd_mss = skb_shinfo(skb)->gso_size;
2062
2063
2064 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2065 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
2066 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2067 (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
2068 first->tx_flags |= ICE_TX_FLAGS_TSO;
2069 return 1;
2070 }
2071
/**
 * ice_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4KB alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * fragment. In the worst case, we'll have to use 3 descriptors to send a
 * single 12K packet, where the first is 4K - 1 and the second two are 4K
 * each.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
 *
 * Since multiplication and division are commutative, this can be simplified
 * to:
 *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
 */
2100 static unsigned int ice_txd_use_count(unsigned int size)
2101 {
2102 return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2103 }
2104
/**
 * ice_xmit_desc_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb.
 */
2111 static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
2112 {
2113 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
2114 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2115 unsigned int count = 0, size = skb_headlen(skb);
2116
2117 for (;;) {
2118 count += ice_txd_use_count(size);
2119
2120 if (!nr_frags--)
2121 break;
2122
2123 size = skb_frag_size(frag++);
2124 }
2125
2126 return count;
2127 }
2128
/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */
2142 static bool __ice_chk_linearize(struct sk_buff *skb)
2143 {
2144 const skb_frag_t *frag, *stale;
2145 int nr_frags, sum;
2146
2147
2148 nr_frags = skb_shinfo(skb)->nr_frags;
2149 if (nr_frags < (ICE_MAX_BUF_TXD - 1))
2150 return false;
2151
2152
2153
2154
2155 nr_frags -= ICE_MAX_BUF_TXD - 2;
2156 frag = &skb_shinfo(skb)->frags[0];
2157
2158
2159
2160
2161
2162
2163
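/* Initialize sum to one minus gso_size (i.e. the negative of
 * gso_size - 1): the window of fragments being examined must carry
 * at least gso_size bytes of payload or a single segment could need
 * more descriptors than the hardware allows.
 */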
2164 sum = 1 - skb_shinfo(skb)->gso_size;
2165
2166
2167 sum += skb_frag_size(frag++);
2168 sum += skb_frag_size(frag++);
2169 sum += skb_frag_size(frag++);
2170 sum += skb_frag_size(frag++);
2171 sum += skb_frag_size(frag++);
2172
2173
2174
2175
2176 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2177 int stale_size = skb_frag_size(stale);
2178
2179 sum += skb_frag_size(frag++);
2180
2181
2182
2183
2184
2185
2186
2187 if (stale_size > ICE_MAX_DATA_PER_TXD) {
2188 int align_pad = -(skb_frag_off(stale)) &
2189 (ICE_MAX_READ_REQ_SIZE - 1);
2190
2191 sum -= align_pad;
2192 stale_size -= align_pad;
2193
2194 do {
2195 sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2196 stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2197 } while (stale_size > ICE_MAX_DATA_PER_TXD);
2198 }
2199
2200
2201 if (sum < 0)
2202 return true;
2203
2204 if (!nr_frags--)
2205 break;
2206
2207 sum -= stale_size;
2208 }
2209
2210 return false;
2211 }
2212
/**
 * ice_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 */
2222 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2223 {
2224
2225 if (likely(count < ICE_MAX_BUF_TXD))
2226 return false;
2227
2228 if (skb_is_gso(skb))
2229 return __ice_chk_linearize(skb);
2230
2231
2232 return count != ICE_MAX_BUF_TXD;
2233 }
2234
/**
 * ice_tstamp - set up context descriptor for hardware timestamp
 * @tx_ring: pointer to the Tx ring to send buffer on
 * @skb: pointer to the SKB we're sending
 * @first: Tx buffer
 * @off: Tx offload parameters
 */
2242 static void
2243 ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
2244 struct ice_tx_buf *first, struct ice_tx_offload_params *off)
2245 {
2246 s8 idx;
2247
2248
2249 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2250 return;
2251
2252 if (!tx_ring->ptp_tx)
2253 return;
2254
2255
2256 if (first->tx_flags & ICE_TX_FLAGS_TSO)
2257 return;
2258
2259
2260 idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
2261 if (idx < 0)
2262 return;
2263
2264 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2265 (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
2266 ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
2267 first->tx_flags |= ICE_TX_FLAGS_TSYN;
2268 }
2269
/**
 * ice_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
2277 static netdev_tx_t
2278 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
2279 {
2280 struct ice_tx_offload_params offload = { 0 };
2281 struct ice_vsi *vsi = tx_ring->vsi;
2282 struct ice_tx_buf *first;
2283 struct ethhdr *eth;
2284 unsigned int count;
2285 int tso, csum;
2286
2287 ice_trace(xmit_frame_ring, tx_ring, skb);
2288
2289 count = ice_xmit_desc_count(skb);
2290 if (ice_chk_linearize(skb, count)) {
2291 if (__skb_linearize(skb))
2292 goto out_drop;
2293 count = ice_txd_use_count(skb->len);
2294 tx_ring->tx_stats.tx_linearize++;
2295 }
2296
2297
2298
2299
2300
2301
2302
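/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
 *       + a gap to avoid the cache line where head is,
 *       + 1 desc for context descriptor,
 * otherwise try next time
 */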
2303 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2304 ICE_DESCS_FOR_CTX_DESC)) {
2305 tx_ring->tx_stats.tx_busy++;
2306 return NETDEV_TX_BUSY;
2307 }
2308
2309
2310 netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring));
2311
2312 offload.tx_ring = tx_ring;
2313
2314
2315 first = &tx_ring->tx_buf[tx_ring->next_to_use];
2316 first->skb = skb;
2317 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2318 first->gso_segs = 1;
2319 first->tx_flags = 0;
2320
2321
2322 ice_tx_prepare_vlan_flags(tx_ring, first);
2323 if (first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
2324 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2325 (ICE_TX_CTX_DESC_IL2TAG2 <<
2326 ICE_TXD_CTX_QW1_CMD_S));
2327 offload.cd_l2tag2 = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
2328 ICE_TX_FLAGS_VLAN_S;
2329 }
2330
2331
2332 tso = ice_tso(first, &offload);
2333 if (tso < 0)
2334 goto out_drop;
2335
2336
2337 csum = ice_tx_csum(first, &offload);
2338 if (csum < 0)
2339 goto out_drop;
2340
2341
2342 eth = (struct ethhdr *)skb_mac_header(skb);
2343 if (unlikely((skb->priority == TC_PRIO_CONTROL ||
2344 eth->h_proto == htons(ETH_P_LLDP)) &&
2345 vsi->type == ICE_VSI_PF &&
2346 vsi->port_info->qos_cfg.is_sw_lldp))
2347 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2348 ICE_TX_CTX_DESC_SWTCH_UPLINK <<
2349 ICE_TXD_CTX_QW1_CMD_S);
2350
2351 ice_tstamp(tx_ring, skb, first, &offload);
2352 if (ice_is_switchdev_running(vsi->back))
2353 ice_eswitch_set_target_vsi(skb, &offload);
2354
2355 if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
2356 struct ice_tx_ctx_desc *cdesc;
2357 u16 i = tx_ring->next_to_use;
2358
2359
2360 cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2361 i++;
2362 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2363
2364
2365 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2366 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2367 cdesc->rsvd = cpu_to_le16(0);
2368 cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2369 }
2370
2371 ice_tx_map(tx_ring, first, &offload);
2372 return NETDEV_TX_OK;
2373
2374 out_drop:
2375 ice_trace(xmit_frame_ring_drop, tx_ring, skb);
2376 dev_kfree_skb_any(skb);
2377 return NETDEV_TX_OK;
2378 }
2379
/**
 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
2387 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2388 {
2389 struct ice_netdev_priv *np = netdev_priv(netdev);
2390 struct ice_vsi *vsi = np->vsi;
2391 struct ice_tx_ring *tx_ring;
2392
2393 tx_ring = vsi->tx_rings[skb->queue_mapping];
2394
2395
2396
2397
2398 if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2399 return NETDEV_TX_OK;
2400
2401 return ice_xmit_frame_ring(skb, tx_ring);
2402 }
2403
/**
 * ice_get_dscp_up - return the UP/TC value for a SKB
 * @dcbcfg: DCB config that contains DSCP priority table
 * @skb: SKB to query for info to determine UP/TC
 *
 * This function is to only be called when the PF is in L3 DSCP PFC mode
 */
2411 static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb)
2412 {
2413 u8 dscp = 0;
2414
2415 if (skb->protocol == htons(ETH_P_IP))
2416 dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
2417 else if (skb->protocol == htons(ETH_P_IPV6))
2418 dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
2419
2420 return dcbcfg->dscp_map[dscp];
2421 }
2422
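/**
 * ice_select_queue - Select the right Tx queue to be used for a packet
 * @netdev: network interface device structure
 * @skb: send buffer
 * @sb_dev: subordinate device used for L2 forwarding offload
 *
 * In DSCP PFC mode, derive skb->priority from the DSCP field first so
 * that the stack picks a queue in the matching traffic class.
 */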
2423 u16
2424 ice_select_queue(struct net_device *netdev, struct sk_buff *skb,
2425 struct net_device *sb_dev)
2426 {
2427 struct ice_pf *pf = ice_netdev_to_pf(netdev);
2428 struct ice_dcbx_cfg *dcbcfg;
2429
2430 dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
2431 if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP)
2432 skb->priority = ice_get_dscp_up(dcbcfg, skb);
2433
2434 return netdev_pick_tx(netdev, skb, sb_dev);
2435 }
2436
/**
 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
 * @tx_ring: tx_ring to clean
 */
2441 void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
2442 {
2443 struct ice_vsi *vsi = tx_ring->vsi;
2444 s16 i = tx_ring->next_to_clean;
2445 int budget = ICE_DFLT_IRQ_WORK;
2446 struct ice_tx_desc *tx_desc;
2447 struct ice_tx_buf *tx_buf;
2448
2449 tx_buf = &tx_ring->tx_buf[i];
2450 tx_desc = ICE_TX_DESC(tx_ring, i);
2451 i -= tx_ring->count;
2452
2453 do {
2454 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
2455
2456
2457 if (!eop_desc)
2458 break;
2459
2460
2461 smp_rmb();
2462
2463
2464 if (!(eop_desc->cmd_type_offset_bsz &
2465 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
2466 break;
2467
2468
2469 tx_buf->next_to_watch = NULL;
2470 tx_desc->buf_addr = 0;
2471 tx_desc->cmd_type_offset_bsz = 0;
2472
2473
2474 tx_buf++;
2475 tx_desc++;
2476 i++;
2477 if (unlikely(!i)) {
2478 i -= tx_ring->count;
2479 tx_buf = tx_ring->tx_buf;
2480 tx_desc = ICE_TX_DESC(tx_ring, 0);
2481 }
2482
2483
2484 if (dma_unmap_len(tx_buf, len))
2485 dma_unmap_single(tx_ring->dev,
2486 dma_unmap_addr(tx_buf, dma),
2487 dma_unmap_len(tx_buf, len),
2488 DMA_TO_DEVICE);
2489 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
2490 devm_kfree(tx_ring->dev, tx_buf->raw_buf);
2491
2492
2493 tx_buf->raw_buf = NULL;
2494 tx_buf->tx_flags = 0;
2495 tx_buf->next_to_watch = NULL;
2496 dma_unmap_len_set(tx_buf, len, 0);
2497 tx_desc->buf_addr = 0;
2498 tx_desc->cmd_type_offset_bsz = 0;
2499
2500
2501 tx_buf++;
2502 tx_desc++;
2503 i++;
2504 if (unlikely(!i)) {
2505 i -= tx_ring->count;
2506 tx_buf = tx_ring->tx_buf;
2507 tx_desc = ICE_TX_DESC(tx_ring, 0);
2508 }
2509
2510 budget--;
2511 } while (likely(budget));
2512
2513 i += tx_ring->count;
2514 tx_ring->next_to_clean = i;
2515
2516
2517 ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
2518 }