// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>

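/* Ring the Tx doorbell for this queue by writing the (big-endian) producer
 * index into the doorbell slot the device assigned in q_resources.
 */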
static inline void gve_tx_put_doorbell(struct gve_priv *priv,
                                       struct gve_queue_resources *q_resources,
                                       u32 val)
{
        iowrite32be(val, &priv->db_bar2[be32_to_cpu(q_resources->db_index)]);
}

/* gvnic can only transmit from a Registered Segment.
 * We copy skb payloads into the registered segment before writing Tx
 * descriptors and ringing the Tx doorbell.
 *
 * gve_tx_fifo_* manages the Registered Segment as a FIFO - clients must
 * free allocations in the order they were allocated.
 */

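/* Map the pages of the queue page list (QPL) backing this ring into one
 * contiguous kernel virtual mapping so the FIFO can be used as a single
 * circular buffer.
 */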
static int gve_tx_fifo_init(struct gve_priv *priv, struct gve_tx_fifo *fifo)
{
        fifo->base = vmap(fifo->qpl->pages, fifo->qpl->num_entries, VM_MAP,
                          PAGE_KERNEL);
        if (unlikely(!fifo->base)) {
                netif_err(priv, drv, priv->dev, "Failed to vmap fifo, qpl_id = %d\n",
                          fifo->qpl->id);
                return -ENOMEM;
        }

        fifo->size = fifo->qpl->num_entries * PAGE_SIZE;
        atomic_set(&fifo->available, fifo->size);
        fifo->head = 0;
        return 0;
}

static void gve_tx_fifo_release(struct gve_priv *priv, struct gve_tx_fifo *fifo)
{
        WARN(atomic_read(&fifo->available) != fifo->size,
             "Releasing non-empty fifo");

        vunmap(fifo->base);
}

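/* Returns the number of padding bytes needed so an allocation of @bytes does
 * not straddle the end of the FIFO: 0 if it fits before the wrap point,
 * otherwise the distance from the current head to the end of the FIFO.
 */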
static int gve_tx_fifo_pad_alloc_one_frag(struct gve_tx_fifo *fifo,
                                          size_t bytes)
{
        return (fifo->head + bytes < fifo->size) ? 0 : fifo->size - fifo->head;
}

static bool gve_tx_fifo_can_alloc(struct gve_tx_fifo *fifo, size_t bytes)
{
        return (atomic_read(&fifo->available) <= bytes) ? false : true;
}

/* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO
 * @fifo: FIFO to allocate from
 * @bytes: Allocation size
 * @iov: Scatter-gather elements to fill with allocation fragment base/len
 *
 * Returns number of valid elements in iov[] or negative on error.
 *
 * Allocations from a given FIFO must be externally synchronized but concurrent
 * allocation and frees are allowed.
 */
static int gve_tx_alloc_fifo(struct gve_tx_fifo *fifo, size_t bytes,
                             struct gve_tx_iovec iov[2])
{
        size_t overflow, padding;
        u32 aligned_head;
        int nfrags = 0;

        if (!bytes)
                return 0;

        /* This check happens before we know how much padding is needed to
         * align to a cacheline boundary for the payload, but that is fine,
         * because the FIFO head always starts aligned, and the FIFO's
         * boundaries are aligned, so if there is space for the data, there is
         * space for the padding to the next alignment.
         */
        WARN(!gve_tx_fifo_can_alloc(fifo, bytes),
             "Reached %s when there's not enough space in the fifo", __func__);

        nfrags++;

        iov[0].iov_offset = fifo->head;
        iov[0].iov_len = bytes;
        fifo->head += bytes;

        if (fifo->head > fifo->size) {
                /* If the allocation did not fit in the tail fragment of the
                 * FIFO, also use the head fragment.
                 */
                nfrags++;
                overflow = fifo->head - fifo->size;
                iov[0].iov_len -= overflow;
                iov[1].iov_offset = 0; /* Start of fifo */
                iov[1].iov_len = overflow;

                fifo->head = overflow;
        }

        /* Re-align to a cacheline boundary */
        aligned_head = L1_CACHE_ALIGN(fifo->head);
        padding = aligned_head - fifo->head;
        iov[nfrags - 1].iov_padding = padding;
        atomic_sub(bytes + padding, &fifo->available);
        fifo->head = aligned_head;

        if (fifo->head == fifo->size)
                fifo->head = 0;

        return nfrags;
}

/* gve_tx_free_fifo - Return space to Tx FIFO
 * @fifo: FIFO to return fragments to
 * @bytes: Bytes to free
 */
static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes)
{
        atomic_add(bytes, &fifo->available);
}

static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
                             u32 to_do, bool try_to_wake);

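/* Tear down one Tx ring: complete anything outstanding, then release the
 * queue resources, the FIFO/QPL (QPL mode only), the descriptor ring and the
 * per-slot metadata.
 */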
static void gve_tx_free_ring(struct gve_priv *priv, int idx)
{
        struct gve_tx_ring *tx = &priv->tx[idx];
        struct device *hdev = &priv->pdev->dev;
        size_t bytes;
        u32 slots;

        gve_tx_remove_from_block(priv, idx);
        slots = tx->mask + 1;
        gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
        netdev_tx_reset_queue(tx->netdev_txq);

        dma_free_coherent(hdev, sizeof(*tx->q_resources),
                          tx->q_resources, tx->q_resources_bus);
        tx->q_resources = NULL;

        if (!tx->raw_addressing) {
                gve_tx_fifo_release(priv, &tx->tx_fifo);
                gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
                tx->tx_fifo.qpl = NULL;
        }

        bytes = sizeof(*tx->desc) * slots;
        dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
        tx->desc = NULL;

        vfree(tx->info);
        tx->info = NULL;

        netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
}

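/* Allocate one Tx ring: per-slot metadata, the DMA descriptor ring, the copy
 * FIFO (QPL mode only) and the device-visible queue resources.
 */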
static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
{
        struct gve_tx_ring *tx = &priv->tx[idx];
        struct device *hdev = &priv->pdev->dev;
        u32 slots = priv->tx_desc_cnt;
        size_t bytes;

        /* Make sure everything is zeroed to start */
        memset(tx, 0, sizeof(*tx));
        spin_lock_init(&tx->clean_lock);
        tx->q_num = idx;

        tx->mask = slots - 1;

        /* alloc metadata */
        tx->info = vzalloc(sizeof(*tx->info) * slots);
        if (!tx->info)
                return -ENOMEM;

        /* alloc tx queue */
        bytes = sizeof(*tx->desc) * slots;
        tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
        if (!tx->desc)
                goto abort_with_info;

        tx->raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;
        tx->dev = &priv->pdev->dev;
        if (!tx->raw_addressing) {
                tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);
                if (!tx->tx_fifo.qpl)
                        goto abort_with_desc;

                /* map Tx FIFO */
                if (gve_tx_fifo_init(priv, &tx->tx_fifo))
                        goto abort_with_qpl;
        }

        tx->q_resources =
                dma_alloc_coherent(hdev,
                                   sizeof(*tx->q_resources),
                                   &tx->q_resources_bus,
                                   GFP_KERNEL);
        if (!tx->q_resources)
                goto abort_with_fifo;

        netif_dbg(priv, drv, priv->dev, "tx[%d]->bus=%lx\n", idx,
                  (unsigned long)tx->bus);
        tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
        gve_tx_add_to_block(priv, idx);

        return 0;

abort_with_fifo:
        if (!tx->raw_addressing)
                gve_tx_fifo_release(priv, &tx->tx_fifo);
abort_with_qpl:
        if (!tx->raw_addressing)
                gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
abort_with_desc:
        dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
        tx->desc = NULL;
abort_with_info:
        vfree(tx->info);
        tx->info = NULL;
        return -ENOMEM;
}

int gve_tx_alloc_rings(struct gve_priv *priv)
{
        int err = 0;
        int i;

        for (i = 0; i < priv->tx_cfg.num_queues; i++) {
                err = gve_tx_alloc_ring(priv, i);
                if (err) {
                        netif_err(priv, drv, priv->dev,
                                  "Failed to alloc tx ring=%d: err=%d\n",
                                  i, err);
                        break;
                }
        }
        /* Unallocate if there was an error */
        if (err) {
                int j;

                for (j = 0; j < i; j++)
                        gve_tx_free_ring(priv, j);
        }
        return err;
}

void gve_tx_free_rings_gqi(struct gve_priv *priv)
{
        int i;

        for (i = 0; i < priv->tx_cfg.num_queues; i++)
                gve_tx_free_ring(priv, i);
}

/* gve_tx_avail - Calculates the number of slots available in the ring
 * @tx: tx ring to check
 *
 * Returns the number of slots available
 *
 * The capacity of the queue is mask + 1. We don't need to reserve an entry.
 **/
static inline u32 gve_tx_avail(struct gve_tx_ring *tx)
{
        return tx->mask + 1 - (tx->req - tx->done);
}

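/* Worst-case number of FIFO bytes this skb will consume: the full skb plus
 * any padding needed to keep the header in one fragment and to re-align the
 * payload to a cacheline boundary.
 */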
static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
                                              struct sk_buff *skb)
{
        int pad_bytes, align_hdr_pad;
        int bytes;
        int hlen;

        hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) +
                                 tcp_hdrlen(skb) : skb_headlen(skb);

        pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo,
                                                   hlen);
        /* We need to take into account the header alignment padding. */
        align_hdr_pad = L1_CACHE_ALIGN(hlen) - hlen;
        bytes = align_hdr_pad + pad_bytes + skb->len;

        return bytes;
}

/* The most descriptors we could need is MAX_SKB_FRAGS + 4 :
 * 1 for each skb frag
 * 1 for the skb linear portion
 * 1 for when tcp hdr needs to be in separate descriptor
 * 1 if the payload wraps to the beginning of the FIFO
 * 1 for metadata descriptor
 */
#define MAX_TX_DESC_NEEDED      (MAX_SKB_FRAGS + 4)
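/* Undo the DMA mapping for one descriptor's buffer: the linear part of an
 * skb was mapped with dma_map_single(), frags with skb_frag_dma_map(), so
 * unmap accordingly based on whether this slot holds the skb pointer.
 */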
static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
{
        if (info->skb) {
                dma_unmap_single(dev, dma_unmap_addr(info, dma),
                                 dma_unmap_len(info, len),
                                 DMA_TO_DEVICE);
                dma_unmap_len_set(info, len, 0);
        } else {
                dma_unmap_page(dev, dma_unmap_addr(info, dma),
                               dma_unmap_len(info, len),
                               DMA_TO_DEVICE);
                dma_unmap_len_set(info, len, 0);
        }
}

/* Check if sufficient resources (descriptor ring space and FIFO space) are
 * available to transmit the given number of bytes.
 */
static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
{
        bool can_alloc = true;

        if (!tx->raw_addressing)
                can_alloc = gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required);

        return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc);
}

static_assert(NAPI_POLL_WEIGHT >= MAX_TX_DESC_NEEDED);

/* Stops the queue if the skb cannot be transmitted. */
static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
                             struct sk_buff *skb)
{
        int bytes_required = 0;
        u32 nic_done;
        u32 to_do;
        int ret;

        if (!tx->raw_addressing)
                bytes_required = gve_skb_fifo_bytes_required(tx, skb);

        if (likely(gve_can_tx(tx, bytes_required)))
                return 0;

        ret = -EBUSY;
        spin_lock(&tx->clean_lock);
        nic_done = gve_tx_load_event_counter(priv, tx);
        to_do = nic_done - tx->done;

        /* Only try to clean if there is hope for TX */
        if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) {
                if (to_do > 0) {
                        to_do = min_t(u32, to_do, NAPI_POLL_WEIGHT);
                        gve_clean_tx_done(priv, tx, to_do, false);
                }
                if (likely(gve_can_tx(tx, bytes_required)))
                        ret = 0;
        }
        if (ret) {
                /* No space, so stop the queue */
                tx->stop_queue++;
                netif_tx_stop_queue(tx->netdev_txq);
        }
        spin_unlock(&tx->clean_lock);

        return ret;
}

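/* Fill the first (packet) descriptor: checksum/TSO flags, total packet
 * length, the length of the first segment and that segment's address.
 */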
static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
                                 struct sk_buff *skb, bool is_gso,
                                 int l4_hdr_offset, u32 desc_cnt,
                                 u16 hlen, u64 addr)
{
        /* l4_hdr_offset and csum_offset are in units of 16-bit words */
        if (is_gso) {
                pkt_desc->pkt.type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM;
                pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1;
                pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
        } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                pkt_desc->pkt.type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM;
                pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1;
                pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
        } else {
                pkt_desc->pkt.type_flags = GVE_TXD_STD;
                pkt_desc->pkt.l4_csum_offset = 0;
                pkt_desc->pkt.l4_hdr_offset = 0;
        }
        pkt_desc->pkt.desc_cnt = desc_cnt;
        pkt_desc->pkt.len = cpu_to_be16(skb->len);
        pkt_desc->pkt.seg_len = cpu_to_be16(hlen);
        pkt_desc->pkt.seg_addr = cpu_to_be64(addr);
}

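/* Fill an optional metadata descriptor that passes the skb's L4 hash to the
 * device for consistent path (flow) selection.
 */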
static void gve_tx_fill_mtd_desc(union gve_tx_desc *mtd_desc,
                                 struct sk_buff *skb)
{
        BUILD_BUG_ON(sizeof(mtd_desc->mtd) != sizeof(mtd_desc->pkt));

        mtd_desc->mtd.type_flags = GVE_TXD_MTD | GVE_MTD_SUBTYPE_PATH;
        mtd_desc->mtd.path_state = GVE_MTD_PATH_STATE_DEFAULT |
                                   GVE_MTD_PATH_HASH_L4;
        mtd_desc->mtd.path_hash = cpu_to_be32(skb->hash);
        mtd_desc->mtd.reserved0 = 0;
        mtd_desc->mtd.reserved1 = 0;
}

static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
                                 struct sk_buff *skb, bool is_gso,
                                 u16 len, u64 addr)
{
        seg_desc->seg.type_flags = GVE_TXD_SEG;
        if (is_gso) {
                if (skb_is_gso_v6(skb))
                        seg_desc->seg.type_flags |= GVE_TXSF_IPV6;
                seg_desc->seg.l3_offset = skb_network_offset(skb) >> 1;
                seg_desc->seg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
        }
        seg_desc->seg.seg_len = cpu_to_be16(len);
        seg_desc->seg.seg_addr = cpu_to_be64(addr);
}

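/* Sync every QPL page touched by [iov_offset, iov_offset + iov_len) for
 * device access before the NIC reads the copied bytes.
 */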
static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
                                    u64 iov_offset, u64 iov_len)
{
        u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
        u64 first_page = iov_offset / PAGE_SIZE;
        u64 page;

        for (page = first_page; page <= last_page; page++)
                dma_sync_single_for_device(dev, page_buses[page], PAGE_SIZE, DMA_TO_DEVICE);
}

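/* QPL (copy) transmit path: reserve FIFO space for the header and payload,
 * copy the skb into the registered pages, and write one packet descriptor
 * plus optional metadata and segment descriptors. Returns the number of
 * descriptors used.
 */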
static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb)
{
        int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
        union gve_tx_desc *pkt_desc, *seg_desc;
        struct gve_tx_buffer_state *info;
        int mtd_desc_nr = !!skb->l4_hash;
        bool is_gso = skb_is_gso(skb);
        u32 idx = tx->req & tx->mask;
        int payload_iov = 2;
        int copy_offset;
        u32 next_idx;
        int i;

        info = &tx->info[idx];
        pkt_desc = &tx->desc[idx];

        l4_hdr_offset = skb_checksum_start_offset(skb);
        /* If the skb is gso, then we want the tcp header in the first segment
         * otherwise we want the linear portion of the skb (which will contain
         * the checksum because skb->csum_start and skb->csum_offset are given
         * relative to skb->head) in the first segment.
         */
        hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) :
                        skb_headlen(skb);

        info->skb = skb;
        /* We don't want to split the header, so if necessary, pad to the end
         * of the fifo and then put the header at the beginning of the fifo.
         */
        pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen);
        hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes,
                                       &info->iov[0]);
        WARN(!hdr_nfrags, "hdr_nfrags should never be 0!");
        payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen,
                                           &info->iov[payload_iov]);

        gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
                             1 + mtd_desc_nr + payload_nfrags, hlen,
                             info->iov[hdr_nfrags - 1].iov_offset);

        skb_copy_bits(skb, 0,
                      tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
                      hlen);
        gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
                                info->iov[hdr_nfrags - 1].iov_offset,
                                info->iov[hdr_nfrags - 1].iov_len);
        copy_offset = hlen;

        if (mtd_desc_nr) {
                next_idx = (tx->req + 1) & tx->mask;
                gve_tx_fill_mtd_desc(&tx->desc[next_idx], skb);
        }

        for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
                next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask;
                seg_desc = &tx->desc[next_idx];

                gve_tx_fill_seg_desc(seg_desc, skb, is_gso,
                                     info->iov[i].iov_len,
                                     info->iov[i].iov_offset);

                skb_copy_bits(skb, copy_offset,
                              tx->tx_fifo.base + info->iov[i].iov_offset,
                              info->iov[i].iov_len);
                gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
                                        info->iov[i].iov_offset,
                                        info->iov[i].iov_len);
                copy_offset += info->iov[i].iov_len;
        }

        return 1 + mtd_desc_nr + payload_nfrags;
}

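/* Raw-addressing (zero-copy) transmit path: DMA-map the linear portion and
 * each frag of the skb and point the descriptors directly at those mappings.
 * Returns the number of descriptors used, or 0 if the skb was dropped.
 */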
static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
                                  struct sk_buff *skb)
{
        const struct skb_shared_info *shinfo = skb_shinfo(skb);
        int hlen, num_descriptors, l4_hdr_offset;
        union gve_tx_desc *pkt_desc, *mtd_desc, *seg_desc;
        struct gve_tx_buffer_state *info;
        int mtd_desc_nr = !!skb->l4_hash;
        bool is_gso = skb_is_gso(skb);
        u32 idx = tx->req & tx->mask;
        u64 addr;
        u32 len;
        int i;

        info = &tx->info[idx];
        pkt_desc = &tx->desc[idx];

        l4_hdr_offset = skb_checksum_start_offset(skb);
        /* If the skb is gso, then we want only up to the tcp header in the
         * first segment to efficiently replicate on each segment, otherwise
         * we want the linear portion of the skb (which will contain the
         * checksum because skb->csum_start and skb->csum_offset are given
         * relative to skb->head) in the first segment.
         */
        hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) : skb_headlen(skb);
        len = skb_headlen(skb);

        info->skb = skb;

        addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(tx->dev, addr))) {
                tx->dma_mapping_error++;
                goto drop;
        }
        dma_unmap_len_set(info, len, len);
        dma_unmap_addr_set(info, dma, addr);

        num_descriptors = 1 + shinfo->nr_frags;
        if (hlen < len)
                num_descriptors++;
        if (mtd_desc_nr)
                num_descriptors++;

        gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
                             num_descriptors, hlen, addr);

        if (mtd_desc_nr) {
                idx = (idx + 1) & tx->mask;
                mtd_desc = &tx->desc[idx];
                gve_tx_fill_mtd_desc(mtd_desc, skb);
        }

        if (hlen < len) {
                /* For gso the rest of the linear portion of the skb needs to
                 * be in its own descriptor.
                 */
                len -= hlen;
                addr += hlen;
                idx = (idx + 1) & tx->mask;
                seg_desc = &tx->desc[idx];
                gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
        }

        for (i = 0; i < shinfo->nr_frags; i++) {
                const skb_frag_t *frag = &shinfo->frags[i];

                idx = (idx + 1) & tx->mask;
                seg_desc = &tx->desc[idx];
                len = skb_frag_size(frag);
                addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(tx->dev, addr))) {
                        tx->dma_mapping_error++;
                        goto unmap_drop;
                }
                tx->info[idx].skb = NULL;
                dma_unmap_len_set(&tx->info[idx], len, len);
                dma_unmap_addr_set(&tx->info[idx], dma, addr);

                gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
        }

        return num_descriptors;

unmap_drop:
        i += num_descriptors - shinfo->nr_frags;
        while (i--) {
                /* Skip the first node wrt mtd_desc_nr */
                if (i == 1 && mtd_desc_nr == 1)
                        continue;
                idx--;
                gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]);
        }
drop:
        tx->dropped_pkt++;
        return 0;
}

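/* Transmit entry point (ndo_start_xmit). Checks for ring/FIFO space, posts
 * the skb via the copy or raw-addressing path, and rings the doorbell unless
 * the stack signalled that more packets are coming (xmit_more).
 */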
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct gve_priv *priv = netdev_priv(dev);
        struct gve_tx_ring *tx;
        int nsegs;

        WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
             "skb queue index out of range");
        tx = &priv->tx[skb_get_queue_mapping(skb)];
        if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) {
                /* We need to ring the txq doorbell -- we have stopped the Tx
                 * queue for want of resources, but prior calls to gve_tx()
                 * may have added descriptors without ringing the doorbell.
                 */

                gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
                return NETDEV_TX_BUSY;
        }
        if (tx->raw_addressing)
                nsegs = gve_tx_add_skb_no_copy(priv, tx, skb);
        else
                nsegs = gve_tx_add_skb_copy(priv, tx, skb);

        /* If the packet is getting sent, we need to update the skb */
        if (nsegs) {
                netdev_tx_sent_queue(tx->netdev_txq, skb->len);
                skb_tx_timestamp(skb);
                tx->req += nsegs;
        } else {
                dev_kfree_skb_any(skb);
        }

        if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
                return NETDEV_TX_OK;

        /* Give packets to NIC. Even if this packet failed to send the doorbell
         * might need to be rung because of xmit_more.
         */
        gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
        return NETDEV_TX_OK;
}

#define GVE_TX_START_THRESH     PAGE_SIZE

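/* Reclaim up to @to_do completed descriptors: unmap buffers or return FIFO
 * space, free the skbs, update BQL and stats, and optionally wake a stopped
 * queue.
 */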
static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
                             u32 to_do, bool try_to_wake)
{
        struct gve_tx_buffer_state *info;
        u64 pkts = 0, bytes = 0;
        size_t space_freed = 0;
        struct sk_buff *skb;
        int i, j;
        u32 idx;

        for (j = 0; j < to_do; j++) {
                idx = tx->done & tx->mask;
                netif_info(priv, tx_done, priv->dev,
                           "[%d] %s: idx=%d (req=%u done=%u)\n",
                           tx->q_num, __func__, idx, tx->req, tx->done);
                info = &tx->info[idx];
                skb = info->skb;

                /* Unmap the buffer */
                if (tx->raw_addressing)
                        gve_tx_unmap_buf(tx->dev, info);
                tx->done++;
                /* Mark as free */
                if (skb) {
                        info->skb = NULL;
                        bytes += skb->len;
                        pkts++;
                        dev_consume_skb_any(skb);
                        if (tx->raw_addressing)
                                continue;
                        /* FIFO free */
                        for (i = 0; i < ARRAY_SIZE(info->iov); i++) {
                                space_freed += info->iov[i].iov_len + info->iov[i].iov_padding;
                                info->iov[i].iov_len = 0;
                                info->iov[i].iov_padding = 0;
                        }
                }
        }

        if (!tx->raw_addressing)
                gve_tx_free_fifo(&tx->tx_fifo, space_freed);
        u64_stats_update_begin(&tx->statss);
        tx->bytes_done += bytes;
        tx->pkt_done += pkts;
        u64_stats_update_end(&tx->statss);
        netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes);

        /* start the queue if we've stopped it */
#ifndef CONFIG_BQL
        /* Make sure that the doorbells are synced */
        smp_mb();
#endif
        if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) &&
            likely(gve_can_tx(tx, GVE_TX_START_THRESH))) {
                tx->wake_queue++;
                netif_tx_wake_queue(tx->netdev_txq);
        }

        return pkts;
}

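/* Read the device-written event counter for this queue; it reports how many
 * descriptors the NIC has completed so far.
 */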
u32 gve_tx_load_event_counter(struct gve_priv *priv,
                              struct gve_tx_ring *tx)
{
        u32 counter_index = be32_to_cpu(tx->q_resources->counter_index);
        __be32 counter = READ_ONCE(priv->counter_array[counter_index]);

        return be32_to_cpu(counter);
}

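/* NAPI Tx handler: clean up to @budget completed packets and report whether
 * more completion work remains.
 */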
bool gve_tx_poll(struct gve_notify_block *block, int budget)
{
        struct gve_priv *priv = block->priv;
        struct gve_tx_ring *tx = block->tx;
        u32 nic_done;
        u32 to_do;

        /* If budget is 0, do all the work */
        if (budget == 0)
                budget = INT_MAX;

        /* In TX path, it may try to clean completed pkts in order to xmit,
         * to avoid cleaning conflict, use spin_lock(), it yields better
         * concurrency between xmit/clean than netif's lock.
         */
        spin_lock(&tx->clean_lock);
        /* Find out how much work there is to be done */
        nic_done = gve_tx_load_event_counter(priv, tx);
        to_do = min_t(u32, (nic_done - tx->done), budget);
        gve_clean_tx_done(priv, tx, to_do, true);
        spin_unlock(&tx->clean_lock);
        /* If we still have work we want to repoll */
        return nic_done != tx->done;
}

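/* Returns true if the NIC has reported completions that the driver has not
 * yet cleaned.
 */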
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx)
{
        u32 nic_done = gve_tx_load_event_counter(priv, tx);

        return nic_done != tx->done;
}