/* Google virtual Ethernet (gve) driver - GQI queue format receive path. */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"
#include <linux/etherdevice.h>

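/* Buffer model, as used throughout this file: each RX slot owns one page
 * that is split into two half-page packet buffers.  In QPL mode
 * (!raw_addressing) the pages come from a queue page list registered with
 * the device, so a buffer is reused by "flipping" to the other half of its
 * page, or its contents are copied out.  In raw-addressing (RDA) mode pages
 * are DMA-mapped individually and may be recycled in place or replaced with
 * freshly allocated pages.
 */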
static void gve_rx_free_buffer(struct device *dev,
			       struct gve_rx_slot_page_info *page_info,
			       union gve_rx_data_slot *data_slot)
{
	dma_addr_t dma = (dma_addr_t)(be64_to_cpu(data_slot->addr) &
				      GVE_DATA_SLOT_ADDR_PAGE_MASK);

	page_ref_sub(page_info->page, page_info->pagecnt_bias - 1);
	gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE);
}

static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
{
	u32 slots = rx->mask + 1;
	int i;

	if (rx->data.raw_addressing) {
		for (i = 0; i < slots; i++)
			gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
					   &rx->data.data_ring[i]);
	} else {
		for (i = 0; i < slots; i++)
			page_ref_sub(rx->data.page_info[i].page,
				     rx->data.page_info[i].pagecnt_bias - 1);
		gve_unassign_qpl(priv, rx->data.qpl->id);
		rx->data.qpl = NULL;
	}
	kvfree(rx->data.page_info);
	rx->data.page_info = NULL;
}

static void gve_rx_free_ring(struct gve_priv *priv, int idx)
{
	struct gve_rx_ring *rx = &priv->rx[idx];
	struct device *dev = &priv->pdev->dev;
	u32 slots = rx->mask + 1;
	size_t bytes;

	gve_rx_remove_from_block(priv, idx);

	bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt;
	dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
	rx->desc.desc_ring = NULL;

	dma_free_coherent(dev, sizeof(*rx->q_resources),
			  rx->q_resources, rx->q_resources_bus);
	rx->q_resources = NULL;

	gve_rx_unfill_pages(priv, rx);

	bytes = sizeof(*rx->data.data_ring) * slots;
	dma_free_coherent(dev, bytes, rx->data.data_ring,
			  rx->data.data_bus);
	rx->data.data_ring = NULL;

	netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}

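/* A large "bias" is taken on the page refcount up front so the hot path can
 * hand fragments to the stack without an atomic refcount update per packet;
 * gve_dec_pagecnt_bias() (from gve_utils.h) is expected to consume that bias
 * one fragment at a time and only re-arm it once it runs out.  Roughly:
 * right after setup, page_count(page) == pagecnt_bias == INT_MAX, so
 * page_count() == pagecnt_bias later means the stack holds no references
 * and the buffer can be reused.
 */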
static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
				dma_addr_t addr, struct page *page, __be64 *slot_addr)
{
	page_info->page = page;
	page_info->page_offset = 0;
	page_info->page_address = page_address(page);
	*slot_addr = cpu_to_be64(addr);

	/* The page already has one reference from the allocator, so only
	 * add the rest of the bias here.
	 */
	page_ref_add(page, INT_MAX - 1);
	page_info->pagecnt_bias = INT_MAX;
}

static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
			       struct gve_rx_slot_page_info *page_info,
			       union gve_rx_data_slot *data_slot)
{
	struct page *page;
	dma_addr_t dma;
	int err;

	err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE,
			     GFP_ATOMIC);
	if (err)
		return err;

	gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr);
	return 0;
}

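/* Populate every slot in the data ring.  QPL slots are backed by pages from
 * the assigned queue page list and addressed by their offset within it,
 * while raw-addressing slots get freshly allocated, DMA-mapped pages.
 * Returns the number of slots filled on success or a negative errno.
 */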
static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
{
	struct gve_priv *priv = rx->gve;
	u32 slots;
	int err;
	int i;

	/* Allocate one page per Rx queue slot. Each page is split into two
	 * packet buffers, and when possible we "page flip" between them.
	 */
	slots = rx->mask + 1;

	rx->data.page_info = kvzalloc(slots *
				      sizeof(*rx->data.page_info), GFP_KERNEL);
	if (!rx->data.page_info)
		return -ENOMEM;

	if (!rx->data.raw_addressing) {
		rx->data.qpl = gve_assign_rx_qpl(priv);
		if (!rx->data.qpl) {
			kvfree(rx->data.page_info);
			rx->data.page_info = NULL;
			return -ENOMEM;
		}
	}
	for (i = 0; i < slots; i++) {
		if (!rx->data.raw_addressing) {
			struct page *page = rx->data.qpl->pages[i];
			dma_addr_t addr = i * PAGE_SIZE;

			gve_setup_rx_buffer(&rx->data.page_info[i], addr, page,
					    &rx->data.data_ring[i].qpl_offset);
			continue;
		}
		err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i],
					  &rx->data.data_ring[i]);
		if (err)
			goto alloc_err;
	}

	return slots;

alloc_err:
	while (i--)
		gve_rx_free_buffer(&priv->pdev->dev,
				   &rx->data.page_info[i],
				   &rx->data.data_ring[i]);
	return err;
}

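/* rx->ctx tracks one packet that may span multiple descriptors/fragments:
 * how many fragments the descriptors announced, how many have been consumed
 * so far, the expected total size, the skb being assembled, and whether the
 * underlying buffers can be reused (page-flipped) once the packet is handed
 * up the stack.
 */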
static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
{
	ctx->curr_frag_cnt = 0;
	ctx->total_expected_size = 0;
	ctx->expected_frag_cnt = 0;
	ctx->skb_head = NULL;
	ctx->skb_tail = NULL;
	ctx->reuse_frags = false;
}

static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
{
	struct gve_rx_ring *rx = &priv->rx[idx];
	struct device *hdev = &priv->pdev->dev;
	u32 slots, npages;
	int filled_pages;
	size_t bytes;
	int err;

	netif_dbg(priv, drv, priv->dev, "allocating rx ring\n");

	/* Make sure everything is zeroed to start with */
	memset(rx, 0, sizeof(*rx));

	rx->gve = priv;
	rx->q_num = idx;

	slots = priv->rx_data_slot_cnt;
	rx->mask = slots - 1;
	rx->data.raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;

	/* alloc rx data ring */
	bytes = sizeof(*rx->data.data_ring) * slots;
	rx->data.data_ring = dma_alloc_coherent(hdev, bytes,
						&rx->data.data_bus,
						GFP_KERNEL);
	if (!rx->data.data_ring)
		return -ENOMEM;
	filled_pages = gve_prefill_rx_pages(rx);
	if (filled_pages < 0) {
		err = -ENOMEM;
		goto abort_with_slots;
	}
	rx->fill_cnt = filled_pages;
	/* Ensure data ring slots (packet buffers) are visible. */
	dma_wmb();

	/* Alloc gve_queue_resources */
	rx->q_resources =
		dma_alloc_coherent(hdev,
				   sizeof(*rx->q_resources),
				   &rx->q_resources_bus,
				   GFP_KERNEL);
	if (!rx->q_resources) {
		err = -ENOMEM;
		goto abort_filled;
	}
	netif_dbg(priv, drv, priv->dev, "rx[%d]->data.data_bus=%lx\n", idx,
		  (unsigned long)rx->data.data_bus);

	/* alloc rx desc ring */
	bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt;
	npages = bytes / PAGE_SIZE;
	if (npages * PAGE_SIZE != bytes) {
		err = -EIO;
		goto abort_with_q_resources;
	}

	rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus,
						GFP_KERNEL);
	if (!rx->desc.desc_ring) {
		err = -ENOMEM;
		goto abort_with_q_resources;
	}
	rx->cnt = 0;
	rx->db_threshold = priv->rx_desc_cnt / 2;
	rx->desc.seqno = 1;

	/* Allocating half-page buffers allows page flipping, which is faster
	 * than copying or allocating new pages.
	 */
	rx->packet_buffer_size = PAGE_SIZE / 2;
	gve_rx_ctx_clear(&rx->ctx);
	gve_rx_add_to_block(priv, idx);

	return 0;

abort_with_q_resources:
	dma_free_coherent(hdev, sizeof(*rx->q_resources),
			  rx->q_resources, rx->q_resources_bus);
	rx->q_resources = NULL;
abort_filled:
	gve_rx_unfill_pages(priv, rx);
abort_with_slots:
	bytes = sizeof(*rx->data.data_ring) * slots;
	dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus);
	rx->data.data_ring = NULL;

	return err;
}

int gve_rx_alloc_rings(struct gve_priv *priv)
{
	int err = 0;
	int i;

	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
		err = gve_rx_alloc_ring(priv, i);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "Failed to alloc rx ring=%d: err=%d\n",
				  i, err);
			break;
		}
	}

	if (err) {
		int j;

		for (j = 0; j < i; j++)
			gve_rx_free_ring(priv, j);
	}
	return err;
}

void gve_rx_free_rings_gqi(struct gve_priv *priv)
{
	int i;

	for (i = 0; i < priv->rx_cfg.num_queues; i++)
		gve_rx_free_ring(priv, i);
}

void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx)
{
	u32 db_idx = be32_to_cpu(rx->q_resources->db_index);

	iowrite32be(rx->fill_cnt, &priv->db_bar2[db_idx]);
}

static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
{
	if (likely(pkt_flags & (GVE_RXF_TCP | GVE_RXF_UDP)))
		return PKT_HASH_TYPE_L4;
	if (pkt_flags & (GVE_RXF_IPV4 | GVE_RXF_IPV6))
		return PKT_HASH_TYPE_L3;
	return PKT_HASH_TYPE_L2;
}

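/* Descriptor lengths include GVE_RX_PAD bytes of padding on the first
 * fragment of a packet only; later fragments are unpadded.  The padding is
 * therefore applied just while curr_frag_cnt == 0.
 */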
static u16 gve_rx_ctx_padding(struct gve_rx_ctx *ctx)
{
	return (ctx->curr_frag_cnt == 0) ? GVE_RX_PAD : 0;
}

static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
					struct gve_rx_slot_page_info *page_info,
					u16 packet_buffer_size, u16 len,
					struct gve_rx_ctx *ctx)
{
	u32 offset = page_info->page_offset + gve_rx_ctx_padding(ctx);
	struct sk_buff *skb;

	if (!ctx->skb_head)
		ctx->skb_head = napi_get_frags(napi);

	if (unlikely(!ctx->skb_head))
		return NULL;

	skb = ctx->skb_head;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page,
			offset, len, packet_buffer_size);

	return skb;
}

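/* Move a slot to the other half of its page by toggling the half-page bit
 * in both the CPU-side page_offset and the device-visible slot address.
 * XOR-ing the big-endian slot address with cpu_to_be64(PAGE_SIZE / 2) is
 * equivalent to toggling that bit in CPU byte order, because byte swapping
 * only permutes bytes and XOR is applied bytewise.
 */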
static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *slot_addr)
{
	const __be64 offset = cpu_to_be64(PAGE_SIZE / 2);

	/* "flip" to the other packet buffer on this page */
	page_info->page_offset ^= PAGE_SIZE / 2;
	*(slot_addr) ^= offset;
}

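/* Compare the page refcount against the tracked bias to decide whether the
 * networking stack still holds references to this page:
 *   1  - no outstanding references, the buffer can be reused or flipped
 *   0  - the stack still owns part of the page, do not reuse it yet
 *  -1  - the refcount fell below the bias, which should never happen
 */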
static int gve_rx_can_recycle_buffer(struct gve_rx_slot_page_info *page_info)
{
	int pagecount = page_count(page_info->page);

	/* This page is not being used by any SKBs - reuse */
	if (pagecount == page_info->pagecnt_bias)
		return 1;
	/* This page is still being used by an SKB - we can't reuse */
	else if (pagecount > page_info->pagecnt_bias)
		return 0;
	WARN(pagecount < page_info->pagecnt_bias,
	     "Pagecount should never be less than the bias.");
	return -1;
}

static struct sk_buff *
gve_rx_raw_addressing(struct device *dev, struct net_device *netdev,
		      struct gve_rx_slot_page_info *page_info, u16 len,
		      struct napi_struct *napi,
		      union gve_rx_data_slot *data_slot,
		      u16 packet_buffer_size, struct gve_rx_ctx *ctx)
{
	struct sk_buff *skb = gve_rx_add_frags(napi, page_info, packet_buffer_size, len, ctx);

	if (!skb)
		return NULL;

	/* Optimistically stop the kernel from freeing the page.
	 * We will check again in refill to determine if we need to alloc a
	 * new page.
	 */
	gve_dec_pagecnt_bias(page_info);

	return skb;
}

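/* QPL receive path: the device can only DMA into the registered queue page
 * list, so a buffer is handed up as a zero-copy fragment only when it is
 * safe to flip to the other half of the page (ctx->reuse_frags); otherwise
 * the payload is copied into a new skb so the registered page can be
 * returned to the device immediately.
 */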
static struct sk_buff *
gve_rx_qpl(struct device *dev, struct net_device *netdev,
	   struct gve_rx_ring *rx, struct gve_rx_slot_page_info *page_info,
	   u16 len, struct napi_struct *napi,
	   union gve_rx_data_slot *data_slot)
{
	struct gve_rx_ctx *ctx = &rx->ctx;
	struct sk_buff *skb;

	/* If raw_addressing mode is not enabled, gvnic can only receive into
	 * registered segments. If the buffer can't be recycled, our only
	 * choice is to copy the data out of it so that we can return it to
	 * the device.
	 */
	if (ctx->reuse_frags) {
		skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx);
		/* No point in recycling if we didn't get the skb */
		if (skb) {
			/* Make sure that the page isn't freed. */
			gve_dec_pagecnt_bias(page_info);
			gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
		}
	} else {
		const u16 padding = gve_rx_ctx_padding(ctx);

		skb = gve_rx_copy(netdev, napi, page_info, len, padding, ctx);
		if (skb) {
			u64_stats_update_begin(&rx->statss);
			rx->rx_frag_copy_cnt++;
			u64_stats_update_end(&rx->statss);
		}
	}
	return skb;
}

#define GVE_PKTCONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x))
static u16 gve_rx_get_fragment_size(struct gve_rx_ctx *ctx, struct gve_rx_desc *desc)
{
	return be16_to_cpu(desc->len) - gve_rx_ctx_padding(ctx);
}

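/* Walk the chain of descriptors that make up the next packet (following
 * GVE_RXF_PKT_CONT) before touching any data: count the expected fragments,
 * accumulate the expected size, advance the sequence number, and decide
 * whether every fragment's buffer can be recycled.  Sequence, fragment-size,
 * or buffer accounting errors schedule a device reset; descriptor errors
 * just drop the packet.  Returns true if the packet should be processed.
 */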
static bool gve_rx_ctx_init(struct gve_rx_ctx *ctx, struct gve_rx_ring *rx)
{
	bool qpl_mode = !rx->data.raw_addressing, packet_size_error = false;
	bool buffer_error = false, desc_error = false, seqno_error = false;
	struct gve_rx_slot_page_info *page_info;
	struct gve_priv *priv = rx->gve;
	u32 idx = rx->cnt & rx->mask;
	bool reuse_frags, can_flip;
	struct gve_rx_desc *desc;
	u16 packet_size = 0;
	u16 n_frags = 0;
	int recycle;

	/* Page flipping (and therefore fragment reuse) is only possible in
	 * QPL mode, and only if every fragment's buffer turns out to be
	 * recyclable below.
	 */
	can_flip = qpl_mode;
	reuse_frags = can_flip;
	do {
		u16 frag_size;

		n_frags++;
		desc = &rx->desc.desc_ring[idx];
		desc_error = unlikely(desc->flags_seq & GVE_RXF_ERR) || desc_error;
		if (GVE_SEQNO(desc->flags_seq) != rx->desc.seqno) {
			seqno_error = true;
			netdev_warn(priv->dev,
				    "RX seqno error: want=%d, got=%d, dropping packet and scheduling reset.",
				    rx->desc.seqno, GVE_SEQNO(desc->flags_seq));
		}
		frag_size = be16_to_cpu(desc->len);
		packet_size += frag_size;
		if (frag_size > rx->packet_buffer_size) {
			packet_size_error = true;
			netdev_warn(priv->dev,
				    "RX fragment error: packet_buffer_size=%d, frag_size=%d, dropping packet.",
				    rx->packet_buffer_size, be16_to_cpu(desc->len));
		}
		page_info = &rx->data.page_info[idx];
		if (can_flip) {
			recycle = gve_rx_can_recycle_buffer(page_info);
			reuse_frags = reuse_frags && recycle > 0;
			buffer_error = buffer_error || unlikely(recycle < 0);
		}
		idx = (idx + 1) & rx->mask;
		rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
	} while (GVE_PKTCONT_BIT_IS_SET(desc->flags_seq));

	prefetch(rx->desc.desc_ring + idx);

	ctx->curr_frag_cnt = 0;
	ctx->total_expected_size = packet_size - GVE_RX_PAD;
	ctx->expected_frag_cnt = n_frags;
	ctx->skb_head = NULL;
	ctx->reuse_frags = reuse_frags;

	if (ctx->expected_frag_cnt > 1) {
		u64_stats_update_begin(&rx->statss);
		rx->rx_cont_packet_cnt++;
		u64_stats_update_end(&rx->statss);
	}
	if (ctx->total_expected_size > priv->rx_copybreak && !ctx->reuse_frags && qpl_mode) {
		u64_stats_update_begin(&rx->statss);
		rx->rx_copied_pkt++;
		u64_stats_update_end(&rx->statss);
	}

	if (unlikely(buffer_error || seqno_error || packet_size_error)) {
		gve_schedule_reset(priv);
		return false;
	}

	if (unlikely(desc_error)) {
		u64_stats_update_begin(&rx->statss);
		rx->rx_desc_err_dropped_pkt++;
		u64_stats_update_end(&rx->statss);
		return false;
	}
	return true;
}

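/* Build the skb for one fragment.  Single-fragment packets no longer than
 * rx_copybreak are simply copied into a new skb, which lets the buffer be
 * reused right away; larger packets take the zero-copy raw-addressing path
 * or the QPL path (which may still fall back to copying when the buffers
 * cannot be reused).
 */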
static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
				  struct gve_rx_slot_page_info *page_info,
				  struct napi_struct *napi,
				  u16 len, union gve_rx_data_slot *data_slot)
{
	struct net_device *netdev = priv->dev;
	struct gve_rx_ctx *ctx = &rx->ctx;
	struct sk_buff *skb = NULL;

	if (len <= priv->rx_copybreak && ctx->expected_frag_cnt == 1) {
		/* Just copy small packets */
		skb = gve_rx_copy(netdev, napi, page_info, len, GVE_RX_PAD, ctx);
		if (skb) {
			u64_stats_update_begin(&rx->statss);
			rx->rx_copied_pkt++;
			rx->rx_frag_copy_cnt++;
			rx->rx_copybreak_pkt++;
			u64_stats_update_end(&rx->statss);
		}
	} else {
		if (rx->data.raw_addressing) {
			int recycle = gve_rx_can_recycle_buffer(page_info);

			if (unlikely(recycle < 0)) {
				gve_schedule_reset(priv);
				return NULL;
			}
			page_info->can_flip = recycle;
			if (page_info->can_flip) {
				u64_stats_update_begin(&rx->statss);
				rx->rx_frag_flip_cnt++;
				u64_stats_update_end(&rx->statss);
			}
			skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev,
						    page_info, len, napi,
						    data_slot,
						    rx->packet_buffer_size, ctx);
		} else {
			if (ctx->reuse_frags) {
				u64_stats_update_begin(&rx->statss);
				rx->rx_frag_flip_cnt++;
				u64_stats_update_end(&rx->statss);
			}
			skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx,
					 page_info, len, napi, data_slot);
		}
	}
	return skb;
}

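/* Process one packet starting at rx->cnt: initialize the fragment context,
 * attach each fragment (or a copy of it) to the skb, then fill in checksum
 * and RSS hash from the first descriptor, record the RX queue, and hand the
 * skb to GRO.  On failure the remaining fragments of the packet are consumed
 * and dropped.  Returns true if a packet was delivered to the stack.
 */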
static bool gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
		   u64 *packet_size_bytes, u32 *work_done)
{
	struct gve_rx_slot_page_info *page_info;
	struct gve_rx_ctx *ctx = &rx->ctx;
	union gve_rx_data_slot *data_slot;
	struct gve_priv *priv = rx->gve;
	struct gve_rx_desc *first_desc;
	struct sk_buff *skb = NULL;
	struct gve_rx_desc *desc;
	struct napi_struct *napi;
	dma_addr_t page_bus;
	u32 work_cnt = 0;
	void *va;
	u32 idx;
	u16 len;

	idx = rx->cnt & rx->mask;
	first_desc = &rx->desc.desc_ring[idx];
	desc = first_desc;
	napi = &priv->ntfy_blocks[rx->ntfy_id].napi;

	if (unlikely(!gve_rx_ctx_init(ctx, rx)))
		goto skb_alloc_fail;

	while (ctx->curr_frag_cnt < ctx->expected_frag_cnt) {
		/* Prefetch two packet buffers ahead, we will need it soon. */
		page_info = &rx->data.page_info[(idx + 2) & rx->mask];
		va = page_info->page_address + page_info->page_offset;

		prefetch(page_info->page); /* Kernel page struct. */
		prefetch(va);              /* Packet header. */
		prefetch(va + 64);         /* Next cacheline too. */

		len = gve_rx_get_fragment_size(ctx, desc);

		page_info = &rx->data.page_info[idx];
		data_slot = &rx->data.data_ring[idx];
		page_bus = rx->data.raw_addressing ?
			be64_to_cpu(data_slot->addr) - page_info->page_offset :
			rx->data.qpl->page_buses[idx];
		dma_sync_single_for_cpu(&priv->pdev->dev, page_bus, PAGE_SIZE, DMA_FROM_DEVICE);

		skb = gve_rx_skb(priv, rx, page_info, napi, len, data_slot);
		if (!skb) {
			u64_stats_update_begin(&rx->statss);
			rx->rx_skb_alloc_fail++;
			u64_stats_update_end(&rx->statss);
			goto skb_alloc_fail;
		}

		ctx->curr_frag_cnt++;
		rx->cnt++;
		idx = rx->cnt & rx->mask;
		work_cnt++;
		desc = &rx->desc.desc_ring[idx];
	}

	if (likely(feat & NETIF_F_RXCSUM)) {
		/* NIC passes up the partial sum */
		if (first_desc->csum)
			skb->ip_summed = CHECKSUM_COMPLETE;
		else
			skb->ip_summed = CHECKSUM_NONE;
		skb->csum = csum_unfold(first_desc->csum);
	}

	/* parse flags & pass relevant info up */
	if (likely(feat & NETIF_F_RXHASH) &&
	    gve_needs_rss(first_desc->flags_seq))
		skb_set_hash(skb, be32_to_cpu(first_desc->rss_hash),
			     gve_rss_type(first_desc->flags_seq));

	*packet_size_bytes = skb->len + (skb->protocol ? ETH_HLEN : 0);
	*work_done = work_cnt;
	skb_record_rx_queue(skb, rx->q_num);
	if (skb_is_nonlinear(skb))
		napi_gro_frags(napi);
	else
		napi_gro_receive(napi, skb);

	gve_rx_ctx_clear(ctx);
	return true;

skb_alloc_fail:
	if (napi->skb)
		napi_free_frags(napi);
	*packet_size_bytes = 0;
	*work_done = ctx->expected_frag_cnt;
	while (ctx->curr_frag_cnt < ctx->expected_frag_cnt) {
		rx->cnt++;
		ctx->curr_frag_cnt++;
	}
	gve_rx_ctx_clear(ctx);
	return false;
}

bool gve_rx_work_pending(struct gve_rx_ring *rx)
{
	struct gve_rx_desc *desc;
	__be16 flags_seq;
	u32 next_idx;

	next_idx = rx->cnt & rx->mask;
	desc = rx->desc.desc_ring + next_idx;

	flags_seq = desc->flags_seq;

	return (GVE_SEQNO(flags_seq) == rx->desc.seqno);
}

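/* Top the ring back up to one posted buffer per slot (called from the
 * raw-addressing cleanup path when the ring runs low).  For each empty slot,
 * prefer flipping to the known-free half of the page, then reusing the page
 * in place once the stack has dropped its references, and finally freeing
 * it and allocating a brand new page.  Returns false only when refcount
 * accounting looks broken; an allocation failure just stops the refill
 * early.
 */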
static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
{
	int refill_target = rx->mask + 1;
	u32 fill_cnt = rx->fill_cnt;

	while (fill_cnt - rx->cnt < refill_target) {
		struct gve_rx_slot_page_info *page_info;
		u32 idx = fill_cnt & rx->mask;

		page_info = &rx->data.page_info[idx];
		if (page_info->can_flip) {
			/* The other half of the page is free because it was
			 * free when we processed the descriptor. Flip to it.
			 */
			union gve_rx_data_slot *data_slot =
						&rx->data.data_ring[idx];

			gve_rx_flip_buff(page_info, &data_slot->addr);
			page_info->can_flip = 0;
		} else {
			/* It is possible that the networking stack has already
			 * finished processing all outstanding packets in the
			 * buffer and it can be reused. Flipping is unnecessary
			 * here - if the networking stack still owns half the
			 * page it is impossible to tell which half. Either the
			 * whole page is free or it needs to be replaced.
			 */
			int recycle = gve_rx_can_recycle_buffer(page_info);

			if (recycle < 0) {
				if (!rx->data.raw_addressing)
					gve_schedule_reset(priv);
				return false;
			}
			if (!recycle) {
				/* We can't reuse the buffer - alloc a new one */
				union gve_rx_data_slot *data_slot =
						&rx->data.data_ring[idx];
				struct device *dev = &priv->pdev->dev;

				gve_rx_free_buffer(dev, page_info, data_slot);
				page_info->page = NULL;
				if (gve_rx_alloc_buffer(priv, dev, page_info,
							data_slot)) {
					u64_stats_update_begin(&rx->statss);
					rx->rx_buf_alloc_fail++;
					u64_stats_update_end(&rx->statss);
					break;
				}
			}
		}
		fill_cnt++;
	}
	rx->fill_cnt = fill_cnt;
	return true;
}

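/* NAPI cleanup: consume completed descriptors up to the budget, then restock
 * buffers and ring the doorbell with the new fill count.  QPL buffers are
 * implicitly refilled as descriptors are processed; in raw addressing mode
 * buffers are only refilled once the number of posted buffers falls to the
 * doorbell threshold, and returning the full budget keeps NAPI polling if
 * the refill could not restore enough headroom.
 */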
static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
			     netdev_features_t feat)
{
	u32 work_done = 0, total_packet_cnt = 0, ok_packet_cnt = 0;
	struct gve_priv *priv = rx->gve;
	u32 idx = rx->cnt & rx->mask;
	struct gve_rx_desc *desc;
	u64 bytes = 0;

	desc = &rx->desc.desc_ring[idx];
	while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) &&
	       work_done < budget) {
		u64 packet_size_bytes = 0;
		u32 work_cnt = 0;
		bool dropped;

		netif_info(priv, rx_status, priv->dev,
			   "[%d] idx=%d desc=%p desc->flags_seq=0x%x\n",
			   rx->q_num, idx, desc, desc->flags_seq);
		netif_info(priv, rx_status, priv->dev,
			   "[%d] seqno=%d rx->desc.seqno=%d\n",
			   rx->q_num, GVE_SEQNO(desc->flags_seq),
			   rx->desc.seqno);

		dropped = !gve_rx(rx, feat, &packet_size_bytes, &work_cnt);
		if (!dropped) {
			bytes += packet_size_bytes;
			ok_packet_cnt++;
		}
		total_packet_cnt++;
		idx = rx->cnt & rx->mask;
		desc = &rx->desc.desc_ring[idx];
		work_done += work_cnt;
	}

	if (!work_done && rx->fill_cnt - rx->cnt > rx->db_threshold)
		return 0;

	if (work_done) {
		u64_stats_update_begin(&rx->statss);
		rx->rpackets += ok_packet_cnt;
		rx->rbytes += bytes;
		u64_stats_update_end(&rx->statss);
	}

	/* restock ring slots */
	if (!rx->data.raw_addressing) {
		/* In QPL mode buffs are refilled as the desc are processed */
		rx->fill_cnt += work_done;
	} else if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
		/* In raw addressing mode buffs are only refilled if the avail
		 * falls below a threshold.
		 */
		if (!gve_rx_refill_buffers(priv, rx))
			return 0;

		/* If we were not able to completely refill buffers, we'll want
		 * to schedule this queue for work again to refill buffers.
		 */
		if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
			gve_rx_write_doorbell(priv, rx);
			return budget;
		}
	}

	gve_rx_write_doorbell(priv, rx);
	return total_packet_cnt;
}

int gve_rx_poll(struct gve_notify_block *block, int budget)
{
	struct gve_rx_ring *rx = block->rx;
	netdev_features_t feat;
	int work_done = 0;

	feat = block->napi.dev->features;

	/* If budget is 0, do all the work */
	if (budget == 0)
		budget = INT_MAX;

	if (budget > 0)
		work_done = gve_clean_rx_done(rx, budget, feat);

	return work_done;
}