// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * RX path for the DQO descriptor format.
 */

#include "gve.h"
#include "gve_dqo.h"
#include "gve_adminq.h"
#include "gve_utils.h"
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/ip6_checksum.h>
#include <net/ipv6.h>
#include <net/tcp.h>

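/* Number of buffers from this page that the stack still holds: the page
 * refcount minus the bias taken out when the page was allocated.
 */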
static int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs)
{
	return page_count(bs->page_info.page) - bs->page_info.pagecnt_bias;
}

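/* Drop the references held through the refcount bias and release the page
 * and its DMA mapping.
 */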
static void gve_free_page_dqo(struct gve_priv *priv,
			      struct gve_rx_buf_state_dqo *bs)
{
	page_ref_sub(bs->page_info.page, bs->page_info.pagecnt_bias - 1);
	gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr,
		      DMA_FROM_DEVICE);
	bs->page_info.page = NULL;
}

static struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)
{
	struct gve_rx_buf_state_dqo *buf_state;
	s16 buffer_id;

	buffer_id = rx->dqo.free_buf_states;
	if (unlikely(buffer_id == -1))
		return NULL;

	buf_state = &rx->dqo.buf_states[buffer_id];

	/* Remove buf_state from free list */
	rx->dqo.free_buf_states = buf_state->next;

	/* Point buf_state to itself to mark it as allocated */
	buf_state->next = buffer_id;

	return buf_state;
}

static bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
				       struct gve_rx_buf_state_dqo *buf_state)
{
	s16 buffer_id = buf_state - rx->dqo.buf_states;

	return buf_state->next == buffer_id;
}

static void gve_free_buf_state(struct gve_rx_ring *rx,
			       struct gve_rx_buf_state_dqo *buf_state)
{
	s16 buffer_id = buf_state - rx->dqo.buf_states;

	buf_state->next = rx->dqo.free_buf_states;
	rx->dqo.free_buf_states = buffer_id;
}

static struct gve_rx_buf_state_dqo *
gve_dequeue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list)
{
	struct gve_rx_buf_state_dqo *buf_state;
	s16 buffer_id;

	buffer_id = list->head;
	if (unlikely(buffer_id == -1))
		return NULL;

	buf_state = &rx->dqo.buf_states[buffer_id];

	/* Remove buf_state from list */
	list->head = buf_state->next;
	if (buf_state->next == -1)
		list->tail = -1;

	/* Point buf_state to itself to mark it as allocated */
	buf_state->next = buffer_id;

	return buf_state;
}

static void gve_enqueue_buf_state(struct gve_rx_ring *rx,
				  struct gve_index_list *list,
				  struct gve_rx_buf_state_dqo *buf_state)
{
	s16 buffer_id = buf_state - rx->dqo.buf_states;

	buf_state->next = -1;

	if (list->head == -1) {
		list->head = buffer_id;
		list->tail = buffer_id;
	} else {
		int tail = list->tail;

		rx->dqo.buf_states[tail].next = buffer_id;
		list->tail = buffer_id;
	}
}

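/* Find a buffer that is safe to repost to the NIC: prefer the recycled list,
 * then scan a few entries of the used list for pages whose buffers have all
 * been released by the stack. Returns NULL if nothing can be reused yet.
 */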
static struct gve_rx_buf_state_dqo *
gve_get_recycled_buf_state(struct gve_rx_ring *rx)
{
	struct gve_rx_buf_state_dqo *buf_state;
	int i;

	/* Recycled buf states are immediately usable. */
	buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states);
	if (likely(buf_state))
		return buf_state;

	if (unlikely(rx->dqo.used_buf_states.head == -1))
		return NULL;

	/* Used buf states are only usable when the ref count reaches 0, which
	 * means no SKBs refer to them.
	 *
	 * Search a limited number before giving up.
	 */
	for (i = 0; i < 5; i++) {
		buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
		if (gve_buf_ref_cnt(buf_state) == 0)
			return buf_state;

		gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
	}

	/* If there are no free buf states, discard an entry from
	 * `used_buf_states` so it can be used.
	 */
	if (unlikely(rx->dqo.free_buf_states == -1)) {
		buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
		if (gve_buf_ref_cnt(buf_state) == 0)
			return buf_state;

		gve_free_page_dqo(rx->gve, buf_state);
		gve_free_buf_state(rx, buf_state);
	}

	return NULL;
}

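/* Allocate and DMA-map a fresh page for buf_state, taking a large refcount
 * bias up front so buffer reuse can be detected cheaply later.
 */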
static int gve_alloc_page_dqo(struct gve_priv *priv,
			      struct gve_rx_buf_state_dqo *buf_state)
{
	int err;

	err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
			     &buf_state->addr, DMA_FROM_DEVICE, GFP_ATOMIC);
	if (err)
		return err;

	buf_state->page_info.page_offset = 0;
	buf_state->page_info.page_address =
		page_address(buf_state->page_info.page);
	buf_state->last_single_ref_offset = 0;

	/* The page already has 1 ref. */
	page_ref_add(buf_state->page_info.page, INT_MAX - 1);
	buf_state->page_info.pagecnt_bias = INT_MAX;

	return 0;
}

static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx)
{
	struct gve_rx_ring *rx = &priv->rx[idx];
	struct device *hdev = &priv->pdev->dev;
	size_t completion_queue_slots;
	size_t buffer_queue_slots;
	size_t size;
	int i;

	completion_queue_slots = rx->dqo.complq.mask + 1;
	buffer_queue_slots = rx->dqo.bufq.mask + 1;

	gve_rx_remove_from_block(priv, idx);

	if (rx->q_resources) {
		dma_free_coherent(hdev, sizeof(*rx->q_resources),
				  rx->q_resources, rx->q_resources_bus);
		rx->q_resources = NULL;
	}

	for (i = 0; i < rx->dqo.num_buf_states; i++) {
		struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i];

		if (bs->page_info.page)
			gve_free_page_dqo(priv, bs);
	}

	if (rx->dqo.bufq.desc_ring) {
		size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots;
		dma_free_coherent(hdev, size, rx->dqo.bufq.desc_ring,
				  rx->dqo.bufq.bus);
		rx->dqo.bufq.desc_ring = NULL;
	}

	if (rx->dqo.complq.desc_ring) {
		size = sizeof(rx->dqo.complq.desc_ring[0]) *
			completion_queue_slots;
		dma_free_coherent(hdev, size, rx->dqo.complq.desc_ring,
				  rx->dqo.complq.bus);
		rx->dqo.complq.desc_ring = NULL;
	}

	kvfree(rx->dqo.buf_states);
	rx->dqo.buf_states = NULL;

	netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}

static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
{
	struct gve_rx_ring *rx = &priv->rx[idx];
	struct device *hdev = &priv->pdev->dev;
	size_t size;
	int i;

	const u32 buffer_queue_slots =
		priv->options_dqo_rda.rx_buff_ring_entries;
	const u32 completion_queue_slots = priv->rx_desc_cnt;

	netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n");

	memset(rx, 0, sizeof(*rx));
	rx->gve = priv;
	rx->q_num = idx;
	rx->dqo.bufq.mask = buffer_queue_slots - 1;
	rx->dqo.complq.num_free_slots = completion_queue_slots;
	rx->dqo.complq.mask = completion_queue_slots - 1;
	rx->ctx.skb_head = NULL;
	rx->ctx.skb_tail = NULL;

	rx->dqo.num_buf_states = min_t(s16, S16_MAX, buffer_queue_slots * 4);
	rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
				      sizeof(rx->dqo.buf_states[0]),
				      GFP_KERNEL);
	if (!rx->dqo.buf_states)
		return -ENOMEM;

	/* Set up linked list of buffer IDs */
	for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
		rx->dqo.buf_states[i].next = i + 1;

	rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
	rx->dqo.recycled_buf_states.head = -1;
	rx->dqo.recycled_buf_states.tail = -1;
	rx->dqo.used_buf_states.head = -1;
	rx->dqo.used_buf_states.tail = -1;

	/* Allocate RX completion queue */
	size = sizeof(rx->dqo.complq.desc_ring[0]) *
		completion_queue_slots;
	rx->dqo.complq.desc_ring =
		dma_alloc_coherent(hdev, size, &rx->dqo.complq.bus, GFP_KERNEL);
	if (!rx->dqo.complq.desc_ring)
		goto err;

	/* Allocate RX buffer queue */
	size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots;
	rx->dqo.bufq.desc_ring =
		dma_alloc_coherent(hdev, size, &rx->dqo.bufq.bus, GFP_KERNEL);
	if (!rx->dqo.bufq.desc_ring)
		goto err;

	rx->q_resources = dma_alloc_coherent(hdev, sizeof(*rx->q_resources),
					     &rx->q_resources_bus, GFP_KERNEL);
	if (!rx->q_resources)
		goto err;

	gve_rx_add_to_block(priv, idx);

	return 0;

err:
	gve_rx_free_ring_dqo(priv, idx);
	return -ENOMEM;
}

void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx)
{
	const struct gve_rx_ring *rx = &priv->rx[queue_idx];
	u64 index = be32_to_cpu(rx->q_resources->db_index);

	iowrite32(rx->dqo.bufq.tail, &priv->db_bar2[index]);
}

int gve_rx_alloc_rings_dqo(struct gve_priv *priv)
{
	int err = 0;
	int i;

	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
		err = gve_rx_alloc_ring_dqo(priv, i);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "Failed to alloc rx ring=%d: err=%d\n",
				  i, err);
			goto err;
		}
	}

	return 0;

err:
	for (i--; i >= 0; i--)
		gve_rx_free_ring_dqo(priv, i);

	return err;
}

void gve_rx_free_rings_dqo(struct gve_priv *priv)
{
	int i;

	for (i = 0; i < priv->rx_cfg.num_queues; i++)
		gve_rx_free_ring_dqo(priv, i);
}

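/* Refill the buffer queue with as many buffers as the completion queue has
 * room for, ringing the doorbell every GVE_RX_BUF_THRESH_DQO descriptors.
 */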
void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
{
	struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq;
	struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq;
	struct gve_priv *priv = rx->gve;
	u32 num_avail_slots;
	u32 num_full_slots;
	u32 num_posted = 0;

	num_full_slots = (bufq->tail - bufq->head) & bufq->mask;
	num_avail_slots = bufq->mask - num_full_slots;

	num_avail_slots = min_t(u32, num_avail_slots, complq->num_free_slots);
	while (num_posted < num_avail_slots) {
		struct gve_rx_desc_dqo *desc = &bufq->desc_ring[bufq->tail];
		struct gve_rx_buf_state_dqo *buf_state;

		buf_state = gve_get_recycled_buf_state(rx);
		if (unlikely(!buf_state)) {
			buf_state = gve_alloc_buf_state(rx);
			if (unlikely(!buf_state))
				break;

			if (unlikely(gve_alloc_page_dqo(priv, buf_state))) {
				u64_stats_update_begin(&rx->statss);
				rx->rx_buf_alloc_fail++;
				u64_stats_update_end(&rx->statss);
				gve_free_buf_state(rx, buf_state);
				break;
			}
		}

		desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
		desc->buf_addr = cpu_to_le64(buf_state->addr +
					     buf_state->page_info.page_offset);

		bufq->tail = (bufq->tail + 1) & bufq->mask;
		complq->num_free_slots--;
		num_posted++;

		if ((bufq->tail & (GVE_RX_BUF_THRESH_DQO - 1)) == 0)
			gve_rx_write_doorbell_dqo(priv, rx->q_num);
	}

	rx->fill_cnt += num_posted;
}

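/* Decide whether the page backing buf_state can be posted to the NIC again.
 * The page is carved into data_buffer_size chunks; the offset is advanced to
 * the next chunk and the buffer is recycled unless that chunk may still be
 * held by the stack, in which case it goes on the used list.
 */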
static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
				struct gve_rx_buf_state_dqo *buf_state)
{
	const int data_buffer_size = priv->data_buffer_size_dqo;
	int pagecount;

	/* Can't reuse the page if we only fit one buffer per page. */
	if (data_buffer_size * 2 > PAGE_SIZE)
		goto mark_used;

	pagecount = gve_buf_ref_cnt(buf_state);

	/* Record the offset when we have a single remaining reference.
	 *
	 * When this happens, we know all of the other offsets of the page are
	 * usable.
	 */
	if (pagecount == 1) {
		buf_state->last_single_ref_offset =
			buf_state->page_info.page_offset;
	}

	/* Use the next buffer sized chunk in the page. */
	buf_state->page_info.page_offset += data_buffer_size;
	buf_state->page_info.page_offset &= (PAGE_SIZE - 1);

	/* If we wrap around to the same offset without ever dropping to 1
	 * reference, then we don't know if this offset was ever freed.
	 */
	if (buf_state->page_info.page_offset ==
	    buf_state->last_single_ref_offset) {
		goto mark_used;
	}

	gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
	return;

mark_used:
	gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
}

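/* Set skb->ip_summed from the checksum bits the device reported in the
 * completion descriptor.
 */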
static void gve_rx_skb_csum(struct sk_buff *skb,
			    const struct gve_rx_compl_desc_dqo *desc,
			    struct gve_ptype ptype)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* HW did not identify and process L3 and L4 headers. */
	if (unlikely(!desc->l3_l4_processed))
		return;

	if (ptype.l3_type == GVE_L3_TYPE_IPV4) {
		if (unlikely(desc->csum_ip_err || desc->csum_external_ip_err))
			return;
	} else if (ptype.l3_type == GVE_L3_TYPE_IPV6) {
		/* Checksum should be skipped if this flag is set. */
		if (unlikely(desc->ipv6_ex_add))
			return;
	}

	if (unlikely(desc->csum_l4_err))
		return;

	switch (ptype.l4_type) {
	case GVE_L4_TYPE_TCP:
	case GVE_L4_TYPE_UDP:
	case GVE_L4_TYPE_ICMP:
	case GVE_L4_TYPE_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		break;
	}
}

static void gve_rx_skb_hash(struct sk_buff *skb,
			    const struct gve_rx_compl_desc_dqo *compl_desc,
			    struct gve_ptype ptype)
{
	enum pkt_hash_types hash_type = PKT_HASH_TYPE_L2;

	if (ptype.l4_type != GVE_L4_TYPE_UNKNOWN)
		hash_type = PKT_HASH_TYPE_L4;
	else if (ptype.l3_type != GVE_L3_TYPE_UNKNOWN)
		hash_type = PKT_HASH_TYPE_L3;

	skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type);
}

static void gve_rx_free_skb(struct gve_rx_ring *rx)
{
	if (!rx->ctx.skb_head)
		return;

	dev_kfree_skb_any(rx->ctx.skb_head);
	rx->ctx.skb_head = NULL;
	rx->ctx.skb_tail = NULL;
}

/* Chains multi skbs for single rx packet.
 * Returns 0 if buffer is appended, -1 otherwise.
 */
static int gve_rx_append_frags(struct napi_struct *napi,
			       struct gve_rx_buf_state_dqo *buf_state,
			       u16 buf_len, struct gve_rx_ring *rx,
			       struct gve_priv *priv)
{
	int num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags;

	if (unlikely(num_frags == MAX_SKB_FRAGS)) {
		struct sk_buff *skb;

		skb = napi_alloc_skb(napi, 0);
		if (!skb)
			return -1;

		skb_shinfo(rx->ctx.skb_tail)->frag_list = skb;
		rx->ctx.skb_tail = skb;
		num_frags = 0;
	}
	if (rx->ctx.skb_tail != rx->ctx.skb_head) {
		rx->ctx.skb_head->len += buf_len;
		rx->ctx.skb_head->data_len += buf_len;
		rx->ctx.skb_head->truesize += priv->data_buffer_size_dqo;
	}

	skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
			buf_state->page_info.page,
			buf_state->page_info.page_offset,
			buf_len, priv->data_buffer_size_dqo);
	gve_dec_pagecnt_bias(&buf_state->page_info);

	return 0;
}

/* Returns 0 if descriptor is completed successfully.
 * Returns -EINVAL if descriptor is invalid.
 * Returns -ENOMEM if data cannot be copied to skb.
 */
static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
		      const struct gve_rx_compl_desc_dqo *compl_desc,
		      int queue_idx)
{
	const u16 buffer_id = le16_to_cpu(compl_desc->buf_id);
	const bool eop = compl_desc->end_of_packet != 0;
	struct gve_rx_buf_state_dqo *buf_state;
	struct gve_priv *priv = rx->gve;
	u16 buf_len;

	if (unlikely(buffer_id >= rx->dqo.num_buf_states)) {
		net_err_ratelimited("%s: Invalid RX buffer_id=%u\n",
				    priv->dev->name, buffer_id);
		return -EINVAL;
	}
	buf_state = &rx->dqo.buf_states[buffer_id];
	if (unlikely(!gve_buf_state_is_allocated(rx, buf_state))) {
		net_err_ratelimited("%s: RX buffer_id is not allocated: %u\n",
				    priv->dev->name, buffer_id);
		return -EINVAL;
	}

	if (unlikely(compl_desc->rx_error)) {
		gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
				      buf_state);
		return -EINVAL;
	}

	buf_len = compl_desc->packet_len;

	/* Page might not have been used for a while and was likely last
	 * written by a different thread.
	 */
	prefetch(buf_state->page_info.page);

	/* Sync the portion of the DMA buffer for the CPU to read. */
	dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
				      buf_state->page_info.page_offset,
				      buf_len, DMA_FROM_DEVICE);

	/* Append to the current skb if one exists. */
	if (rx->ctx.skb_head) {
		if (unlikely(gve_rx_append_frags(napi, buf_state, buf_len, rx,
						 priv) != 0)) {
			goto error;
		}

		gve_try_recycle_buf(priv, rx, buf_state);
		return 0;
	}

	if (eop && buf_len <= priv->rx_copybreak) {
		rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
					       &buf_state->page_info,
					       buf_len, 0, NULL);
		if (unlikely(!rx->ctx.skb_head))
			goto error;
		rx->ctx.skb_tail = rx->ctx.skb_head;

		u64_stats_update_begin(&rx->statss);
		rx->rx_copied_pkt++;
		rx->rx_copybreak_pkt++;
		u64_stats_update_end(&rx->statss);

		gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
				      buf_state);
		return 0;
	}

	rx->ctx.skb_head = napi_get_frags(napi);
	if (unlikely(!rx->ctx.skb_head))
		goto error;
	rx->ctx.skb_tail = rx->ctx.skb_head;

	skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page,
			buf_state->page_info.page_offset, buf_len,
			priv->data_buffer_size_dqo);
	gve_dec_pagecnt_bias(&buf_state->page_info);

	gve_try_recycle_buf(priv, rx, buf_state);
	return 0;

error:
	gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
	return -ENOMEM;
}

static int gve_rx_complete_rsc(struct sk_buff *skb,
			       const struct gve_rx_compl_desc_dqo *desc,
			       struct gve_ptype ptype)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	/* Only TCP is supported right now. */
	if (ptype.l4_type != GVE_L4_TYPE_TCP)
		return -EINVAL;

	switch (ptype.l3_type) {
	case GVE_L3_TYPE_IPV4:
		shinfo->gso_type = SKB_GSO_TCPV4;
		break;
	case GVE_L3_TYPE_IPV6:
		shinfo->gso_type = SKB_GSO_TCPV6;
		break;
	default:
		return -EINVAL;
	}

	shinfo->gso_size = le16_to_cpu(desc->rsc_seg_len);
	return 0;
}

static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
			       const struct gve_rx_compl_desc_dqo *desc,
			       netdev_features_t feat)
{
	struct gve_ptype ptype =
		rx->gve->ptype_lut_dqo->ptypes[desc->packet_type];
	int err;

	skb_record_rx_queue(rx->ctx.skb_head, rx->q_num);

	if (feat & NETIF_F_RXHASH)
		gve_rx_skb_hash(rx->ctx.skb_head, desc, ptype);

	if (feat & NETIF_F_RXCSUM)
		gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype);

	/* RSC packets must set gso_size otherwise the TCP stack will complain
	 * that packets are larger than MTU.
	 */
	if (desc->rsc) {
		err = gve_rx_complete_rsc(rx->ctx.skb_head, desc, ptype);
		if (err < 0)
			return err;
	}

	if (skb_headlen(rx->ctx.skb_head) == 0)
		napi_gro_frags(napi);
	else
		napi_gro_receive(napi, rx->ctx.skb_head);

	return 0;
}

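/* NAPI poll for DQO: process up to @budget completed packets from the
 * completion ring, repost buffers, and update stats. Returns the number of
 * packets handed to the stack.
 */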
int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
{
	struct napi_struct *napi = &block->napi;
	netdev_features_t feat = napi->dev->features;

	struct gve_rx_ring *rx = block->rx;
	struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq;

	u32 work_done = 0;
	u64 bytes = 0;
	int err;

	while (work_done < budget) {
		struct gve_rx_compl_desc_dqo *compl_desc =
			&complq->desc_ring[complq->head];
		u32 pkt_bytes;

		/* No more new packets */
		if (compl_desc->generation == complq->cur_gen_bit)
			break;

		/* Prefetch the next two descriptors. */
		prefetch(&complq->desc_ring[(complq->head + 1) & complq->mask]);
		prefetch(&complq->desc_ring[(complq->head + 2) & complq->mask]);

		/* Do not read data until we own the descriptor. */
		dma_rmb();

		err = gve_rx_dqo(napi, rx, compl_desc, rx->q_num);
		if (err < 0) {
			gve_rx_free_skb(rx);
			u64_stats_update_begin(&rx->statss);
			if (err == -ENOMEM)
				rx->rx_skb_alloc_fail++;
			else if (err == -EINVAL)
				rx->rx_desc_err_dropped_pkt++;
			u64_stats_update_end(&rx->statss);
		}

		complq->head = (complq->head + 1) & complq->mask;
		complq->num_free_slots++;

		/* When the ring wraps, the generation bit is flipped. */
		complq->cur_gen_bit ^= (complq->head == 0);

		/* Receiving a completion means we have space to post another
		 * buffer on the buffer queue.
		 */
		{
			struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq;

			bufq->head = (bufq->head + 1) & bufq->mask;
		}

		/* Free running counter of completed descriptors */
		rx->cnt++;

		if (!rx->ctx.skb_head)
			continue;

		if (!compl_desc->end_of_packet)
			continue;

		work_done++;
		pkt_bytes = rx->ctx.skb_head->len;
		/* The ethernet header (first ETH_HLEN bytes) is snipped off
		 * by eth_type_trans.
		 */
		if (skb_headlen(rx->ctx.skb_head))
			pkt_bytes += ETH_HLEN;

		/* gve_rx_complete_skb() will consume the skb if successful. */
		if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) {
			gve_rx_free_skb(rx);
			u64_stats_update_begin(&rx->statss);
			rx->rx_desc_err_dropped_pkt++;
			u64_stats_update_end(&rx->statss);
			continue;
		}

		bytes += pkt_bytes;
		rx->ctx.skb_head = NULL;
		rx->ctx.skb_tail = NULL;
	}

	gve_rx_post_buffers_dqo(rx);

	u64_stats_update_begin(&rx->statss);
	rx->rpackets += work_done;
	rx->rbytes += bytes;
	u64_stats_update_end(&rx->statss);

	return work_done;
}