// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"

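/* Detach the TX ring at @queue_idx from its notify block by clearing
 * the block's pointer to it.
 */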
void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
    struct gve_notify_block *block =
            &priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];

    block->tx = NULL;
}

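/* Attach the TX ring at @queue_idx to its notify block, record the
 * block index on the ring, and set an XPS mapping so transmit traffic
 * for this queue prefers the CPU associated with the block. TX rings
 * share half of the notify blocks, hence the num_ntfy_blks / 2 cap on
 * the CPUs cycled through.
 */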
void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
{
    unsigned int active_cpus = min_t(int, priv->num_ntfy_blks / 2,
                     num_online_cpus());
    int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
    struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
    struct gve_tx_ring *tx = &priv->tx[queue_idx];

    block->tx = tx;
    tx->ntfy_id = ntfy_idx;
    netif_set_xps_queue(priv->dev, get_cpu_mask(ntfy_idx % active_cpus),
                queue_idx);
}

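/* Detach the RX ring at @queue_idx from its notify block by clearing
 * the block's pointer to it.
 */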
void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
    struct gve_notify_block *block =
            &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];

    block->rx = NULL;
}

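/* Attach the RX ring at @queue_idx to its notify block and record the
 * block index on the ring.
 */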
void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
{
    u32 ntfy_idx = gve_rx_idx_to_ntfy(priv, queue_idx);
    struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
    struct gve_rx_ring *rx = &priv->rx[queue_idx];

    block->rx = rx;
    rx->ntfy_id = ntfy_idx;
}

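/* Copy @len bytes of a received buffer into an skb's linear area.
 *
 * With a multi-fragment context (@ctx), the first call allocates one
 * skb sized for the whole expected packet and later calls append
 * their fragment after the bytes already copied; the protocol is set
 * only once the final expected fragment has landed. Without a
 * context, a fresh skb is allocated per call and the protocol is set
 * immediately. Returns the skb, or NULL if allocation fails.
 */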
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
                struct gve_rx_slot_page_info *page_info, u16 len,
                u16 padding, struct gve_rx_ctx *ctx)
{
    void *va = page_info->page_address + padding + page_info->page_offset;
    int skb_linear_offset = 0;
    bool set_protocol = false;
    struct sk_buff *skb;

    if (ctx) {
        if (!ctx->skb_head)
            ctx->skb_head = napi_alloc_skb(napi, ctx->total_expected_size);

        if (unlikely(!ctx->skb_head))
            return NULL;
        skb = ctx->skb_head;
        skb_linear_offset = skb->len;
        set_protocol = ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1;
    } else {
        skb = napi_alloc_skb(napi, len);

        if (unlikely(!skb))
            return NULL;
        set_protocol = true;
    }
    __skb_put(skb, len);
    skb_copy_to_linear_data_offset(skb, skb_linear_offset, va, len);

    if (set_protocol)
        skb->protocol = eth_type_trans(skb, dev);

    return skb;
}

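/* Consume one reference from the driver's page-count bias. The bias
 * lets the driver track its own references to a recycled RX page
 * without touching the atomic page refcount on every use; when the
 * bias runs out, both the bias and the page refcount are topped back
 * up toward INT_MAX with a single page_ref_add() call.
 */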
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info)
{
    page_info->pagecnt_bias--;
    if (page_info->pagecnt_bias == 0) {
        int pagecount = page_count(page_info->page);

        /* If we have run out of bias - set it back up to INT_MAX
         * minus the existing refs.
         */
        page_info->pagecnt_bias = INT_MAX - pagecount;

        /* Set pagecount back up to max. */
        page_ref_add(page_info->page, INT_MAX - pagecount);
    }
}