/* Google virtual Ethernet (gve) driver. */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>

#include "gve_desc.h"
#include "gve_desc_dqo.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE	0x1ae0
#endif

#define PCI_DEV_ID_GVNIC	0x0042

#define GVE_REGISTER_BAR	0
#define GVE_DOORBELL_BAR	2

/* Driver can allocate up to 2 segments for the header and 2 for the payload
 * of a GQI TX packet.
 */
#define GVE_TX_MAX_IOVEC	4
/* 1 vector for management, 1 for RX, 1 for TX */
#define GVE_MIN_MSIX		3

/* Number of gve TX/RX stats reported in the stats report */
#define GVE_TX_STATS_REPORT_NUM	6
#define GVE_RX_STATS_REPORT_NUM	2

/* Interval at which a stats report update is scheduled, in msec */
#define GVE_STATS_REPORT_TIMER_PERIOD	20000

/* Number of NIC TX/RX stats reported in the stats report */
#define NIC_TX_STATS_REPORT_NUM	0
#define NIC_RX_STATS_REPORT_NUM	4

/* Masks a data slot address down to its page-aligned base */
#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))

/* PTYPEs are always 10 bits, so the lookup table has 1024 entries */
#define GVE_NUM_PTYPES	1024

#define GVE_RX_BUFFER_SIZE_DQO	2048


/* Each slot in the RX desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	struct page *page;
	void *page_address;
	u32 page_offset; /* offset to write to in page */
	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
	u8 can_flip;
};

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	union gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
	u8 raw_addressing; /* use raw_addressing? */
};

struct gve_priv;

/* RX buffer queue for posting buffers to HW.
 * Each RX (completion) queue has a corresponding buffer queue.
 */
struct gve_rx_buf_queue_dqo {
	struct gve_rx_desc_dqo *desc_ring;
	dma_addr_t bus;
	u32 head; /* Pointer to start cleaning buffers at. */
	u32 tail; /* Last posted buffer index + 1 */
	u32 mask; /* Mask for indices to the size of the ring */
};

/* RX completion queue to receive packets from HW. */
struct gve_rx_compl_queue_dqo {
	struct gve_rx_compl_desc_dqo *desc_ring;
	dma_addr_t bus;

	/* Number of RX slots which have not had a buffer posted yet. We should
	 * not post more buffers than the queue size to avoid HW overrunning
	 * unprocessed completions.
	 */
	int num_free_slots;

	/* HW uses a "generation bit" to notify SW of new descriptors. When a
	 * descriptor's generation bit is different from the current generation,
	 * that descriptor is ready to be consumed by SW.
	 */
	u8 cur_gen_bit;

	/* Pointer into desc_ring where the next completion descriptor will be
	 * received.
	 */
	u32 head;
	u32 mask; /* Mask for indices to the size of the ring */
};

/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
	/* The page posted to HW. */
	struct gve_rx_slot_page_info page_info;

	/* The DMA address corresponding to `page_info`. */
	dma_addr_t addr;

	/* Last offset into the page when it only had a single reference, at
	 * which point every other offset is free to be reused.
	 */
	u32 last_single_ref_offset;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;
};

/* `head` and `tail` are indices into an array, or -1 if the list is empty. */
struct gve_index_list {
	s16 head;
	s16 tail;
};
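
/* Illustrative sketch (not part of the driver): these lists chain elements by
 * array index rather than by pointer, so walking one looks roughly like the
 * following, for a hypothetical array `states` whose elements carry an
 * `s16 next` field:
 *
 *	s16 idx;
 *
 *	for (idx = list.head; idx != -1; idx = states[idx].next)
 *		process(&states[idx]);
 */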

/* Info about the packet currently being processed by an RX ring. */
struct gve_rx_ctx {
	/* head and tail of skb chain for the current packet, or NULL if none */
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
	u16 total_expected_size; /* expected total size of the current packet */
	u8 expected_frag_cnt; /* expected number of fragments */
	u8 curr_frag_cnt; /* fragments processed so far */
	u8 reuse_frags; /* whether buffers for this packet can be reused */
};


/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
	struct gve_priv *gve;
	union {
		/* GQI fields */
		struct {
			struct gve_rx_desc_queue desc;
			struct gve_rx_data_queue data;

			/* threshold for posting new buffs and descs */
			u32 db_threshold;
			u16 packet_buffer_size;
		};

		/* DQO fields. */
		struct {
			struct gve_rx_buf_queue_dqo bufq;
			struct gve_rx_compl_queue_dqo complq;

			struct gve_rx_buf_state_dqo *buf_states;
			u16 num_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Index into
			 * buf_states, or -1 if empty.
			 */
			s16 free_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which are pointing to
			 * valid buffers.
			 *
			 * We use a FIFO here in order to increase the
			 * probability that buffers can be reused by increasing
			 * the time between usages.
			 */
			struct gve_index_list recycled_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which have buffers
			 * which cannot be reused yet.
			 */
			struct gve_index_list used_buf_states;
		} dqo;
	};

	u64 rbytes; /* free-running bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
	u64 rx_copied_pkt; /* free-running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped on desc error */
	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
	u64 rx_frag_flip_cnt; /* free-running count of RX segments where the page was flipped */
	u64 rx_frag_copy_cnt; /* free-running count of RX segments copied */
	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */

	/* Info for the packet currently being processed in this ring. */
	struct gve_rx_ctx ctx;
};

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the TX FIFO occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};

/* Tracks the memory in the TX FIFO occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc, not a seg_desc.
 */
struct gve_tx_buffer_state {
	struct sk_buff *skb; /* skb for this pkt */
	union {
		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma);
			DEFINE_DMA_UNMAP_LEN(len);
		};
	};
};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};

/* A TX desc ring entry for the DQO format */
union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

enum gve_packet_state {
	/* Packet is in the free list, available to be allocated.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet is expecting a regular data completion or miss completion */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
	/* Packet has received a miss completion and is expecting a
	 * re-injection completion.
	 */
	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
	/* No valid completion received within the specified timeout. */
	GVE_PACKET_STATE_TIMED_OUT_COMPL,
};

struct gve_tx_pending_packet_dqo {
	struct sk_buff *skb; /* skb for this packet */

	/* 0th element corresponds to the linear portion of `skb`, should be
	 * unmapped with `dma_unmap_single`.
	 *
	 * All others correspond to `skb`'s frags and should be unmapped with
	 * `dma_unmap_page`.
	 */
	DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
	DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
	u16 num_bufs;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;

	/* Linked list index to prev element in the list, or -1 if none.
	 * Used for tracking either outstanding miss completions or prematurely
	 * freed packets.
	 */
	s16 prev;

	/* Identifies the current state of the packet as defined in
	 * `enum gve_packet_state`.
	 */
	u8 state;

	/* If packet is an outstanding miss completion, then the packet is
	 * freed if the corresponding re-injection completion is not received
	 * before kernel jiffies exceed timeout_jiffies.
	 */
	unsigned long timeout_jiffies;
};
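
/* Illustrative sketch (not part of the driver): on completion, the DMA
 * mappings recorded above are expected to be released with dma_unmap_single()
 * for buffer 0 (the linear part of the skb) and dma_unmap_page() for the
 * remaining fragments, roughly as follows, assuming `dev` is the DMA device
 * and `pkt` is a valid struct gve_tx_pending_packet_dqo pointer:
 *
 *	int i;
 *
 *	for (i = 0; i < pkt->num_bufs; i++) {
 *		if (i == 0)
 *			dma_unmap_single(dev, dma_unmap_addr(pkt, dma[i]),
 *					 dma_unmap_len(pkt, len[i]),
 *					 DMA_TO_DEVICE);
 *		else
 *			dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
 *				       dma_unmap_len(pkt, len[i]),
 *				       DMA_TO_DEVICE);
 *	}
 */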

/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	union {
		/* GQI fields */
		struct {
			struct gve_tx_fifo tx_fifo;
			u32 req; /* driver tracked head pointer */
			u32 done; /* driver tracked tail pointer */
		};

		/* DQO fields. */
		struct {
			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is a consumer list owned by the TX path. When it
			 * runs out, the producer list is stolen from the
			 * completion handling path
			 * (dqo_compl.free_pending_packets).
			 */
			s16 free_pending_packets;

			/* Cached value of `dqo_compl.hw_tx_head` */
			u32 head;
			u32 tail; /* Last posted buffer index + 1 */

			/* Index of the last descriptor with "report event" bit
			 * set.
			 */
			u32 last_re_idx;
		} dqo_tx;
	};

	/* Cacheline 1 -- Accessed & dirtied during TX completion processing */
	union {
		/* GQI fields */
		struct {
			/* Spinlock held while cleanup is in progress */
			spinlock_t clean_lock;
		};

		/* DQO fields. */
		struct {
			u32 head; /* Last read position in compl_ring */

			/* Tracks the current gen bit of compl_ring */
			u8 cur_gen_bit;

			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is the producer list, owned by the completion
			 * handling path. When the consumer list
			 * (dqo_tx.free_pending_packets) runs out, this list is
			 * stolen.
			 */
			atomic_t free_pending_packets;

			/* Last TX ring index fetched by HW */
			atomic_t hw_tx_head;

			/* List to track pending packets which received a miss
			 * completion but not a corresponding reinjection.
			 */
			struct gve_index_list miss_completions;

			/* List to track pending packets that were completed
			 * before receiving a valid completion because they
			 * reached a specified timeout.
			 */
			struct gve_index_list timed_out_completions;
		} dqo_compl;
	} ____cacheline_aligned;
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */
	u64 dropped_pkt; /* free-running - total packets dropped */
	u64 dma_mapping_error; /* count of dma mapping errors */

	/* Read-mostly fields */
	union {
		/* GQI fields */
		struct {
			union gve_tx_desc *desc;

			/* Maps 1:1 to a desc */
			struct gve_tx_buffer_state *info;
		};

		/* DQO fields. */
		struct {
			union gve_tx_desc_dqo *tx_ring;
			struct gve_tx_compl_desc *compl_ring;

			struct gve_tx_pending_packet_dqo *pending_packets;
			s16 num_pending_packets;

			u32 complq_mask; /* complq size is complq_mask + 1 */
		} dqo;
	} ____cacheline_aligned;
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	struct device *dev;
	u32 mask; /* masks req and done down to queue size */
	u8 raw_addressing; /* use raw_addressing? */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 queue_timeout; /* count of queue timeouts */
	u32 ntfy_id; /* notification block index */
	u32 last_kick_msec; /* last time the queue was kicked */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
} ____cacheline_aligned;

/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {
	__be32 *irq_db_index; /* pointer to idx into Bar2 */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
};

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	u16 max_queues;
	u16 num_queues; /* current */
};

/* Tracks the available and used QPL IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_options_dqo_rda {
	u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
	u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
};

struct gve_irq_db {
	__be32 index;
} ____cacheline_aligned;

struct gve_ptype {
	u8 l3_type; /* `gve_l3_type` in gve_adminq.h */
	u8 l4_type; /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};

/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
	GVE_GQI_RDA_FORMAT		= 0x1,
	GVE_GQI_QPL_FORMAT		= 0x2,
	GVE_DQO_RDA_FORMAT		= 0x3,
};

struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_queue_page_list *qpls; /* array of num qpls */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
	dma_addr_t irq_db_indices_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num descs per tx ring */
	u16 rx_desc_cnt; /* num descs per rx ring */
	u16 tx_pages_per_qpl; /* pages per tx queue page list */
	u16 rx_data_slot_cnt; /* data slots per rx ring */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	struct gve_qpl_config qpl_cfg; /* map of used QPL ids */
	u32 num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable; /* level for netif* netdev print macros */
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmd timeouts */
	/* free-running count of each distinct AQ cmd executed */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;
	u32 adminq_get_ptype_map_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up since last reset */
	u32 interface_down_cnt; /* count of times interface turned down since last reset */
	u32 reset_cnt; /* count of resets */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
	u32 suspend_cnt; /* count of times suspended */
	u32 resume_cnt; /* count of times resumed */
	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	struct work_struct stats_report_task;
	unsigned long service_task_flags;
	unsigned long state_flags;

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags;

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Gvnic device link speed from hypervisor. */
	u64 link_speed;
	bool up_before_suspend; /* true if dev was up before suspend */

	struct gve_options_dqo_rda options_dqo_rda;
	struct gve_ptype_lut *ptype_lut_dqo;

	/* Must be a power of two */
	int data_buffer_size_dqo;

	enum gve_queue_format queue_format;

	/* Interrupt coalescing settings */
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
};

enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET			= 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS		= 4,
};

enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
};

enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}
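
/* Illustrative sketch (not part of the driver): the service task is expected
 * to consume the flags above roughly as follows (hypothetical handler,
 * assuming `priv` is a fully initialized gve_priv):
 *
 *	if (gve_get_do_reset(priv)) {
 *		gve_clear_do_reset(priv);
 *		gve_reset(priv, false);
 *	}
 *	if (gve_get_do_report_stats(priv)) {
 *		gve_handle_report_stats(priv);
 *		gve_clear_do_report_stats(priv);
 *	}
 */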

/* Returns the address of the ntfy_block's irq doorbell */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
}
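
/* Illustrative sketch (not part of the driver): the returned address is an
 * ioremapped doorbell register, so it is written with iowrite32be(), e.g.
 * (assuming `val` holds a device-defined doorbell value):
 *
 *	iowrite32be(val, gve_irq_doorbell(priv, block));
 */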

/* Returns the index into ntfy_blocks of the given tx ring's block */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}

/* Returns the number of tx queue page lists */
static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->tx_cfg.num_queues;
}

/* Returns the number of rx queue page lists */
static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->rx_cfg.num_queues;
}

/* Returns a pointer to the next available tx qpl in the list of qpls */
static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
{
	int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
				     priv->qpl_cfg.qpl_map_size);

	/* we are out of tx qpls */
	if (id >= gve_num_tx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Returns a pointer to the next available rx qpl in the list of qpls */
static inline
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
{
	int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
				    priv->qpl_cfg.qpl_map_size,
				    gve_num_tx_qpls(priv));

	/* we are out of rx qpls */
	if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Unassigns the qpl with the given id */
static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
{
	clear_bit(id, priv->qpl_cfg.qpl_id_map);
}
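
/* Illustrative sketch (not part of the driver): queue setup is expected to
 * reserve a QPL and release it again on teardown roughly as follows, assuming
 * `priv` is a fully initialized gve_priv using the GQI QPL format:
 *
 *	struct gve_queue_page_list *qpl = gve_assign_rx_qpl(priv);
 *
 *	if (!qpl)
 *		return -ENOMEM;
 *	...
 *	gve_unassign_qpl(priv, qpl->id);
 */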

/* Returns the correct dma direction for tx and rx qpls */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						      int id)
{
	if (id < gve_num_tx_qpls(priv))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

/* Returns true if the negotiated queue format is one of the GQI formats */
static inline bool gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
		priv->queue_format == GVE_GQI_QPL_FORMAT;
}
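
/* Illustrative sketch (not part of the driver): callers are expected to use
 * gve_is_gqi() to pick between the GQI and DQO datapaths, e.g.
 *
 *	if (gve_is_gqi(priv))
 *		gve_rx_write_doorbell(priv, rx);
 */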

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
void gve_tx_free_rings_gqi(struct gve_priv *priv);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings_gqi(struct gve_priv *priv);
/* reset handling */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern const char gve_version_str[];
#endif /* _GVE_H_ */