0001 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
0002 /*
0003  * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
0004  */
0005 
0006 #ifndef ENA_COM
0007 #define ENA_COM
0008 
0009 #include <linux/compiler.h>
0010 #include <linux/delay.h>
0011 #include <linux/dma-mapping.h>
0012 #include <linux/gfp.h>
0013 #include <linux/io.h>
0014 #include <linux/prefetch.h>
0015 #include <linux/sched.h>
0016 #include <linux/sizes.h>
0017 #include <linux/spinlock.h>
0018 #include <linux/types.h>
0019 #include <linux/wait.h>
0020 #include <linux/netdevice.h>
0021 
0022 #include "ena_common_defs.h"
0023 #include "ena_admin_defs.h"
0024 #include "ena_eth_io_defs.h"
0025 #include "ena_regs_defs.h"
0026 
0027 #undef pr_fmt
0028 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0029 
0030 #define ENA_MAX_NUM_IO_QUEUES 128U
0031 /* We need two queues for each IO (one for Tx and one for Rx) */
0032 #define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES))
0033 
0034 #define ENA_MAX_HANDLERS 256
0035 
0036 #define ENA_MAX_PHYS_ADDR_SIZE_BITS 48
0037 
0038 /* Unit in usec */
0039 #define ENA_REG_READ_TIMEOUT 200000
0040 
0041 #define ADMIN_SQ_SIZE(depth)    ((depth) * sizeof(struct ena_admin_aq_entry))
0042 #define ADMIN_CQ_SIZE(depth)    ((depth) * sizeof(struct ena_admin_acq_entry))
0043 #define ADMIN_AENQ_SIZE(depth)  ((depth) * sizeof(struct ena_admin_aenq_entry))
0044 
0045 /*****************************************************************************/
0046 /*****************************************************************************/
0047 /* ENA adaptive interrupt moderation settings */
0048 
0049 #define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64
0050 #define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0
0051 #define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1
0052 
0053 #define ENA_HASH_KEY_SIZE 40
0054 
0055 #define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF
0056 
0057 #define ENA_FEATURE_MAX_QUEUE_EXT_VER 1
0058 
0059 struct ena_llq_configurations {
0060     enum ena_admin_llq_header_location llq_header_location;
0061     enum ena_admin_llq_ring_entry_size llq_ring_entry_size;
0062     enum ena_admin_llq_stride_ctrl  llq_stride_ctrl;
0063     enum ena_admin_llq_num_descs_before_header llq_num_decs_before_header;
0064     u16 llq_ring_entry_size_value;
0065 };
0066 
0067 enum queue_direction {
0068     ENA_COM_IO_QUEUE_DIRECTION_TX,
0069     ENA_COM_IO_QUEUE_DIRECTION_RX
0070 };
0071 
0072 struct ena_com_buf {
0073     dma_addr_t paddr; /**< Buffer physical address */
0074     u16 len; /**< Buffer length in bytes */
0075 };
0076 
0077 struct ena_com_rx_buf_info {
0078     u16 len;
0079     u16 req_id;
0080 };
0081 
0082 struct ena_com_io_desc_addr {
0083     u8 __iomem *pbuf_dev_addr; /* LLQ address */
0084     u8 *virt_addr;
0085     dma_addr_t phys_addr;
0086 };
0087 
0088 struct ena_com_tx_meta {
0089     u16 mss;
0090     u16 l3_hdr_len;
0091     u16 l3_hdr_offset;
0092     u16 l4_hdr_len; /* In words */
0093 };
0094 
0095 struct ena_com_llq_info {
0096     u16 header_location_ctrl;
0097     u16 desc_stride_ctrl;
0098     u16 desc_list_entry_size_ctrl;
0099     u16 desc_list_entry_size;
0100     u16 descs_num_before_header;
0101     u16 descs_per_entry;
0102     u16 max_entries_in_tx_burst;
0103     bool disable_meta_caching;
0104 };
0105 
0106 struct ena_com_io_cq {
0107     struct ena_com_io_desc_addr cdesc_addr;
0108 
0109     /* Interrupt unmask register */
0110     u32 __iomem *unmask_reg;
0111 
0112     /* The completion queue head doorbell register */
0113     u32 __iomem *cq_head_db_reg;
0114 
0115     /* numa configuration register (for TPH) */
0116     u32 __iomem *numa_node_cfg_reg;
0117 
0118     /* The value to write to the above register to unmask
0119      * the interrupt of this queue
0120      */
0121     u32 msix_vector;
0122 
0123     enum queue_direction direction;
0124 
0125     /* holds the number of cdesc of the current packet */
0126     u16 cur_rx_pkt_cdesc_count;
0127     /* save the first cdesc idx of the current packet */
0128     u16 cur_rx_pkt_cdesc_start_idx;
0129 
0130     u16 q_depth;
0131     /* Caller qid */
0132     u16 qid;
0133 
0134     /* Device queue index */
0135     u16 idx;
0136     u16 head;
0137     u16 last_head_update;
0138     u8 phase;
0139     u8 cdesc_entry_size_in_bytes;
0140 
0141 } ____cacheline_aligned;
0142 
0143 struct ena_com_io_bounce_buffer_control {
0144     u8 *base_buffer;
0145     u16 next_to_use;
0146     u16 buffer_size;
0147     u16 buffers_num;  /* Must be a power of 2 */
0148 };
0149 
0150 /* This struct tracks the current location of the next llq entry */
0151 struct ena_com_llq_pkt_ctrl {
0152     u8 *curr_bounce_buf;
0153     u16 idx;
0154     u16 descs_left_in_line;
0155 };
0156 
0157 struct ena_com_io_sq {
0158     struct ena_com_io_desc_addr desc_addr;
0159 
0160     u32 __iomem *db_addr;
0161     u8 __iomem *header_addr;
0162 
0163     enum queue_direction direction;
0164     enum ena_admin_placement_policy_type mem_queue_type;
0165 
0166     bool disable_meta_caching;
0167 
0168     u32 msix_vector;
0169     struct ena_com_tx_meta cached_tx_meta;
0170     struct ena_com_llq_info llq_info;
0171     struct ena_com_llq_pkt_ctrl llq_buf_ctrl;
0172     struct ena_com_io_bounce_buffer_control bounce_buf_ctrl;
0173 
0174     u16 q_depth;
0175     u16 qid;
0176 
0177     u16 idx;
0178     u16 tail;
0179     u16 next_to_comp;
0180     u16 llq_last_copy_tail;
0181     u32 tx_max_header_size;
0182     u8 phase;
0183     u8 desc_entry_size;
0184     u8 dma_addr_bits;
0185     u16 entries_in_tx_burst_left;
0186 } ____cacheline_aligned;
0187 
0188 struct ena_com_admin_cq {
0189     struct ena_admin_acq_entry *entries;
0190     dma_addr_t dma_addr;
0191 
0192     u16 head;
0193     u8 phase;
0194 };
0195 
0196 struct ena_com_admin_sq {
0197     struct ena_admin_aq_entry *entries;
0198     dma_addr_t dma_addr;
0199 
0200     u32 __iomem *db_addr;
0201 
0202     u16 head;
0203     u16 tail;
0204     u8 phase;
0205 
0206 };
0207 
0208 struct ena_com_stats_admin {
0209     u64 aborted_cmd;
0210     u64 submitted_cmd;
0211     u64 completed_cmd;
0212     u64 out_of_space;
0213     u64 no_completion;
0214 };
0215 
0216 struct ena_com_admin_queue {
0217     void *q_dmadev;
0218     struct ena_com_dev *ena_dev;
0219     spinlock_t q_lock; /* spinlock for the admin queue */
0220 
0221     struct ena_comp_ctx *comp_ctx;
0222     u32 completion_timeout;
0223     u16 q_depth;
0224     struct ena_com_admin_cq cq;
0225     struct ena_com_admin_sq sq;
0226 
0227     /* Indicate if the admin queue should poll for completion */
0228     bool polling;
0229 
0230     /* Define if fallback to polling mode should occur */
0231     bool auto_polling;
0232 
0233     u16 curr_cmd_id;
0234 
0235     /* Indicate that the ena was initialized and can
0236      * process new admin commands
0237      */
0238     bool running_state;
0239 
0240     /* Count the number of outstanding admin commands */
0241     atomic_t outstanding_cmds;
0242 
0243     struct ena_com_stats_admin stats;
0244 };
0245 
0246 struct ena_aenq_handlers;
0247 
0248 struct ena_com_aenq {
0249     u16 head;
0250     u8 phase;
0251     struct ena_admin_aenq_entry *entries;
0252     dma_addr_t dma_addr;
0253     u16 q_depth;
0254     struct ena_aenq_handlers *aenq_handlers;
0255 };
0256 
0257 struct ena_com_mmio_read {
0258     struct ena_admin_ena_mmio_req_read_less_resp *read_resp;
0259     dma_addr_t read_resp_dma_addr;
0260     u32 reg_read_to; /* in us */
0261     u16 seq_num;
0262     bool readless_supported;
0263     /* spin lock to ensure a single outstanding read */
0264     spinlock_t lock;
0265 };
0266 
0267 struct ena_rss {
0268     /* Indirect table */
0269     u16 *host_rss_ind_tbl;
0270     struct ena_admin_rss_ind_table_entry *rss_ind_tbl;
0271     dma_addr_t rss_ind_tbl_dma_addr;
0272     u16 tbl_log_size;
0273 
0274     /* Hash key */
0275     enum ena_admin_hash_functions hash_func;
0276     struct ena_admin_feature_rss_flow_hash_control *hash_key;
0277     dma_addr_t hash_key_dma_addr;
0278     u32 hash_init_val;
0279 
0280     /* Flow Control */
0281     struct ena_admin_feature_rss_hash_control *hash_ctrl;
0282     dma_addr_t hash_ctrl_dma_addr;
0283 
0284 };
0285 
0286 struct ena_host_attribute {
0287     /* Debug area */
0288     u8 *debug_area_virt_addr;
0289     dma_addr_t debug_area_dma_addr;
0290     u32 debug_area_size;
0291 
0292     /* Host information */
0293     struct ena_admin_host_info *host_info;
0294     dma_addr_t host_info_dma_addr;
0295 };
0296 
0297 /* Each ena_dev is a PCI function. */
0298 struct ena_com_dev {
0299     struct ena_com_admin_queue admin_queue;
0300     struct ena_com_aenq aenq;
0301     struct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES];
0302     struct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES];
0303     u8 __iomem *reg_bar;
0304     void __iomem *mem_bar;
0305     void *dmadev;
0306     struct net_device *net_device;
0307 
0308     enum ena_admin_placement_policy_type tx_mem_queue_type;
0309     u32 tx_max_header_size;
0310     u16 stats_func; /* Selected function for extended statistic dump */
0311     u16 stats_queue; /* Selected queue for extended statistic dump */
0312 
0313     struct ena_com_mmio_read mmio_read;
0314 
0315     struct ena_rss rss;
0316     u32 supported_features;
0317     u32 capabilities;
0318     u32 dma_addr_bits;
0319 
0320     struct ena_host_attribute host_attr;
0321     bool adaptive_coalescing;
0322     u16 intr_delay_resolution;
0323 
0324     /* interrupt moderation intervals are in usec divided by
0325      * intr_delay_resolution, which is supplied by the device.
0326      */
0327     u32 intr_moder_tx_interval;
0328     u32 intr_moder_rx_interval;
0329 
0330     struct ena_intr_moder_entry *intr_moder_tbl;
0331 
0332     struct ena_com_llq_info llq_info;
0333 
0334     u32 ena_min_poll_delay_us;
0335 };
0336 
0337 struct ena_com_dev_get_features_ctx {
0338     struct ena_admin_queue_feature_desc max_queues;
0339     struct ena_admin_queue_ext_feature_desc max_queue_ext;
0340     struct ena_admin_device_attr_feature_desc dev_attr;
0341     struct ena_admin_feature_aenq_desc aenq;
0342     struct ena_admin_feature_offload_desc offload;
0343     struct ena_admin_ena_hw_hints hw_hints;
0344     struct ena_admin_feature_llq_desc llq;
0345 };
0346 
0347 struct ena_com_create_io_ctx {
0348     enum ena_admin_placement_policy_type mem_queue_type;
0349     enum queue_direction direction;
0350     int numa_node;
0351     u32 msix_vector;
0352     u16 queue_size;
0353     u16 qid;
0354 };
0355 
0356 typedef void (*ena_aenq_handler)(void *data,
0357     struct ena_admin_aenq_entry *aenq_e);
0358 
0359 /* Holds aenq handlers. Indexed by AENQ event group */
0360 struct ena_aenq_handlers {
0361     ena_aenq_handler handlers[ENA_MAX_HANDLERS];
0362     ena_aenq_handler unimplemented_handler;
0363 };
0364 
0365 /*****************************************************************************/
0366 /*****************************************************************************/
0367 
0368 /* ena_com_mmio_reg_read_request_init - Init the mmio reg read mechanism
0369  * @ena_dev: ENA communication layer struct
0370  *
0371  * Initialize the register read mechanism.
0372  *
0373  * @note: This method must be the first stage in the initialization sequence.
0374  *
0375  * @return - 0 on success, negative value on failure.
0376  */
0377 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
0378 
0379 /* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism
0380  * @ena_dev: ENA communication layer struct
0381  * @readless_supported: readless mode (enable/disable)
0382  */
0383 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev,
0384                 bool readless_supported);
0385 
0386 /* ena_com_mmio_reg_read_request_write_dev_addr - Write the mmio reg read return
0387  * value physical address.
0388  * @ena_dev: ENA communication layer struct
0389  */
0390 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev);
0391 
0392 /* ena_com_mmio_reg_read_request_destroy - Destroy the mmio reg read mechanism
0393  * @ena_dev: ENA communication layer struct
0394  */
0395 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
0396 
0397 /* ena_com_admin_init - Init the admin and the async queues
0398  * @ena_dev: ENA communication layer struct
0399  * @aenq_handlers: Handlers to be called upon AENQ events.
0400  *
0401  * Initialize the admin submission and completion queues.
0402  * Initialize the asynchronous events notification queues.
0403  *
0404  * @return - 0 on success, negative value on failure.
0405  */
0406 int ena_com_admin_init(struct ena_com_dev *ena_dev,
0407                struct ena_aenq_handlers *aenq_handlers);
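
/* Illustrative usage sketch (not part of this header): per the notes above,
 * the mmio register read mechanism must be initialized before the admin
 * queue. A driver probe path might therefore look roughly as follows;
 * my_aenq_handlers is a hypothetical handler table supplied by the caller.
 *
 *     rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *     if (rc)
 *             return rc;
 *
 *     ena_com_set_mmio_read_mode(ena_dev, readless_supported);
 *
 *     rc = ena_com_admin_init(ena_dev, &my_aenq_handlers);
 *     if (rc)
 *             ena_com_mmio_reg_read_request_destroy(ena_dev);
 */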
0408 
0409 /* ena_com_admin_destroy - Destroy the admin and the async events queues.
0410  * @ena_dev: ENA communication layer struct
0411  *
0412  * @note: Before calling this method, the caller must validate that the device
0413  * won't send any additional admin completions/aenq.
0414  * To achieve that, a FLR is recommended.
0415  */
0416 void ena_com_admin_destroy(struct ena_com_dev *ena_dev);
0417 
0418 /* ena_com_dev_reset - Perform an FLR (function-level reset) on the device.
0419  * @ena_dev: ENA communication layer struct
0420  * @reset_reason: The trigger for the reset, in case of an error.
0421  *
0422  * @return - 0 on success, negative value on failure.
0423  */
0424 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
0425               enum ena_regs_reset_reason_types reset_reason);
0426 
0427 /* ena_com_create_io_queue - Create io queue.
0428  * @ena_dev: ENA communication layer struct
0429  * @ctx - create context structure
0430  *
0431  * Create the submission and the completion queues.
0432  *
0433  * @return - 0 on success, negative value on failure.
0434  */
0435 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
0436                 struct ena_com_create_io_ctx *ctx);
0437 
0438 /* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid.
0439  * @ena_dev: ENA communication layer struct
0440  * @qid - the caller virtual queue id.
0441  */
0442 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid);
0443 
0444 /* ena_com_get_io_handlers - Return the io queue handlers
0445  * @ena_dev: ENA communication layer struct
0446  * @qid - the caller virtual queue id.
0447  * @io_sq - IO submission queue handler
0448  * @io_cq - IO completion queue handler.
0449  *
0450  * @return - 0 on success, negative value on failure.
0451  */
0452 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
0453                 struct ena_com_io_sq **io_sq,
0454                 struct ena_com_io_cq **io_cq);
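
/* Illustrative sketch (not part of this header): creating an IO queue and
 * retrieving its SQ/CQ handlers; qid, queue_size, msix_vector and numa_node
 * are example values chosen by the caller.
 *
 *     struct ena_com_create_io_ctx ctx = {
 *             .direction      = ENA_COM_IO_QUEUE_DIRECTION_TX,
 *             .mem_queue_type = ena_dev->tx_mem_queue_type,
 *             .msix_vector    = msix_vector,
 *             .numa_node      = numa_node,
 *             .qid            = qid,
 *             .queue_size     = queue_size,
 *     };
 *
 *     rc = ena_com_create_io_queue(ena_dev, &ctx);
 *     if (rc)
 *             return rc;
 *
 *     rc = ena_com_get_io_handlers(ena_dev, qid, &io_sq, &io_cq);
 *     if (rc)
 *             ena_com_destroy_io_queue(ena_dev, qid);
 */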
0455 
0456 /* ena_com_admin_aenq_enable - Enable asynchronous event notifications
0457  * @ena_dev: ENA communication layer struct
0458  *
0459  * After this method, aenq events can be received via AENQ.
0460  */
0461 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev);
0462 
0463 /* ena_com_set_admin_running_state - Set the state of the admin queue
0464  * @ena_dev: ENA communication layer struct
0465  *
0466  * Change the state of the admin queue (enable/disable)
0467  */
0468 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state);
0469 
0470 /* ena_com_get_admin_running_state - Get the admin queue state
0471  * @ena_dev: ENA communication layer struct
0472  *
0473  * Retrieve the state of the admin queue (enable/disable)
0474  *
0475  * @return - current polling mode (enable/disable)
0476  */
0477 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);
0478 
0479 /* ena_com_set_admin_polling_mode - Set the admin completion queue polling mode
0480  * @ena_dev: ENA communication layer struct
0481  * @polling: Enable/Disable polling mode
0482  *
0483  * Set the admin completion mode.
0484  */
0485 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
0486 
0487 /* ena_com_set_admin_auto_polling_mode - Enable autoswitch to polling mode
0488  * @ena_dev: ENA communication layer struct
0489  * @polling: Enable/Disable polling mode
0490  *
0491  * Set the autopolling mode.
0492  * If autopolling is on:
0493  * In case of a missed interrupt when data is available, switch to polling mode.
0494  */
0495 void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
0496                      bool polling);
0497 
0498 /* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
0499  * @ena_dev: ENA communication layer struct
0500  *
0501  * This method goes over the admin completion queue and wakes up all the pending
0502  * threads that wait on the commands wait event.
0503  *
0504  * @note: Should be called after MSI-X interrupt.
0505  */
0506 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);
0507 
0508 /* ena_com_aenq_intr_handler - AENQ interrupt handler
0509  * @ena_dev: ENA communication layer struct
0510  *
0511  * This method goes over the async event notification queue and calls the proper
0512  * aenq handler.
0513  */
0514 void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data);
0515 
0516 /* ena_com_abort_admin_commands - Abort all the outstanding admin commands.
0517  * @ena_dev: ENA communication layer struct
0518  *
0519  * This method aborts all the outstanding admin commands.
0520  * The caller should then call ena_com_wait_for_abort_completion to make sure
0521  * all the commands were completed.
0522  */
0523 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev);
0524 
0525 /* ena_com_wait_for_abort_completion - Wait for admin commands abort.
0526  * @ena_dev: ENA communication layer struct
0527  *
0528  * This method waits until all the outstanding admin commands are completed.
0529  */
0530 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev);
0531 
0532 /* ena_com_validate_version - Validate the device parameters
0533  * @ena_dev: ENA communication layer struct
0534  *
0535  * This method verifies that the device parameters are the same as the saved
0536  * parameters in ena_dev.
0537  * This method is useful after device reset, to validate that the device MAC
0538  * address and the device offloads are the same as before the reset.
0539  *
0540  * @return - 0 on success, negative value otherwise.
0541  */
0542 int ena_com_validate_version(struct ena_com_dev *ena_dev);
0543 
0544 /* ena_com_get_link_params - Retrieve physical link parameters.
0545  * @ena_dev: ENA communication layer struct
0546  * @resp: Link parameters
0547  *
0548  * Retrieve the physical link parameters,
0549  * like speed, auto-negotiation and full duplex support.
0550  *
0551  * @return - 0 on Success and negative value otherwise.
0552  */
0553 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
0554                 struct ena_admin_get_feat_resp *resp);
0555 
0556 /* ena_com_get_dma_width - Retrieve physical dma address width the device
0557  * supports.
0558  * @ena_dev: ENA communication layer struct
0559  *
0560  * Retrieve the maximum physical address bits the device can handle.
0561  *
0562  * @return: > 0 on Success and negative value otherwise.
0563  */
0564 int ena_com_get_dma_width(struct ena_com_dev *ena_dev);
0565 
0566 /* ena_com_set_aenq_config - Set aenq groups configurations
0567  * @ena_dev: ENA communication layer struct
0568  * @groups_flag: bit field of enum ena_admin_aenq_group flags.
0569  *
0570  * Configure which aenq event group the driver would like to receive.
0571  *
0572  * @return: 0 on Success and negative value otherwise.
0573  */
0574 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag);
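
/* Illustrative sketch (not part of this header): enabling a subset of AENQ
 * groups. The ENA_ADMIN_* group values are assumed to come from
 * ena_admin_defs.h and are typically intersected with the groups reported as
 * supported by the device.
 *
 *     u32 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
 *                       BIT(ENA_ADMIN_FATAL_ERROR) |
 *                       BIT(ENA_ADMIN_WARNING);
 *
 *     rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
 */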
0575 
0576 /* ena_com_get_dev_attr_feat - Get device features
0577  * @ena_dev: ENA communication layer struct
0578  * @get_feat_ctx: returned context that contains the retrieved features.
0579  *
0580  * @return: 0 on Success and negative value otherwise.
0581  */
0582 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
0583                   struct ena_com_dev_get_features_ctx *get_feat_ctx);
0584 
0585 /* ena_com_get_dev_basic_stats - Get device basic statistics
0586  * @ena_dev: ENA communication layer struct
0587  * @stats: stats return value
0588  *
0589  * @return: 0 on Success and negative value otherwise.
0590  */
0591 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
0592                 struct ena_admin_basic_stats *stats);
0593 
0594 /* ena_com_get_eni_stats - Get extended network interface statistics
0595  * @ena_dev: ENA communication layer struct
0596  * @stats: stats return value
0597  *
0598  * @return: 0 on Success and negative value otherwise.
0599  */
0600 int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
0601               struct ena_admin_eni_stats *stats);
0602 
0603 /* ena_com_set_dev_mtu - Configure the device mtu.
0604  * @ena_dev: ENA communication layer struct
0605  * @mtu: mtu value
0606  *
0607  * @return: 0 on Success and negative value otherwise.
0608  */
0609 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu);
0610 
0611 /* ena_com_get_offload_settings - Retrieve the device offloads capabilities
0612  * @ena_dev: ENA communication layer struct
0613  * @offload: offload return value
0614  *
0615  * @return: 0 on Success and negative value otherwise.
0616  */
0617 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
0618                  struct ena_admin_feature_offload_desc *offload);
0619 
0620 /* ena_com_rss_init - Init RSS
0621  * @ena_dev: ENA communication layer struct
0622  * @log_size: indirection log size
0623  *
0624  * Allocate RSS/RFS resources.
0625  * The caller can then configure rss using ena_com_set_hash_function,
0626  * ena_com_set_hash_ctrl and ena_com_indirect_table_set.
0627  *
0628  * @return: 0 on Success and negative value otherwise.
0629  */
0630 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
0631 
0632 /* ena_com_rss_destroy - Destroy rss
0633  * @ena_dev: ENA communication layer struct
0634  *
0635  * Free all the RSS/RFS resources.
0636  */
0637 void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
0638 
0639 /* ena_com_get_current_hash_function - Get RSS hash function
0640  * @ena_dev: ENA communication layer struct
0641  *
0642  * Return the current hash function.
0643  * @return: 0 or one of the ena_admin_hash_functions values.
0644  */
0645 int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev);
0646 
0647 /* ena_com_fill_hash_function - Fill RSS hash function
0648  * @ena_dev: ENA communication layer struct
0649  * @func: The hash function (Toeplitz or crc)
0650  * @key: Hash key (for toeplitz hash)
0651  * @key_len: key length (max length 10 DW)
0652  * @init_val: initial value for the hash function
0653  *
0654  * Fill the ena_dev resources with the desired hash function, hash key, key_len
0655  * and key initial value (if needed by the hash function).
0656  * To flush the key into the device the caller should call
0657  * ena_com_set_hash_function.
0658  *
0659  * @return: 0 on Success and negative value otherwise.
0660  */
0661 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
0662                    enum ena_admin_hash_functions func,
0663                    const u8 *key, u16 key_len, u32 init_val);
0664 
0665 /* ena_com_set_hash_function - Flush the hash function and its dependencies to
0666  * the device.
0667  * @ena_dev: ENA communication layer struct
0668  *
0669  * Flush the hash function and its dependencies (key, key length and
0670  * initial value) if needed.
0671  *
0672  * @note: Prior to this method the caller should call ena_com_fill_hash_function
0673  *
0674  * @return: 0 on Success and negative value otherwise.
0675  */
0676 int ena_com_set_hash_function(struct ena_com_dev *ena_dev);
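
/* Illustrative sketch (not part of this header): filling and flushing a
 * Toeplitz hash key after ena_com_rss_init(). The key buffer and the
 * ENA_ADMIN_TOEPLITZ value (assumed to be defined in ena_admin_defs.h) are
 * example inputs.
 *
 *     rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *                                     key, ENA_HASH_KEY_SIZE, 0xffffffff);
 *     if (rc)
 *             return rc;
 *
 *     rc = ena_com_set_hash_function(ena_dev);
 */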
0677 
0678 /* ena_com_get_hash_function - Retrieve the hash function from the device.
0679  * @ena_dev: ENA communication layer struct
0680  * @func: hash function
0681  *
0682  * Retrieve the hash function from the device.
0683  *
0684  * @note: If the caller called ena_com_fill_hash_function but didn't flush
0685  * it to the device, the new configuration will be lost.
0686  *
0687  * @return: 0 on Success and negative value otherwise.
0688  */
0689 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
0690                   enum ena_admin_hash_functions *func);
0691 
0692 /* ena_com_get_hash_key - Retrieve the hash key
0693  * @ena_dev: ENA communication layer struct
0694  * @key: hash key
0695  *
0696  * Retrieve the hash key.
0697  *
0698  * @note: If the caller called ena_com_fill_hash_key but didn't flush
0699  * it to the device, the new configuration will be lost.
0700  *
0701  * @return: 0 on Success and negative value otherwise.
0702  */
0703 int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key);
0704 /* ena_com_fill_hash_ctrl - Fill RSS hash control
0705  * @ena_dev: ENA communication layer struct.
0706  * @proto: The protocol to configure.
0707  * @hash_fields: bit mask of ena_admin_flow_hash_fields
0708  *
0709  * Fill the ena_dev resources with the desired hash control (the Ethernet
0710  * fields that take part in the hash) for a specific protocol.
0711  * To flush the hash control to the device, the caller should call
0712  * ena_com_set_hash_ctrl.
0713  *
0714  * @return: 0 on Success and negative value otherwise.
0715  */
0716 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
0717                enum ena_admin_flow_hash_proto proto,
0718                u16 hash_fields);
0719 
0720 /* ena_com_set_hash_ctrl - Flush the hash control resources to the device.
0721  * @ena_dev: ENA communication layer struct
0722  *
0723  * Flush the hash control (the Ethernet fields that take part in the hash)
0724  *
0725  * @note: Prior to this method the caller should call ena_com_fill_hash_ctrl.
0726  *
0727  * @return: 0 on Success and negative value otherwise.
0728  */
0729 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev);
0730 
0731 /* ena_com_get_hash_ctrl - Retrieve the hash control from the device.
0732  * @ena_dev: ENA communication layer struct
0733  * @proto: The protocol to retrieve.
0734  * @fields: bit mask of ena_admin_flow_hash_fields.
0735  *
0736  * Retrieve the hash control from the device.
0737  *
0738  * @note: If the caller called ena_com_fill_hash_ctrl but didn't flush
0739  * it to the device, the new configuration will be lost.
0740  *
0741  * @return: 0 on Success and negative value otherwise.
0742  */
0743 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
0744               enum ena_admin_flow_hash_proto proto,
0745               u16 *fields);
0746 
0747 /* ena_com_set_default_hash_ctrl - Set the hash control to a default
0748  * configuration.
0749  * @ena_dev: ENA communication layer struct
0750  *
0751  * Fill the ena_dev resources with the default hash control configuration.
0752  * To flush the hash control to the device, the caller should call
0753  * ena_com_set_hash_ctrl.
0754  *
0755  * @return: 0 on Success and negative value otherwise.
0756  */
0757 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev);
0758 
0759 /* ena_com_indirect_table_fill_entry - Fill a single entry in the RSS
0760  * indirection table
0761  * @ena_dev: ENA communication layer struct.
0762  * @entry_idx - indirection table entry.
0763  * @entry_value - redirection value
0764  *
0765  * Fill a single entry of the RSS indirection table in the ena_dev resources.
0766  * To flush the indirection table to the device, the caller should call
0767  * ena_com_indirect_table_set.
0768  *
0769  * @return: 0 on Success and negative value otherwise.
0770  */
0771 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
0772                       u16 entry_idx, u16 entry_value);
0773 
0774 /* ena_com_indirect_table_set - Flush the indirection table to the device.
0775  * @ena_dev: ENA communication layer struct
0776  *
0777  * Flush the indirection hash control to the device.
0778  * Prior to this method the caller should call ena_com_indirect_table_fill_entry
0779  *
0780  * @return: 0 on Success and negative value otherwise.
0781  */
0782 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);
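
/* Illustrative sketch (not part of this header): filling the indirection
 * table after ena_com_rss_init(ena_dev, log_size) and flushing it to the
 * device. rx_qid_for_entry() is a hypothetical helper choosing the queue
 * value the caller wants in each entry.
 *
 *     for (i = 0; i < (1 << log_size); i++) {
 *             rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *                                                    rx_qid_for_entry(i));
 *             if (rc)
 *                     return rc;
 *     }
 *
 *     rc = ena_com_indirect_table_set(ena_dev);
 */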
0783 
0784 /* ena_com_indirect_table_get - Retrieve the indirection table from the device.
0785  * @ena_dev: ENA communication layer struct
0786  * @ind_tbl: indirection table
0787  *
0788  * Retrieve the RSS indirection table from the device.
0789  *
0790  * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush
0791  * it to the device, the new configuration will be lost.
0792  *
0793  * @return: 0 on Success and negative value otherwise.
0794  */
0795 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl);
0796 
0797 /* ena_com_allocate_host_info - Allocate host info resources.
0798  * @ena_dev: ENA communication layer struct
0799  *
0800  * @return: 0 on Success and negative value otherwise.
0801  */
0802 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev);
0803 
0804 /* ena_com_allocate_debug_area - Allocate debug area.
0805  * @ena_dev: ENA communication layer struct
0806  * @debug_area_size - debug area size.
0807  *
0808  * @return: 0 on Success and negative value otherwise.
0809  */
0810 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
0811                 u32 debug_area_size);
0812 
0813 /* ena_com_delete_debug_area - Free the debug area resources.
0814  * @ena_dev: ENA communication layer struct
0815  *
0816  * Free the allocated debug area.
0817  */
0818 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);
0819 
0820 /* ena_com_delete_host_info - Free the host info resources.
0821  * @ena_dev: ENA communication layer struct
0822  *
0823  * Free the allocated host info.
0824  */
0825 void ena_com_delete_host_info(struct ena_com_dev *ena_dev);
0826 
0827 /* ena_com_set_host_attributes - Update the device with the host
0828  * attributes (debug area and host info) base address.
0829  * @ena_dev: ENA communication layer struct
0830  *
0831  * @return: 0 on Success and negative value otherwise.
0832  */
0833 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev);
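
/* Illustrative sketch (not part of this header): the host attributes flow.
 * The caller allocates the resources, fills ena_dev->host_attr.host_info
 * (and optionally the debug area), then pushes their addresses to the
 * device; debug_area_size is an example value and error handling is elided.
 *
 *     rc = ena_com_allocate_host_info(ena_dev);
 *     ...
 *     rc = ena_com_allocate_debug_area(ena_dev, debug_area_size);
 *     ...
 *     rc = ena_com_set_host_attributes(ena_dev);
 */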
0834 
0835 /* ena_com_create_io_cq - Create io completion queue.
0836  * @ena_dev: ENA communication layer struct
0837  * @io_cq - io completion queue handler
0838  *
0839  * Create IO completion queue.
0840  *
0841  * @return - 0 on success, negative value on failure.
0842  */
0843 int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
0844              struct ena_com_io_cq *io_cq);
0845 
0846 /* ena_com_destroy_io_cq - Destroy io completion queue.
0847  * @ena_dev: ENA communication layer struct
0848  * @io_cq - io completion queue handler
0849  *
0850  * Destroy IO completion queue.
0851  *
0852  * @return - 0 on success, negative value on failure.
0853  */
0854 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
0855               struct ena_com_io_cq *io_cq);
0856 
0857 /* ena_com_execute_admin_command - Execute admin command
0858  * @admin_queue: admin queue.
0859  * @cmd: the admin command to execute.
0860  * @cmd_size: the command size.
0861  * @cmd_completion: command completion return value.
0862  * @cmd_comp_size: command completion size.
0863  *
0864  * Submit an admin command and then wait until the device returns a
0865  * completion.
0866  * The completion will be copied into cmd_comp.
0867  *
0868  * @return - 0 on success, negative value on failure.
0869  */
0870 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
0871                   struct ena_admin_aq_entry *cmd,
0872                   size_t cmd_size,
0873                   struct ena_admin_acq_entry *cmd_comp,
0874                   size_t cmd_comp_size);
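
/* Illustrative sketch (not part of this header): submitting a command and
 * receiving its completion. The command/completion layouts come from
 * ena_admin_defs.h; the command-specific payload is elided here.
 *
 *     struct ena_admin_aq_entry cmd = {};
 *     struct ena_admin_acq_entry comp = {};
 *
 *     (fill cmd with the opcode and command-specific payload)
 *
 *     rc = ena_com_execute_admin_command(&ena_dev->admin_queue,
 *                                        &cmd, sizeof(cmd),
 *                                        &comp, sizeof(comp));
 */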
0875 
0876 /* ena_com_init_interrupt_moderation - Init interrupt moderation
0877  * @ena_dev: ENA communication layer struct
0878  *
0879  * @return - 0 on success, negative value on failure.
0880  */
0881 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev);
0882 
0883 /* ena_com_interrupt_moderation_supported - Return if interrupt moderation
0884  * capability is supported by the device.
0885  *
0886  * @return - supported or not.
0887  */
0888 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev);
0889 
0890 /* ena_com_update_nonadaptive_moderation_interval_tx - Update the
0891  * non-adaptive interval in Tx direction.
0892  * @ena_dev: ENA communication layer struct
0893  * @tx_coalesce_usecs: Interval in usec.
0894  *
0895  * @return - 0 on success, negative value on failure.
0896  */
0897 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
0898                               u32 tx_coalesce_usecs);
0899 
0900 /* ena_com_update_nonadaptive_moderation_interval_rx - Update the
0901  * non-adaptive interval in Rx direction.
0902  * @ena_dev: ENA communication layer struct
0903  * @rx_coalesce_usecs: Interval in usec.
0904  *
0905  * @return - 0 on success, negative value on failure.
0906  */
0907 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
0908                               u32 rx_coalesce_usecs);
0909 
0910 /* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the
0911  * non-adaptive interval in Tx direction.
0912  * @ena_dev: ENA communication layer struct
0913  *
0914  * @return - interval in usec
0915  */
0916 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);
0917 
0918 /* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the
0919  * non-adaptive interval in Rx direction.
0920  * @ena_dev: ENA communication layer struct
0921  *
0922  * @return - interval in usec
0923  */
0924 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);
0925 
0926 /* ena_com_config_dev_mode - Configure the placement policy of the device.
0927  * @ena_dev: ENA communication layer struct
0928  * @llq_features: LLQ feature descriptor, retrieved via
0929  *         ena_com_get_dev_attr_feat.
0930  * @llq_default_config: The default driver LLQ parameter configuration
0931  */
0932 int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
0933                 struct ena_admin_feature_llq_desc *llq_features,
0934                 struct ena_llq_configurations *llq_default_config);
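
/* Illustrative sketch (not part of this header): a possible default LLQ
 * configuration passed to ena_com_config_dev_mode(). The ENA_ADMIN_* values
 * are assumed to come from ena_admin_defs.h; the actual defaults are a
 * driver policy decision.
 *
 *     struct ena_llq_configurations llq_config = {
 *             .llq_header_location        = ENA_ADMIN_INLINE_HEADER,
 *             .llq_stride_ctrl            = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY,
 *             .llq_ring_entry_size        = ENA_ADMIN_LIST_ENTRY_SIZE_128B,
 *             .llq_ring_entry_size_value  = 128,
 *             .llq_num_decs_before_header =
 *                     ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2,
 *     };
 *
 *     rc = ena_com_config_dev_mode(ena_dev, &get_feat_ctx.llq, &llq_config);
 */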
0935 
0936 /* ena_com_io_sq_to_ena_dev - Extract ena_com_dev using contained field io_sq.
0937  * @io_sq: IO submit queue struct
0938  *
0939  * @return - ena_com_dev struct extracted from io_sq
0940  */
0941 static inline struct ena_com_dev *ena_com_io_sq_to_ena_dev(struct ena_com_io_sq *io_sq)
0942 {
0943     return container_of(io_sq, struct ena_com_dev, io_sq_queues[io_sq->qid]);
0944 }
0945 
0946 /* ena_com_io_cq_to_ena_dev - Extract ena_com_dev using contained field io_cq.
0947  * @io_cq: IO completion queue struct
0948  *
0949  * @return - ena_com_dev struct extracted from io_cq
0950  */
0951 static inline struct ena_com_dev *ena_com_io_cq_to_ena_dev(struct ena_com_io_cq *io_cq)
0952 {
0953     return container_of(io_cq, struct ena_com_dev, io_cq_queues[io_cq->qid]);
0954 }
0955 
0956 static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
0957 {
0958     return ena_dev->adaptive_coalescing;
0959 }
0960 
0961 static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
0962 {
0963     ena_dev->adaptive_coalescing = true;
0964 }
0965 
0966 static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
0967 {
0968     ena_dev->adaptive_coalescing = false;
0969 }
0970 
0971 /* ena_com_get_cap - query whether device supports a capability.
0972  * @ena_dev: ENA communication layer struct
0973  * @cap_id: enum value representing the capability
0974  *
0975  * @return - true if capability is supported or false otherwise
0976  */
0977 static inline bool ena_com_get_cap(struct ena_com_dev *ena_dev,
0978                    enum ena_admin_aq_caps_id cap_id)
0979 {
0980     return !!(ena_dev->capabilities & BIT(cap_id));
0981 }
0982 
0983 /* ena_com_update_intr_reg - Prepare interrupt register
0984  * @intr_reg: interrupt register to update.
0985  * @rx_delay_interval: Rx interval in usecs
0986  * @tx_delay_interval: Tx interval in usecs
0987  * @unmask: unmask enable/disable
0988  *
0989  * Prepare interrupt update register with the supplied parameters.
0990  */
0991 static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
0992                        u32 rx_delay_interval,
0993                        u32 tx_delay_interval,
0994                        bool unmask)
0995 {
0996     intr_reg->intr_control = 0;
0997     intr_reg->intr_control |= rx_delay_interval &
0998         ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
0999 
1000     intr_reg->intr_control |=
1001         (tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
1002         & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
1003 
1004     if (unmask)
1005         intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
1006 }
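
/* Illustrative sketch (not part of this header): preparing the interrupt
 * register before the I/O path writes it to the queue's unmask register
 * (the actual write belongs to the ethernet I/O layer and is not shown).
 * rx_interval/tx_interval are the caller's intervals, already scaled by
 * intr_delay_resolution.
 *
 *     struct ena_eth_io_intr_reg intr_reg;
 *
 *     ena_com_update_intr_reg(&intr_reg, rx_interval, tx_interval, true);
 */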
1007 
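/* ena_com_get_next_bounce_buffer - Return the next LLQ bounce buffer.
 * @bounce_buf_ctrl: bounce buffer control block
 *
 * Advances next_to_use and returns the corresponding slot in base_buffer.
 * buffers_num must be a power of 2, so the index wraps using a mask; the
 * following slot is prefetched for write to warm the cache.
 */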
1008 static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl)
1009 {
1010     u16 size, buffers_num;
1011     u8 *buf;
1012 
1013     size = bounce_buf_ctrl->buffer_size;
1014     buffers_num = bounce_buf_ctrl->buffers_num;
1015 
1016     buf = bounce_buf_ctrl->base_buffer +
1017         (bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size;
1018 
1019     prefetchw(bounce_buf_ctrl->base_buffer +
1020         (bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size);
1021 
1022     return buf;
1023 }
1024 
1025 #endif /* !(ENA_COM) */