#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/refcount.h>
#include <linux/auxiliary_bus.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/eq.h>
#include <linux/timecounter.h>
#include <linux/ptp_clock_kernel.h>
#include <net/devlink.h>

#define MLX5_ADEV_NAME "mlx5_core"

#define MLX5_IRQ_EQ_CTRL (U8_MAX)

enum {
	MLX5_BOARD_ID_LEN = 64,
};

enum {
	MLX5_CMD_WQ_MAX_NAME = 32,
};

enum {
	CMD_OWNER_SW = 0x0,
	CMD_OWNER_HW = 0x1,
	CMD_STATUS_SUCCESS = 0,
};

enum mlx5_sqp_t {
	MLX5_SQP_SMI = 0,
	MLX5_SQP_GSI = 1,
	MLX5_SQP_IEEE_1588 = 2,
	MLX5_SQP_SNIFFER = 3,
	MLX5_SQP_SYNC_UMR = 4,
};

enum {
	MLX5_MAX_PORTS = 4,
};

enum {
	MLX5_ATOMIC_MODE_OFFSET = 16,
	MLX5_ATOMIC_MODE_IB_COMP = 1,
	MLX5_ATOMIC_MODE_CX = 2,
	MLX5_ATOMIC_MODE_8B = 3,
	MLX5_ATOMIC_MODE_16B = 4,
	MLX5_ATOMIC_MODE_32B = 5,
	MLX5_ATOMIC_MODE_64B = 6,
	MLX5_ATOMIC_MODE_128B = 7,
	MLX5_ATOMIC_MODE_256B = 8,
};

enum {
	MLX5_REG_QPTS = 0x4002,
	MLX5_REG_QETCR = 0x4005,
	MLX5_REG_QTCT = 0x400a,
	MLX5_REG_QPDPM = 0x4013,
	MLX5_REG_QCAM = 0x4019,
	MLX5_REG_DCBX_PARAM = 0x4020,
	MLX5_REG_DCBX_APP = 0x4021,
	MLX5_REG_FPGA_CAP = 0x4022,
	MLX5_REG_FPGA_CTRL = 0x4023,
	MLX5_REG_FPGA_ACCESS_REG = 0x4024,
	MLX5_REG_CORE_DUMP = 0x402e,
	MLX5_REG_PCAP = 0x5001,
	MLX5_REG_PMTU = 0x5003,
	MLX5_REG_PTYS = 0x5004,
	MLX5_REG_PAOS = 0x5006,
	MLX5_REG_PFCC = 0x5007,
	MLX5_REG_PPCNT = 0x5008,
	MLX5_REG_PPTB = 0x500b,
	MLX5_REG_PBMC = 0x500c,
	MLX5_REG_PMAOS = 0x5012,
	MLX5_REG_PUDE = 0x5009,
	MLX5_REG_PMPE = 0x5010,
	MLX5_REG_PELC = 0x500e,
	MLX5_REG_PVLC = 0x500f,
	MLX5_REG_PCMR = 0x5041,
	MLX5_REG_PDDR = 0x5031,
	MLX5_REG_PMLP = 0x5002,
	MLX5_REG_PPLM = 0x5023,
	MLX5_REG_PCAM = 0x507f,
	MLX5_REG_NODE_DESC = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
	MLX5_REG_MCIA = 0x9014,
	MLX5_REG_MFRL = 0x9028,
	MLX5_REG_MLCR = 0x902b,
	MLX5_REG_MRTC = 0x902d,
	MLX5_REG_MTRC_CAP = 0x9040,
	MLX5_REG_MTRC_CONF = 0x9041,
	MLX5_REG_MTRC_STDB = 0x9042,
	MLX5_REG_MTRC_CTRL = 0x9043,
	MLX5_REG_MPEIN = 0x9050,
	MLX5_REG_MPCNT = 0x9051,
	MLX5_REG_MTPPS = 0x9053,
	MLX5_REG_MTPPSE = 0x9054,
	MLX5_REG_MTUTC = 0x9055,
	MLX5_REG_MPEGC = 0x9056,
	MLX5_REG_MCQS = 0x9060,
	MLX5_REG_MCQI = 0x9061,
	MLX5_REG_MCC = 0x9062,
	MLX5_REG_MCDA = 0x9063,
	MLX5_REG_MCAM = 0x907f,
	MLX5_REG_MIRC = 0x9162,
	MLX5_REG_SBCAM = 0xB01F,
	MLX5_REG_RESOURCE_DUMP = 0xC000,
	MLX5_REG_DTOR = 0xC00E,
};

enum mlx5_qpts_trust_state {
	MLX5_QPTS_TRUST_PCP = 1,
	MLX5_QPTS_TRUST_DSCP = 2,
};

enum mlx5_dcbx_oper_mode {
	MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0,
	MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
};

enum {
	MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
	MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
	MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP = 1 << 2,
	MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD = 1 << 3,
};

enum mlx5_page_fault_resume_flags {
	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
	MLX5_PAGE_FAULT_RESUME_WRITE = 1 << 1,
	MLX5_PAGE_FAULT_RESUME_RDMA = 1 << 2,
	MLX5_PAGE_FAULT_RESUME_ERROR = 1 << 7,
};

enum dbg_rsc_type {
	MLX5_DBG_RSC_QP,
	MLX5_DBG_RSC_EQ,
	MLX5_DBG_RSC_CQ,
};

enum port_state_policy {
	MLX5_POLICY_DOWN = 0,
	MLX5_POLICY_UP = 1,
	MLX5_POLICY_FOLLOW = 2,
	MLX5_POLICY_INVALID = 0xffffffff
};

enum mlx5_coredev_type {
	MLX5_COREDEV_PF,
	MLX5_COREDEV_VF,
	MLX5_COREDEV_SF,
};

struct mlx5_field_desc {
	int i;
};

struct mlx5_rsc_debug {
	struct mlx5_core_dev *dev;
	void *object;
	enum dbg_rsc_type type;
	struct dentry *root;
	struct mlx5_field_desc fields[];
};

enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR = 128,
	MLX5_DEV_EVENT_PORT_AFFINITY = 129,
};

enum mlx5_port_status {
	MLX5_PORT_UP = 1,
	MLX5_PORT_DOWN = 2,
};

enum mlx5_cmdif_state {
	MLX5_CMDIF_STATE_UNINITIALIZED,
	MLX5_CMDIF_STATE_UP,
	MLX5_CMDIF_STATE_DOWN,
};

struct mlx5_cmd_first {
	__be32 data[4];
};

struct mlx5_cmd_msg {
	struct list_head list;
	struct cmd_msg_cache *parent;
	u32 len;
	struct mlx5_cmd_first first;
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_cmd_debug {
	struct dentry *dbg_root;
	void *in_msg;
	void *out_msg;
	u8 status;
	u16 inlen;
	u16 outlen;
};

struct cmd_msg_cache {
	/* protect block chain allocations
	 */
	spinlock_t lock;
	struct list_head head;
	unsigned int max_inbox_size;
	unsigned int num_ent;
};

enum {
	MLX5_NUM_COMMAND_CACHES = 5,
};

struct mlx5_cmd_stats {
	u64 sum;
	u64 n;
	/* number of times command failed */
	u64 failed;
	/* number of times command failed on bad status returned by FW */
	u64 failed_mbox_status;
	/* last command failed returned errno */
	u32 last_failed_errno;
	/* last bad status returned by FW */
	u8 last_failed_mbox_status;
	/* last command failed syndrome returned by FW */
	u32 last_failed_syndrome;
	struct dentry *root;
	/* protect command average calculations */
	spinlock_t lock;
};

struct mlx5_cmd {
	struct mlx5_nb nb;

	enum mlx5_cmdif_state state;
	void *cmd_alloc_buf;
	dma_addr_t alloc_dma;
	int alloc_size;
	void *cmd_buf;
	dma_addr_t dma;
	u16 cmdif_rev;
	u8 log_sz;
	u8 log_stride;
	int max_reg_cmds;
	int events;
	u32 __iomem *vector;

	/* protect command queue allocations
	 */
	spinlock_t alloc_lock;

	/* protect token allocations
	 */
	spinlock_t token_lock;
	u8 token;
	unsigned long bitmask;
	char wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;
	struct semaphore sem;
	struct semaphore pages_sem;
	int mode;
	u16 allowed_opcode;
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct dma_pool *pool;
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
	int checksum_disabled;
	struct mlx5_cmd_stats *stats;
};

struct mlx5_cmd_mailbox {
	void *buf;
	dma_addr_t dma;
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_buf_list {
	void *buf;
	dma_addr_t map;
};

struct mlx5_frag_buf {
	struct mlx5_buf_list *frags;
	int npages;
	int size;
	u8 page_shift;
};

struct mlx5_frag_buf_ctrl {
	struct mlx5_buf_list *frags;
	u32 sz_m1;
	u16 frag_sz_m1;
	u16 strides_offset;
	u8 log_sz;
	u8 log_stride;
	u8 log_frag_strides;
};

struct mlx5_core_psv {
	u32 psv_idx;
	struct psv_layout {
		u32 pd;
		u16 syndrome;
		u16 reserved;
		u16 bg;
		u16 app_tag;
		u32 ref_tag;
	} psv;
};

struct mlx5_core_sig_ctx {
	struct mlx5_core_psv psv_memory;
	struct mlx5_core_psv psv_wire;
	struct ib_sig_err err_item;
	bool sig_status_checked;
	bool sig_err_exists;
	u32 sigerr_count;
};

#define MLX5_24BIT_MASK ((1 << 24) - 1)

enum mlx5_res_type {
	MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP,
	MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ,
	MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ,
	MLX5_RES_SRQ = 3,
	MLX5_RES_XSRQ = 4,
	MLX5_RES_XRQ = 5,
	MLX5_RES_DCT = MLX5_EVENT_QUEUE_TYPE_DCT,
};

struct mlx5_core_rsc_common {
	enum mlx5_res_type res;
	refcount_t refcount;
	struct completion free;
};

struct mlx5_uars_page {
	void __iomem *map;
	bool wc;
	u32 index;
	struct list_head list;
	unsigned int bfregs;
	unsigned long *reg_bitmap;
	unsigned long *fp_bitmap;
	unsigned int reg_avail;
	unsigned int fp_avail;
	struct kref ref_count;
	struct mlx5_core_dev *mdev;
};

struct mlx5_bfreg_head {
	/* protect blue flame registers allocations */
	struct mutex lock;
	struct list_head list;
};

struct mlx5_bfreg_data {
	struct mlx5_bfreg_head reg_head;
	struct mlx5_bfreg_head wc_head;
};

struct mlx5_sq_bfreg {
	void __iomem *map;
	struct mlx5_uars_page *up;
	bool wc;
	u32 index;
	unsigned int offset;
};

struct mlx5_core_health {
	struct health_buffer __iomem *health;
	__be32 __iomem *health_counter;
	struct timer_list timer;
	u32 prev;
	int miss_counter;
	u8 synd;
	u32 fatal_error;
	u32 crdump_size;
	/* wq spinlock to synchronize draining */
	spinlock_t wq_lock;
	struct workqueue_struct *wq;
	unsigned long flags;
	struct work_struct fatal_report_work;
	struct work_struct report_work;
	struct devlink_health_reporter *fw_reporter;
	struct devlink_health_reporter *fw_fatal_reporter;
	struct delayed_work update_fw_log_ts_work;
};

struct mlx5_qp_table {
	struct notifier_block nb;

	/* protect radix tree
	 */
	spinlock_t lock;
	struct radix_tree_root tree;
};

enum {
	MLX5_PF_NOTIFY_DISABLE_VF,
	MLX5_PF_NOTIFY_ENABLE_VF,
};

struct mlx5_vf_context {
	int enabled;
	u64 port_guid;
	u64 node_guid;
	/* Valid bits indicate that the administrative GUIDs above were
	 * explicitly set by the user, as opposed to the default values.
	 */
	u8 port_guid_valid:1;
	u8 node_guid_valid:1;
	enum port_state_policy policy;
	struct blocking_notifier_head notifier;
};

struct mlx5_core_sriov {
	struct mlx5_vf_context *vfs_ctx;
	int num_vfs;
	u16 max_vfs;
};

struct mlx5_fc_pool {
	struct mlx5_core_dev *dev;
	struct mutex pool_lock; /* protects pool lists */
	struct list_head fully_used;
	struct list_head partially_used;
	struct list_head unused;
	int available_fcs;
	int used_fcs;
	int threshold;
};

struct mlx5_fc_stats {
	spinlock_t counters_idr_lock; /* protects counters_idr */
	struct idr counters_idr;
	struct list_head counters;
	struct llist_head addlist;
	struct llist_head dellist;

	struct workqueue_struct *wq;
	struct delayed_work work;
	unsigned long next_query;
	unsigned long sampling_interval; /* jiffies */
	u32 *bulk_query_out;
	int bulk_query_len;
	size_t num_counters;
	bool bulk_query_alloc_failed;
	unsigned long next_bulk_query_alloc;
	struct mlx5_fc_pool fc_pool;
};

struct mlx5_events;
struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
struct mlx5_devcom;
struct mlx5_fw_reset;
struct mlx5_eq_table;
struct mlx5_irq_table;
struct mlx5_vhca_state_notifier;
struct mlx5_sf_dev_table;
struct mlx5_sf_hw_table;
struct mlx5_sf_table;

struct mlx5_rate_limit {
	u32 rate;
	u32 max_burst_sz;
	u16 typical_pkt_sz;
};

struct mlx5_rl_entry {
	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)];
	u64 refcount;
	u16 index;
	u16 uid;
	u8 dedicated : 1;
};

struct mlx5_rl_table {
	/* protect rate limit table */
	struct mutex rl_lock;
	u16 max_size;
	u32 max_rate;
	u32 min_rate;
	struct mlx5_rl_entry *rl_entry;
	u64 refcount;
};

struct mlx5_core_roce {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_handle *allow_rule;
};

enum {
	MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0,
	MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1,
	/* Set during device detach to block any further devices
	 * creation/deletion on drivers rescan. Unset during device attach.
	 */
	MLX5_PRIV_FLAGS_DETACH = 1 << 2,
	/* Distinguish between mlx5e_probe/remove called by module init/cleanup
	 * and called by other flows which can already hold devlink lock
	 */
	MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW = 1 << 3,
};

struct mlx5_adev {
	struct auxiliary_device adev;
	struct mlx5_core_dev *mdev;
	int idx;
};

struct mlx5_debugfs_entries {
	struct dentry *dbg_root;
	struct dentry *qp_debugfs;
	struct dentry *eq_debugfs;
	struct dentry *cq_debugfs;
	struct dentry *cmdif_debugfs;
	struct dentry *pages_debugfs;
	struct dentry *lag_debugfs;
};

struct mlx5_ft_pool;
struct mlx5_priv {
	/* IRQ table valid only for real pci devices PF or VF */
	struct mlx5_irq_table *irq_table;
	struct mlx5_eq_table *eq_table;

	/* pages stuff */
	struct mlx5_nb pg_nb;
	struct workqueue_struct *pg_wq;
	struct xarray page_root_xa;
	u32 fw_pages;
	atomic_t reg_pages;
	struct list_head free_list;
	u32 vfs_pages;
	u32 host_pf_pages;
	u32 fw_pages_alloc_failed;
	u32 give_pages_dropped;
	u32 reclaim_pages_discard;

	struct mlx5_core_health health;
	struct list_head traps;

	struct mlx5_debugfs_entries dbg;

	/* start: alloc stuff */
	/* protect buffer allocation according to numa node */
	struct mutex alloc_mutex;
	int numa_node;

	struct mutex pgdir_mutex;
	struct list_head pgdir_list;
	/* end: alloc stuff */

	struct list_head ctx_list;
	spinlock_t ctx_lock;
	struct mlx5_adev **adev;
	int adev_idx;
	int sw_vhca_id;
	struct mlx5_events *events;

	struct mlx5_flow_steering *steering;
	struct mlx5_mpfs *mpfs;
	struct mlx5_eswitch *eswitch;
	struct mlx5_core_sriov sriov;
	struct mlx5_lag *lag;
	u32 flags;
	struct mlx5_devcom *devcom;
	struct mlx5_fw_reset *fw_reset;
	struct mlx5_core_roce roce;
	struct mlx5_fc_stats fc_stats;
	struct mlx5_rl_table rl_table;
	struct mlx5_ft_pool *ft_pool;

	struct mlx5_bfreg_data bfregs;
	struct mlx5_uars_page *uar;
#ifdef CONFIG_MLX5_SF
	struct mlx5_vhca_state_notifier *vhca_state_notifier;
	struct mlx5_sf_dev_table *sf_dev_table;
	struct mlx5_core_dev *parent_mdev;
#endif
#ifdef CONFIG_MLX5_SF_MANAGER
	struct mlx5_sf_hw_table *sf_hw_table;
	struct mlx5_sf_table *sf_table;
#endif
};

enum mlx5_device_state {
	MLX5_DEVICE_STATE_UP = 1,
	MLX5_DEVICE_STATE_INTERNAL_ERROR,
};

enum mlx5_interface_state {
	MLX5_INTERFACE_STATE_UP = BIT(0),
	MLX5_BREAK_FW_WAIT = BIT(1),
};

enum mlx5_pci_status {
	MLX5_PCI_STATUS_DISABLED,
	MLX5_PCI_STATUS_ENABLED,
};

enum mlx5_pagefault_type_flags {
	MLX5_PFAULT_REQUESTOR = 1 << 0,
	MLX5_PFAULT_WRITE = 1 << 1,
	MLX5_PFAULT_RDMA = 1 << 2,
};

struct mlx5_td {
	/* protects tirs list changes while tirs refresh */
	struct mutex list_lock;
	struct list_head tirs_list;
	u32 tdn;
};

struct mlx5e_resources {
	struct mlx5e_hw_objs {
		u32 pdn;
		struct mlx5_td td;
		u32 mkey;
		struct mlx5_sq_bfreg bfreg;
	} hw_objs;
	struct devlink_port dl_port;
	struct net_device *uplink_netdev;
};

enum mlx5_sw_icm_type {
	MLX5_SW_ICM_TYPE_STEERING,
	MLX5_SW_ICM_TYPE_HEADER_MODIFY,
	MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN,
};

#define MLX5_MAX_RESERVED_GIDS 8

struct mlx5_rsvd_gids {
	unsigned int start;
	unsigned int count;
	struct ida ida;
};

#define MAX_PIN_NUM 8
struct mlx5_pps {
	u8 pin_caps[MAX_PIN_NUM];
	struct work_struct out_work;
	u64 start[MAX_PIN_NUM];
	u8 enabled;
};

struct mlx5_timer {
	struct cyclecounter cycles;
	struct timecounter tc;
	u32 nominal_c_mult;
	unsigned long overflow_period;
	struct delayed_work overflow_work;
};

struct mlx5_clock {
	struct mlx5_nb pps_nb;
	seqlock_t lock;
	struct hwtstamp_config hwtstamp_config;
	struct ptp_clock *ptp;
	struct ptp_clock_info ptp_info;
	struct mlx5_pps pps_info;
	struct mlx5_timer timer;
};

struct mlx5_dm;
struct mlx5_fw_tracer;
struct mlx5_vxlan;
struct mlx5_geneve;
struct mlx5_hv_vhca;

#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
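
/*
 * Illustrative sketch, not part of the upstream header: callers typically
 * round a SW ICM length up to whole blocks before allocating, e.g.:
 *
 *	u64 nblocks = DIV_ROUND_UP(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
 *	u64 alloc_len = nblocks * MLX5_SW_ICM_BLOCK_SIZE(dev);
 */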

enum {
	MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
	MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
};

enum {
	MKEY_CACHE_LAST_STD_ENTRY = 20,
	MLX5_IMR_MTT_CACHE_ENTRY,
	MLX5_IMR_KSM_CACHE_ENTRY,
	MAX_MKEY_CACHE_ENTRIES
};

struct mlx5_profile {
	u64 mask;
	u8 log_max_qp;
	struct {
		int size;
		int limit;
	} mr_cache[MAX_MKEY_CACHE_ENTRIES];
};

struct mlx5_hca_cap {
	u32 cur[MLX5_UN_SZ_DW(hca_cap_union)];
	u32 max[MLX5_UN_SZ_DW(hca_cap_union)];
};

struct mlx5_core_dev {
	struct device *device;
	enum mlx5_coredev_type coredev_type;
	struct pci_dev *pdev;
	/* sync pci state */
	struct mutex pci_status_mutex;
	enum mlx5_pci_status pci_status;
	u8 rev_id;
	char board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_cmd cmd;
	struct {
		struct mlx5_hca_cap *hca[MLX5_CAP_NUM];
		u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
		u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)];
		u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
		u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
		u8 embedded_cpu;
	} caps;
	struct mlx5_timeouts *timeouts;
	u64 sys_image_guid;
	phys_addr_t iseg_base;
	struct mlx5_init_seg __iomem *iseg;
	phys_addr_t bar_addr;
	enum mlx5_device_state state;
	/* sync interface state */
	struct mutex intf_state_mutex;
	struct lock_class_key lock_key;
	unsigned long intf_state;
	struct mlx5_priv priv;
	struct mlx5_profile profile;
	u32 issi;
	struct mlx5e_resources mlx5e_res;
	struct mlx5_dm *dm;
	struct mlx5_vxlan *vxlan;
	struct mlx5_geneve *geneve;
	struct {
		struct mlx5_rsvd_gids reserved_gids;
		u32 roce_en;
	} roce;
#ifdef CONFIG_MLX5_FPGA
	struct mlx5_fpga_device *fpga;
#endif
	struct mlx5_clock clock;
	struct mlx5_ib_clock_info *clock_info;
	struct mlx5_fw_tracer *tracer;
	struct mlx5_rsc_dump *rsc_dump;
	u32 vsc_addr;
	struct mlx5_hv_vhca *hv_vhca;
};

struct mlx5_db {
	__be32 *db;
	union {
		struct mlx5_db_pgdir *pgdir;
		struct mlx5_ib_user_db_page *user_page;
	} u;
	dma_addr_t dma;
	int index;
};

enum {
	MLX5_COMP_EQ_SIZE = 1024,
};

enum {
	MLX5_PTYS_IB = 1 << 0,
	MLX5_PTYS_EN = 1 << 2,
};

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

enum {
	MLX5_CMD_ENT_STATE_PENDING_COMP,
};

struct mlx5_cmd_work_ent {
	unsigned long state;
	struct mlx5_cmd_msg *in;
	struct mlx5_cmd_msg *out;
	void *uout;
	int uout_size;
	mlx5_cmd_cbk_t callback;
	struct delayed_work cb_timeout_work;
	void *context;
	int idx;
	struct completion handling;
	struct completion done;
	struct mlx5_cmd *cmd;
	struct work_struct work;
	struct mlx5_cmd_layout *lay;
	int ret;
	int page_queue;
	u8 status;
	u8 token;
	u64 ts1;
	u64 ts2;
	u16 op;
	bool polling;
	/* Track the max comp handlers */
	refcount_t refcnt;
};

struct mlx5_pas {
	u64 pa;
	u8 log_sz;
};

enum phy_port_state {
	MLX5_AAA_111
};

struct mlx5_hca_vport_context {
	u32 field_select;
	bool sm_virt_aware;
	bool has_smi;
	bool has_raw;
	enum port_state_policy policy;
	enum phy_port_state phys_state;
	enum ib_port_state vport_state;
	u8 port_physical_state;
	u64 sys_image_guid;
	u64 port_guid;
	u64 node_guid;
	u32 cap_mask1;
	u32 cap_mask1_perm;
	u16 cap_mask2;
	u16 cap_mask2_perm;
	u16 lid;
	u8 init_type_reply;
	u8 lmc;
	u8 subnet_timeout;
	u16 sm_lid;
	u8 sm_sl;
	u16 qkey_violation_counter;
	u16 pkey_violation_counter;
	bool grh_required;
};

#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
	.struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field


extern struct dentry *mlx5_debugfs_root;

static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}
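
/*
 * Illustrative example, not part of the upstream header: on a probed device,
 * the three accessors above combine into the usual maj.min.sub firmware
 * version, e.g.:
 *
 *	dev_info(dev->device, "fw %u.%u.%u\n",
 *		 fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
 */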

static inline u32 mlx5_base_mkey(const u32 key)
{
	return key & 0xffffff00u;
}

static inline u32 wq_get_byte_sz(u8 log_sz, u8 log_stride)
{
	return ((u32)1 << log_sz) << log_stride;
}

static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
					u8 log_stride, u8 log_sz,
					u16 strides_offset,
					struct mlx5_frag_buf_ctrl *fbc)
{
	fbc->frags = frags;
	fbc->log_stride = log_stride;
	fbc->log_sz = log_sz;
	fbc->sz_m1 = (1 << fbc->log_sz) - 1;
	fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
	fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1;
	fbc->strides_offset = strides_offset;
}

static inline void mlx5_init_fbc(struct mlx5_buf_list *frags,
				 u8 log_stride, u8 log_sz,
				 struct mlx5_frag_buf_ctrl *fbc)
{
	mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc);
}

static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
					  u32 ix)
{
	unsigned int frag;

	ix += fbc->strides_offset;
	frag = ix >> fbc->log_frag_strides;

	return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
}

static inline u32
mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
{
	u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1;

	return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
}
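
/*
 * Illustrative sketch, not part of the upstream header: a queue owner
 * initializes the control block once over an allocated fragmented buffer,
 * then resolves stride addresses by index; buf, log_stride, log_sz and the
 * consumer index ci below are the caller's:
 *
 *	struct mlx5_frag_buf_ctrl fbc;
 *	void *wqe;
 *
 *	mlx5_init_fbc(buf.frags, log_stride, log_sz, &fbc);
 *	wqe = mlx5_frag_buf_get_wqe(&fbc, ci & fbc.sz_m1);
 */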

enum {
	CMD_ALLOWED_OPCODE_ALL,
};

void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);

struct mlx5_async_ctx {
	struct mlx5_core_dev *dev;
	atomic_t num_inflight;
	struct wait_queue_head wait;
};

struct mlx5_async_work;

typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);

struct mlx5_async_work {
	struct mlx5_async_ctx *ctx;
	mlx5_async_cbk_t user_callback;
	u16 opcode; /* cmd opcode */
	void *out; /* pointer to the cmd output buffer */
};

void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
			     struct mlx5_async_ctx *ctx);
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work);
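
/*
 * Illustrative sketch of the async command flow, not part of the upstream
 * header; error handling is elided and my_cb plus the in/out buffers are
 * hypothetical caller-side names:
 *
 *	static void my_cb(int status, struct mlx5_async_work *work)
 *	{
 *		// status is the command result, work->out holds the output
 *	}
 *
 *	struct mlx5_async_ctx ctx;
 *	struct mlx5_async_work work;
 *
 *	mlx5_cmd_init_async_ctx(dev, &ctx);
 *	mlx5_cmd_exec_cb(&ctx, in, sizeof(in), out, sizeof(out), my_cb, &work);
 *	...
 *	mlx5_cmd_cleanup_async_ctx(&ctx);	// waits for inflight commands
 */
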
void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out);
int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size);
int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);

#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out)                            \
	({                                                                    \
		mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out,   \
			      MLX5_ST_SZ_BYTES(ifc_cmd##_out));               \
	})

#define mlx5_cmd_exec_in(dev, ifc_cmd, in)                                    \
	({                                                                    \
		u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {};                  \
		mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out);                  \
	})
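
/*
 * Illustrative example, not part of the upstream header: the sized wrappers
 * above remove the explicit in/out lengths; ENABLE_HCA is used here only as
 * a familiar command:
 *
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
 *	int err;
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec_in(dev, enable_hca, in);
 */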

int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size);
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
void mlx5_health_flush(struct mlx5_core_dev *dev);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in,
			  int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, u32 mkey, u32 *out,
			 int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages, bool ec_function);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);

void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);

struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in,
		    void *data_out, int size_out, u16 reg_id, int arg,
		    int write, bool verbose);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);

int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
		       int node);

static inline int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
}
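
/*
 * Illustrative sketch, not part of the upstream header: a doorbell record is
 * allocated on the device's NUMA node and released with mlx5_db_free():
 *
 *	struct mlx5_db db;
 *
 *	if (!mlx5_db_alloc(dev, &db)) {
 *		// db.db points at the record, db.dma is its DMA address
 *		mlx5_db_free(dev, &db);
 *	}
 */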

void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);

const char *mlx5_command_str(int command);
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
			struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz);

int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
		     struct mlx5_rate_limit *rl);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
			 bool dedicated_entry, u16 *index);
void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index);
bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
		       struct mlx5_rate_limit *rl_1);
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);

unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
			   u8 roce_version, u8 roce_l3_type, const u8 *gid,
			   const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);

static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
	return mkey & 0xff;
}
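
/*
 * A memory key is a 24-bit index in the high bits plus an 8-bit variant
 * ("tag") in the low byte; mlx5_base_mkey() above masks the variant off.
 * Illustrative round trip, not part of the upstream header:
 *
 *	u32 idx = mlx5_mkey_to_idx(mkey);
 *
 *	// for any mkey: mlx5_idx_to_mkey(idx) == mlx5_base_mkey(mkey)
 */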

/* Async-atomic event notifier used by mlx5 core to forward FW
 * events from the event queue to mlx5 consumers.
 * Optimizes event queue dispatching.
 */
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);

/* Async-atomic event notifier used for forwarding
 * events from the event queue to the mlx5 events dispatcher,
 * eswitch, clock and others.
 */
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);

/* Blocking event notifier: used to forward SW events, used for slow path */
int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event,
				      void *data);
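
/*
 * Illustrative sketch, not part of the upstream header: subscribing to
 * device events via the atomic chain; my_event_handler and my_nb are
 * hypothetical names, and the handler runs in atomic context:
 *
 *	static int my_event_handler(struct notifier_block *nb,
 *				    unsigned long event, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_event_handler,
 *	};
 *
 *	mlx5_notifier_register(dev, &my_nb);
 *	...
 *	mlx5_notifier_unregister(dev, &my_nb);
 */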

int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
			   struct net_device *slave);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets);
struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev);
u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
			 u64 length, u32 log_alignment, u16 uid,
			 phys_addr_t *addr, u32 *obj_id);
int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
			   u64 length, u16 uid, phys_addr_t addr, u32 obj_id);

struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev);
void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev);

int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev,
					  int vf_id,
					  struct notifier_block *nb);
void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev,
					     int vf_id,
					     struct notifier_block *nb);
#ifdef CONFIG_MLX5_CORE_IPOIB
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
					  struct ib_device *ibdev,
					  const char *name,
					  void (*setup)(struct net_device *));
#endif /* CONFIG_MLX5_CORE_IPOIB */
int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
			    struct ib_device *device,
			    struct rdma_netdev_alloc_params *params);

enum {
	MLX5_PCI_DEV_IS_VF = 1 << 0,
};

static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev)
{
	return dev->coredev_type == MLX5_COREDEV_PF;
}

static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
{
	return dev->coredev_type == MLX5_COREDEV_VF;
}

static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
{
	return dev->caps.embedded_cpu;
}

static inline bool
mlx5_core_is_ecpf_esw_manager(const struct mlx5_core_dev *dev)
{
	return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager);
}

static inline bool mlx5_ecpf_vport_exists(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists);
}

static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev)
{
	return dev->priv.sriov.max_vfs;
}

static inline int mlx5_get_gid_table_len(u16 param)
{
	if (param > 4) {
		pr_warn("gid table length is zero\n");
		return 0;
	}

	return 8 * (1 << param);
}

static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
{
	return !!(dev->priv.rl_table.max_size);
}

static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) &&
	       MLX5_CAP_GEN(dev, num_vhca_ports) <= 1;
}

static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;
}

static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_mp_slave(dev) ||
	       mlx5_core_is_mp_master(dev);
}

static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_mp_enabled(dev))
		return 1;

	return MLX5_CAP_GEN(dev, native_port_num);
}

static inline int mlx5_get_dev_index(struct mlx5_core_dev *dev)
{
	int idx = MLX5_CAP_GEN(dev, native_port_num);

	if (idx >= 1 && idx <= MLX5_MAX_PORTS)
		return idx - 1;
	else
		return PCI_FUNC(dev->pdev->devfn);
}

enum {
	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};

bool mlx5_is_roce_on(struct mlx5_core_dev *dev);

static inline bool mlx5_get_roce_state(struct mlx5_core_dev *dev)
{
	if (MLX5_CAP_GEN(dev, roce_rw_supported))
		return MLX5_CAP_GEN(dev, roce);

	/* If RoCE cap is read-only in FW, get RoCE state from devlink
	 * in order to support RoCE enable/disable feature
	 */
	return mlx5_is_roce_on(dev);
}

#endif /* MLX5_DRIVER_H */