0001
0002
0003
0004
0005
0006
0007 #ifndef _QED_SRIOV_H
0008 #define _QED_SRIOV_H
0009 #include <linux/types.h>
0010 #include "qed_vf.h"
0011
/* Number of MAC / VLAN filters each VF is given by default */
#define QED_ETH_VF_NUM_MAC_FILTERS 1
#define QED_ETH_VF_NUM_VLAN_FILTERS 2
/* Length in u64 words of per-VF bitmaps (e.g. pending_flr);
 * presumably 3 * 64 >= MAX_NUM_VFS - TODO confirm against MAX_NUM_VFS.
 */
#define QED_VF_ARRAY_LENGTH (3)

/* PF/VF role tests; compile down to constants when SR-IOV isn't built in */
#ifdef CONFIG_QED_SRIOV
#define IS_VF(cdev) ((cdev)->b_is_vf)
#define IS_PF(cdev) (!((cdev)->b_is_vf))
/* PF whose device exposes the SR-IOV capability (p_iov_info populated) */
#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_VF(cdev) (0)
#define IS_PF(cdev) (1)
#define IS_PF_SRIOV(p_hwfn) (0)
#endif
/* PF that has allocated its per-VF iov database (struct qed_pf_iov) */
#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))

/* Maximum number of queue chains (rx/tx queues, SBs) a single VF may own */
#define QED_MAX_VF_CHAINS_PER_PF 16

/* Total VLAN shadow-filter entries needed to cover every possible VF */
#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS	\
	(MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)
0031
/* Identifies the TLV kinds a VF may include in a vport-update request;
 * QED_IOV_VP_UPDATE_MAX doubles as the count of real entries.
 */
enum qed_iov_vport_update_flag {
	QED_IOV_VP_UPDATE_ACTIVATE,
	QED_IOV_VP_UPDATE_VLAN_STRIP,
	QED_IOV_VP_UPDATE_TX_SWITCH,
	QED_IOV_VP_UPDATE_MCAST,
	QED_IOV_VP_UPDATE_ACCEPT_PARAM,
	QED_IOV_VP_UPDATE_RSS,
	QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN,
	QED_IOV_VP_UPDATE_SGE_TPA,
	QED_IOV_VP_UPDATE_MAX,
};
0043
/* Per-VF information the PF exposes to the protocol driver (ndo_* paths) */
struct qed_public_vf_info {
	/* Forced MAC/VLAN as configured by the hypervisor admin;
	 * presumably later reflected to the VF's bulletin board - verify
	 * against qed_sriov.c.
	 */
	u8 forced_mac[ETH_ALEN];
	u16 forced_vlan;
	u8 mac[ETH_ALEN];

	/* Requested link state (IFLA_VF_LINK_STATE_* semantics assumed -
	 * TODO confirm against the ndo_set_vf_link_state caller).
	 */
	int link_state;

	/* Currently configured Tx rate; 0 presumably means "unlimited" */
	int tx_rate;

	/* Trust configuration: *_configured is what is applied,
	 * *_request is the latest admin request awaiting the workqueue.
	 * The accept modes/any-vlan mirror the VF's last vport-update
	 * so they can be re-applied when trust changes.
	 */
	bool is_trusted_configured;
	bool is_trusted_request;
	u8 rx_accept_mode;
	u8 tx_accept_mode;
	bool accept_any_vlan;
};
0067
/* Parameters the PF-side client passes when initializing a VF */
struct qed_iov_vf_init_params {
	u16 rel_vf_id;

	/* Number of requested queues; a single count is used, so Rx and
	 * Tx queue counts cannot differ.
	 */
	u16 num_queues;

	/* Lets the client choose which queue-zones to use for Rx/Tx on a
	 * per-queue basis; values are presumably relative to the PF's
	 * resources - TODO confirm against qed_iov_init_hw_for_vf().
	 */
	u16 req_rx_queue[QED_MAX_VF_CHAINS_PER_PF];
	u16 req_tx_queue[QED_MAX_VF_CHAINS_PER_PF];
};
0084
/* Device-wide SR-IOV information; the fields mirror the PCIe SR-IOV
 * extended capability registers read from config space.
 */
struct qed_hw_sriov_info {
	int pos;		/* capability position in config space */
	int nres;		/* number of resources */
	u32 cap;		/* SR-IOV Capabilities register */
	u16 ctrl;		/* SR-IOV Control register */
	u16 total_vfs;		/* TotalVFs - max VFs the PF supports */
	u16 num_vfs;		/* NumVFs currently enabled */
	u16 initial_vfs;	/* InitialVFs */
	u16 nr_virtfn;		/* number of VFs available */
	u16 offset;		/* First VF Offset (routing ID) */
	u16 stride;		/* VF Stride (routing ID) */
	u16 vf_device_id;	/* VF Device ID */
	u32 pgsz;		/* System Page Size for BAR alignment */
	u8 link;		/* Function Dependency Link */

	/* Absolute id of the first VF belonging to this PF */
	u32 first_vf_in_pf;
};
0105
/* Per-VF mailbox kept by the PF; holds everything needed to receive a
 * request from the VF and send back a reply.
 */
struct qed_iov_vf_mbx {
	union vfpf_tlvs *req_virt;	/* VF request buffer (PF mapping) */
	dma_addr_t req_phys;
	union pfvf_tlvs *reply_virt;	/* PF reply buffer */
	dma_addr_t reply_phys;

	/* Address in the VF where a pending message is located */
	dma_addr_t pending_req;

	/* Set when a message from the VF awaits handling */
	bool b_pending_msg;

	/* Current write offset when composing a TLV reply */
	u8 *offset;

	/* Saved copy of the VF request header */
	struct vfpf_first_tlv first_tlv;
};
0126
/* Queue-ids used by legacy VFs that predate explicit qid TLVs */
#define QED_IOV_LEGACY_QID_RX (0)
#define QED_IOV_LEGACY_QID_TX (1)
#define QED_IOV_QID_INVALID (0xFE)

/* A queue-cid slot inside a VF's queue-zone, tagged by direction */
struct qed_vf_queue_cid {
	bool b_is_tx;
	struct qed_queue_cid *p_cid;
};
0135
/* Describes one queue-zone associated with the VF */
struct qed_vf_queue {
	u16 fw_rx_qid;	/* firmware-absolute Rx queue id */
	u16 fw_tx_qid;	/* firmware-absolute Tx queue id */

	struct qed_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
};
0143
/* Lifecycle of a VF as tracked by the PF */
enum vf_state {
	VF_FREE = 0,		/* VF ready to be acquired, holds no resources */
	VF_ACQUIRED,		/* VF acquired, but not yet initialized */
	VF_ENABLED,		/* VF enabled */
	VF_RESET,		/* VF FLR-ed, cleanup pending */
	VF_STOPPED		/* VF stopped */
};
0151
/* One shadow VLAN-filter entry */
struct qed_vf_vlan_shadow {
	bool used;
	u16 vid;
};

/* PF-side shadow of the filter configuration a VF has requested, so it
 * can be re-applied (e.g. after removing a forced VLAN/MAC).
 */
struct qed_vf_shadow_config {
	/* Shadow copy of all guest VLANs; +1 slot for untagged traffic */
	struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];

	/* Shadow copy of all configured MACs */
	u8 macs[QED_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
	u8 inner_vlan_removal;
};
0165
/* PFs maintain an array of this structure, one entry per VF */
struct qed_vf_info {
	struct qed_iov_vf_mbx vf_mbx;
	enum vf_state state;
	bool b_init;
	bool b_malicious;	/* VF was flagged malicious by firmware */
	u8 to_disable;

	/* Bulletin board the PF publishes to this VF */
	struct qed_bulletin bulletin;
	dma_addr_t vf_bulletin;	/* where the VF expects the bulletin */

	/* PF saves a copy of the last VF acquire message */
	struct vfpf_acquire_tlv acquire;

	u32 concrete_fid;
	u16 opaque_fid;
	u16 mtu;

	u8 vport_id;
	u8 relative_vf_id;	/* id relative to this PF */
	u8 abs_vf_id;		/* id on this engine/path */
/* Device-absolute VF id: second-path VFs are offset by MAX_NUM_VFS_BB */
#define QED_VF_ABS_ID(p_hwfn, p_vf) (QED_PATH_ID(p_hwfn) ? \
				     (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
				     (p_vf)->abs_vf_id)

	u8 vport_instance;	/* count of vport-starts by this VF */
	u8 num_rxqs;
	u8 num_txqs;

	u16 rx_coal;		/* Rx coalescing value, usecs assumed - confirm */
	u16 tx_coal;

	u8 num_sbs;

	u8 num_mac_filters;
	u8 num_vlan_filters;

	struct qed_vf_queue vf_queues[QED_MAX_VF_CHAINS_PER_PF];
	u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
	u8 num_active_rxqs;
	struct qed_public_vf_info p_vf_info;
	bool spoof_chk;		/* currently applied spoof-check setting */
	bool req_spoofchk_val;	/* admin-requested value, applied later */

	/* Stores the configuration requested by the VF */
	struct qed_vf_shadow_config shadow_config;

	/* Bitfield using the bulletin's valid-map bit positions, marking
	 * which bulletin features (e.g. forced MAC/VLAN) were configured.
	 */
	u64 configured_features;
#define QED_IOV_CONFIGURED_FEATURES_MASK ((1 << MAC_ADDR_FORCED) | \
					  (1 << VLAN_ADDR_FORCED))
};
0220
/* Part of qed_hwfn; allocated only for PFs that have the SR-IOV
 * capability (see IS_PF_SRIOV_ALLOC()).
 */
struct qed_pf_iov {
	struct qed_vf_info vfs_array[MAX_NUM_VFS];
	u64 pending_flr[QED_VF_ARRAY_LENGTH];	/* bitmap of VFs awaiting FLR handling */

	/* Mailbox request/reply and bulletin DMA memory is allocated as
	 * single contiguous buffers and split among the VFs.
	 */
	void *mbx_msg_virt_addr;
	dma_addr_t mbx_msg_phys_addr;
	u32 mbx_msg_size;
	void *mbx_reply_virt_addr;
	dma_addr_t mbx_reply_phys_addr;
	u32 mbx_reply_size;
	void *p_bulletins;
	dma_addr_t bulletins_phys;
	u32 bulletins_size;
};
0239
/* Kinds of deferred work handled by the iov workqueue
 * (scheduled via qed_schedule_iov()).
 */
enum qed_iov_wq_flag {
	QED_IOV_WQ_MSG_FLAG,
	QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
	QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
	QED_IOV_WQ_STOP_WQ_FLAG,
	QED_IOV_WQ_FLR_FLAG,
	QED_IOV_WQ_TRUST_FLAG,
	QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
};
0249
0250 extern const struct qed_iov_hv_ops qed_iov_ops_pass;
0251
0252 #ifdef CONFIG_QED_SRIOV
/**
 * qed_iov_is_valid_vfid(): Check validity of a given relative VF id.
 *
 * @p_hwfn: HW device data.
 * @rel_vf_id: Relative VF ID.
 * @b_enabled_only: Consider only enabled VFs.
 * @b_non_malicious: Additionally require that the VF was not flagged
 *                   malicious.
 *
 * Return: true iff the VF id is valid w.r.t. the above filters.
 */
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   int rel_vf_id,
			   bool b_enabled_only, bool b_non_malicious);

/**
 * qed_iov_get_next_active_vf(): Find the first active VF whose relative
 *                               id is >= @rel_vf_id.
 *
 * @p_hwfn: HW device data.
 * @rel_vf_id: Relative VF id to start the search from.
 *
 * Return: Relative id of the next active VF, or MAX_NUM_VFS when there
 *         is none (the value qed_for_each_vf() terminates on).
 */
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);

/**
 * qed_iov_bulletin_set_udp_ports(): Publish tunnel UDP ports to a VF's
 *                                   bulletin board.
 *
 * @p_hwfn: HW device data.
 * @vfid: VF index.
 * @vxlan_port: VXLAN UDP destination port.
 * @geneve_port: GENEVE UDP destination port.
 */
void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
				    int vfid, u16 vxlan_port, u16 geneve_port);

/**
 * qed_iov_hw_info(): Read SR-IOV related information (presumably from
 *                    PCI config space into qed_hw_sriov_info - confirm).
 *
 * @p_hwfn: HW device data.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int qed_iov_hw_info(struct qed_hwfn *p_hwfn);

/**
 * qed_add_tlv(): Place a TLV of the given type/length at the current
 *                offset of a TLV buffer.
 *
 * @p_hwfn: HW device data.
 * @offset: In/out - current position in the buffer; advanced past the
 *          newly written TLV.
 * @type: TLV type.
 * @length: TLV length.
 *
 * Return: Pointer to the newly placed TLV.
 */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);

/**
 * qed_dp_tlv_list(): Debug-print the types and lengths of the TLVs in
 *                    the given list buffer.
 *
 * @p_hwfn: HW device data.
 * @tlvs_list: TLV list buffer.
 */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);

/**
 * qed_sriov_vfpf_malicious(): Handle a firmware "malicious VF" error.
 *
 * @p_hwfn: HW device data.
 * @p_data: Firmware error data describing the event.
 */
void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
			      struct fw_err_data *p_data);

/**
 * qed_sriov_eqe_event(): Callback for SR-IOV events arriving on the
 *                        event queue.
 *
 * @p_hwfn: HW device data.
 * @opcode: EQ element opcode.
 * @echo: EQ element echo field.
 * @data: EQ element data.
 * @fw_return_code: FW return code.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
			union event_ring_data *data, u8 fw_return_code);

/**
 * qed_iov_alloc(): Allocate SR-IOV related resources (pf_iov_info).
 *
 * @p_hwfn: HW device data.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int qed_iov_alloc(struct qed_hwfn *p_hwfn);

/**
 * qed_iov_setup(): Setup SR-IOV related resources.
 *
 * @p_hwfn: HW device data.
 */
void qed_iov_setup(struct qed_hwfn *p_hwfn);

/**
 * qed_iov_free(): Free SR-IOV related resources.
 *
 * @p_hwfn: HW device data.
 */
void qed_iov_free(struct qed_hwfn *p_hwfn);

/**
 * qed_iov_free_hw_info(): Free device-wide SR-IOV info (p_iov_info).
 *
 * @cdev: Qed dev pointer.
 */
void qed_iov_free_hw_info(struct qed_dev *cdev);

/**
 * qed_iov_mark_vf_flr(): Mark the VFs of this PF that experienced an FLR.
 *
 * @p_hwfn: HW device data.
 * @disabled_vfs: Bitmask (array of u32) of all VFs on the path that were
 *                FLR-ed.
 *
 * Return: true iff at least one of this PF's VFs was marked.
 */
bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);

/**
 * qed_iov_search_list_tlvs(): Search a TLV list for a TLV of the
 *                             requested type.
 *
 * @p_hwfn: HW device data.
 * @p_tlvs_list: TLV list buffer.
 * @req_type: Requested TLV type.
 *
 * Return: Pointer to the TLV if found, NULL otherwise.
 */
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
			       void *p_tlvs_list, u16 req_type);

/* Start / stop the iov workqueue of the device */
void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
int qed_iov_wq_start(struct qed_dev *cdev);

/* Queue deferred iov work of kind @flag for @hwfn (see qed_iov_wq_flag) */
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
void qed_vf_start_iov_wq(struct qed_dev *cdev);
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
0408 #else
/* CONFIG_QED_SRIOV disabled: the API collapses to no-op stubs returning
 * "success" / "nothing found", so callers need no #ifdefs of their own.
 */
static inline bool
qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
		      int rel_vf_id, bool b_enabled_only, bool b_non_malicious)
{
	return false;
}

static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
					     u16 rel_vf_id)
{
	/* "no active VF" - also what terminates qed_for_each_vf() */
	return MAX_NUM_VFS;
}

static inline void
qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid,
			       u16 vxlan_port, u16 geneve_port)
{
}

static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	return 0;
}

static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	return 0;
}

static inline void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
}

static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
{
}

static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
{
}

static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
				       u32 *disabled_vfs)
{
	return false;
}

static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
}

static inline int qed_iov_wq_start(struct qed_dev *cdev)
{
	return 0;
}

static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
				    enum qed_iov_wq_flag flag)
{
}

static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
}

static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
	return 0;
}

static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
}

static inline void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
					    struct fw_err_data *p_data)
{
}

static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode,
				      __le16 echo, union event_ring_data *data,
				      u8 fw_return_code)
{
	return 0;
}
0494 #endif
0495
/* Iterate @_i over the relative ids of all active VFs of @_p_hwfn;
 * stops when qed_iov_get_next_active_vf() returns MAX_NUM_VFS.
 */
#define qed_for_each_vf(_p_hwfn, _i)			  \
	for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \
	     _i < MAX_NUM_VFS;				  \
	     _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))
0500
0501 #endif