#ifndef __MLX5_ESWITCH_H__
#define __MLX5_ESWITCH_H__

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#include "lib/fs_chains.h"
#include "sf/sf.h"
#include "en/tc_ct.h"
#include "en/tc/sample.h"

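/* Objects of these types are stored in the reg_c0 object mapping pool
 * (see offloads.reg_c0_obj_pool below): on an FDB miss, the mapped id
 * carried in reg_c0 is looked up to restore the TC chain, the sampler
 * attributes, or the internal-port metadata the packet was associated with.
 */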
enum mlx5_mapped_obj_type {
	MLX5_MAPPED_OBJ_CHAIN,
	MLX5_MAPPED_OBJ_SAMPLE,
	MLX5_MAPPED_OBJ_INT_PORT_METADATA,
};

struct mlx5_mapped_obj {
	enum mlx5_mapped_obj_type type;
	union {
		u32 chain;
		struct {
			u32 group_id;
			u32 rate;
			u32 trunc_size;
			u32 tunnel_id;
		} sample;
		u32 int_port_metadata;
	};
};

#ifdef CONFIG_MLX5_ESWITCH

#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

#define mlx5_esw_has_fwd_fdb(dev) \
	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

#define esw_chains(esw) \
	((esw)->fdb_table.offloads.esw_chains_priv)

enum {
	MAPPING_TYPE_CHAIN,
	MAPPING_TYPE_TUNNEL,
	MAPPING_TYPE_TUNNEL_ENC_OPTS,
	MAPPING_TYPE_LABELS,
	MAPPING_TYPE_ZONE,
	MAPPING_TYPE_INT_PORT,
};

struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allow_rule;
	struct {
		struct mlx5_flow_group *allow_spoofchk_only_grp;
		struct mlx5_flow_group *allow_untagged_spoofchk_grp;
		struct mlx5_flow_group *allow_untagged_only_grp;
		struct mlx5_flow_group *drop_grp;
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter;
	} legacy;
	struct {
		/* Optional group to add an FTE to do internal priority
		 * tagging on ingress packets.
		 */
		struct mlx5_flow_group *metadata_prio_tag_grp;
		/* Group to add default match-all FTE entry to tag ingress
		 * packet with metadata.
		 */
		struct mlx5_flow_group *metadata_allmatch_grp;
		/* Optional group to add a drop-all rule */
		struct mlx5_flow_group *drop_grp;
		struct mlx5_modify_hdr *modify_metadata;
		struct mlx5_flow_handle *modify_metadata_rule;
		struct mlx5_flow_handle *drop_rule;
	} offloads;
};

struct vport_egress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allowed_vlan;
	struct mlx5_flow_group *vlan_grp;
	union {
		struct {
			struct mlx5_flow_group *drop_grp;
			struct mlx5_flow_handle *drop_rule;
			struct mlx5_fc *drop_counter;
		} legacy;
		struct {
			struct mlx5_flow_group *fwd_grp;
			struct mlx5_flow_handle *fwd_rule;
			struct mlx5_flow_handle *bounce_rule;
			struct mlx5_flow_group *bounce_grp;
		} offloads;
	};
};

struct mlx5_vport_drop_stats {
	u64 rx_dropped;
	u64 tx_dropped;
};

struct mlx5_vport_info {
	u8 mac[ETH_ALEN];
	u16 vlan;
	u64 node_guid;
	int link_state;
	u8 qos;
	u8 spoofchk: 1;
	u8 trusted: 1;
};

/* Vport context events */
enum mlx5_eswitch_vport_event {
	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};

struct mlx5_vport {
	struct mlx5_core_dev *dev;
	struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct mlx5_flow_handle *promisc_rule;
	struct mlx5_flow_handle *allmulti_rule;
	struct work_struct vport_change_handler;

	struct vport_ingress ingress;
	struct vport_egress egress;
	u32 default_metadata;
	u32 metadata;

	struct mlx5_vport_info info;

	struct {
		bool enabled;
		u32 esw_tsar_ix;
		u32 bw_share;
		u32 min_rate;
		u32 max_rate;
		struct mlx5_esw_rate_group *group;
	} qos;

	u16 vport;
	bool enabled;
	enum mlx5_eswitch_vport_event enabled_events;
	int index;
	struct devlink_port *dl_port;
	struct dentry *dbgfs;
};

struct mlx5_esw_indir_table;

struct mlx5_eswitch_fdb {
	union {
		struct legacy_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
			struct mlx5_flow_table *vepa_fdb;
			struct mlx5_flow_handle *vepa_uplink_rule;
			struct mlx5_flow_handle *vepa_star_rule;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_namespace *ns;
			struct mlx5_flow_table *tc_miss_table;
			struct mlx5_flow_table *slow_fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *send_to_vport_meta_grp;
			struct mlx5_flow_group *peer_miss_grp;
			struct mlx5_flow_handle **peer_miss_rules;
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle **send_to_vport_meta_rules;
			struct mlx5_flow_handle *miss_rule_uni;
			struct mlx5_flow_handle *miss_rule_multi;
			int vlan_push_pop_refcount;

			struct mlx5_fs_chains *esw_chains_priv;
			struct {
				DECLARE_HASHTABLE(table, 8);
				/* Protects vports.table */
				struct mutex lock;
			} vports;

			struct mlx5_esw_indir_table *indir;

		} offloads;
	};
	u32 flags;
};

struct mlx5_esw_offload {
	struct mlx5_flow_table *ft_offloads_restore;
	struct mlx5_flow_group *restore_group;
	struct mlx5_modify_hdr *restore_copy_hdr_id;
	struct mapping_ctx *reg_c0_obj_pool;

	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct xarray vport_reps;
	struct list_head peer_flows;
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
	struct mutex decap_tbl_lock; /* protects decap_tbl */
	DECLARE_HASHTABLE(decap_tbl, 8);
	struct mod_hdr_tbl mod_hdr;
	DECLARE_HASHTABLE(termtbl_tbl, 8);
	struct mutex termtbl_mutex; /* protects termtbl hash */
	struct xarray vhca_map;
	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
	u8 inline_mode;
	atomic64_t num_flows;
	enum devlink_eswitch_encap_mode encap;
	struct ida vport_metadata_ida;
	unsigned int host_number; /* ECPF host number */
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr {
	struct l2addr_node node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32 refcnt;
};

struct mlx5_host_work {
	struct work_struct work;
	struct mlx5_eswitch *esw;
};

struct mlx5_esw_functions {
	struct mlx5_nb nb;
	u16 num_vfs;
};

enum {
	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
	MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
	MLX5_ESWITCH_VPORT_ACL_NS_CREATED = BIT(2),
};

struct mlx5_esw_bridge_offloads;

enum {
	MLX5_ESW_FDB_CREATED = BIT(0),
};

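/* Per-device E-Switch instance; allocated by mlx5_eswitch_init() and
 * referenced from the mlx5 core device private data.
 */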
struct mlx5_eswitch {
	struct mlx5_core_dev *dev;
	struct mlx5_nb nb;
	struct mlx5_eswitch_fdb fdb_table;
	/* Legacy mode multicast and promiscuous data path state */
	struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
	struct esw_mc_addr mc_promisc;

	struct workqueue_struct *work_queue;
	struct xarray vports;
	u32 flags;
	int total_vports;
	int enabled_vports;
	/* Synchronize between vport change events
	 * and async SRIOV admin state changes
	 */
	struct mutex state_lock;

	/* Protects eswitch mode change that occurs via one or more
	 * user commands, i.e. sriov state change, devlink commands.
	 */
	struct rw_semaphore mode_lock;
	atomic64_t user_count;

	struct {
		u32 root_tsar_ix;
		struct mlx5_esw_rate_group *group0;
		struct list_head groups; /* Protected by esw->state_lock */

		/* Protected by esw->state_lock.
		 * Initially 0, meaning no QoS users and QoS is disabled.
		 */
		refcount_t refcnt;
	} qos;

	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_esw_offload offloads;
	int mode;
	u16 manager_vport;
	u16 first_host_vport;
	struct mlx5_esw_functions esw_funcs;
	struct {
		u32 large_group_num;
	} params;
	struct blocking_notifier_head n_head;
	struct dentry *dbgfs;
};

void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
void esw_offloads_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw);

bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable);
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);

int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps);

int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);

#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
		u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
		u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
		u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
		u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
		u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
		u32 max_rate, u32 min_rate);
int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
		struct mlx5_vport *vport,
		struct mlx5_esw_rate_group *group,
		struct netlink_ext_ack *extack);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
		u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
		u16 vport,
		struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
		bool other_vport, void *in);

struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;

bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
		struct mlx5_flow_attr *attr,
		struct mlx5_flow_act *flow_act,
		struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
		struct mlx5_flow_table *ft,
		struct mlx5_flow_spec *spec,
		struct mlx5_esw_flow_attr *attr,
		struct mlx5_flow_act *flow_act,
		struct mlx5_flow_destination *dest,
		int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
		struct mlx5_termtbl_handle *tt);

void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
		struct mlx5_flow_spec *spec,
		struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
		struct mlx5_flow_spec *spec,
		struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
		struct mlx5_flow_handle *rule,
		struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
		struct mlx5_flow_handle *rule,
		struct mlx5_flow_attr *attr);

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
		struct mlx5_flow_destination *dest);

enum {
	SET_VLAN_STRIP = BIT(0),
	SET_VLAN_INSERT = BIT(1)
};

enum mlx5_flow_match_level {
	MLX5_MATCH_NONE = MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2 = MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3 = MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4 = MLX5_INLINE_MODE_TCP_UDP,
};

#define MLX5_MAX_FLOW_FWD_VPORTS 32

enum {
	MLX5_ESW_DEST_ENCAP = BIT(0),
	MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
	MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE = BIT(2),
};

struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev *in_mdev;
	struct mlx5_core_dev *counter_dev;
	struct mlx5e_tc_int_port *dest_int_port;
	struct mlx5e_tc_int_port *int_port;

	int split_count;
	int out_count;

	__be16 vlan_proto[MLX5_FS_VLAN_DEPTH];
	u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
	u8 total_vlan;
	struct {
		u32 flags;
		struct mlx5_eswitch_rep *rep;
		struct mlx5_pkt_reformat *pkt_reformat;
		struct mlx5_core_dev *mdev;
		struct mlx5_termtbl_handle *termtbl;
		int src_port_rewrite_act_id;
	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5_rx_tun_attr *rx_tun_attr;
	struct ethhdr eth;
	struct mlx5_pkt_reformat *decap_pkt_reformat;
};

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
		struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
		struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
		enum devlink_eswitch_encap_mode encap,
		struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
		enum devlink_eswitch_encap_mode *encap);
int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
		u8 *hw_addr, int *hw_addr_len,
		struct netlink_ext_ack *extack);
int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
		const u8 *hw_addr, int hw_addr_len,
		struct netlink_ext_ack *extack);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
		struct mlx5_flow_attr *attr);
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
		struct mlx5_flow_attr *attr);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
		u16 vport, u16 vlan, u8 qos, u8 set_flags);

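/* vlan_depth below is the number of VLAN headers a rule needs to push or pop:
 * depth 1 only requires the basic pop_vlan/push_vlan FDB capabilities, while
 * depth 2 (QinQ) additionally requires the pop_vlan_2/push_vlan_2 capabilities
 * checked in the second return statement.
 */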
static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
		u8 vlan_depth)
{
	bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
		   MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);

	if (vlan_depth == 1)
		return ret;

	return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
		MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}

bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
		struct mlx5_core_dev *dev1);

const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...) \
	dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...) \
	dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...) \
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw)
{
	return esw && MLX5_ESWITCH_MANAGER(esw->dev);
}

/* The returned number is valid only when the dev is eswitch manager. */
static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}

static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return esw->manager_vport == vport_num;
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}

static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev);
}

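/* The devlink port index packs the device's vhca_id into the upper 16 bits
 * and the vport number into the lower 16 bits.  For example, vhca_id 0x3 and
 * vport 5 map to index 0x00030005; the reverse helper simply masks the low
 * 16 bits back out.
 */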
static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
		u16 vport_num)
{
	return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}

static inline u16
mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
{
	return dl_port_index & 0xffff;
}

static inline bool mlx5_esw_is_fdb_created(struct mlx5_eswitch *esw)
{
	return esw->fdb_table.flags & MLX5_ESW_FDB_CREATED;
}

void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

/* Each xarray mark below tags vports of one type: host PF/VF functions,
 * SR-IOV VFs, and sub-functions (SFs) respectively.
 */
#define MLX5_ESW_VPT_HOST_FN XA_MARK_0
#define MLX5_ESW_VPT_VF XA_MARK_1
#define MLX5_ESW_VPT_SF XA_MARK_2

/* The vport iterators below are only valid after esw->total_vports and the
 * vports xarray are initialized in mlx5_eswitch_init().
 */
#define mlx5_esw_for_each_vport(esw, index, vport) \
	xa_for_each(&((esw)->vports), index, vport)

#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter) \
	for (index = 0, entry = xa_find(xa, &index, last, filter); \
	     entry; entry = xa_find_after(xa, &index, last, filter))

#define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter) \
	mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter)

#define mlx5_esw_for_each_vf_vport(esw, index, vport, last) \
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF)

#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last) \
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)
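
/* Typical iteration over the vports xarray looks roughly like:
 *
 *	unsigned long i;
 *	struct mlx5_vport *vport;
 *
 *	mlx5_esw_for_each_vport(esw, i, vport) {
 *		if (!vport->enabled)
 *			continue;
 *		...
 *	}
 */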

struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
		enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
		enum mlx5_eswitch_vport_event enabled_events);
void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
		struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
		struct mlx5_vport *vport);

struct esw_vport_tbl_namespace {
	int max_fte;
	int max_num_groups;
	u32 flags;
};

struct mlx5_vport_tbl_attr {
	u32 chain;
	u16 prio;
	u16 vport;
	const struct esw_vport_tbl_namespace *vport_ns;
};

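/* Look up, or create on first use, the per-vport FDB table described by
 * @attr.  Tables are reference counted, so each successful
 * mlx5_esw_vporttbl_get() should be balanced by mlx5_esw_vporttbl_put().
 */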
struct mlx5_flow_table *
mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
void
mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);

int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
		enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
		enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);

void mlx5_esw_vport_debugfs_create(struct mlx5_eswitch *esw, u16 vport_num, bool is_sf, u16 sf_num);
void mlx5_esw_vport_debugfs_destroy(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
		u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
		u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);

int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);

/**
 * struct mlx5_esw_event_info - Indicates eswitch mode changed/changing.
 *
 * @new_mode: New mode of eswitch.
 */
struct mlx5_esw_event_info {
	u16 new_mode;
};

int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);

bool mlx5_esw_hold(struct mlx5_core_dev *dev);
void mlx5_esw_release(struct mlx5_core_dev *dev);
void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);

void esw_vport_change_handle_locked(struct mlx5_vport *vport);

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);

int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
		struct mlx5_eswitch *slave_esw);
void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
		struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);

#else  /* CONFIG_MLX5_ESWITCH */

/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
		u16 vport_num)
{
	return vport_num;
}

static inline int
mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
		struct mlx5_eswitch *slave_esw)
{
	return 0;
}

static inline void
mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
		struct mlx5_eswitch *slave_esw) {}

static inline int
mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
{
	return 0;
}
#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */