0001
0002
0003
0004 #include <linux/kernel.h>
0005 #include <linux/module.h>
0006 #include <linux/types.h>
0007 #include <linux/pci.h>
0008 #include <linux/netdevice.h>
0009 #include <linux/etherdevice.h>
0010 #include <linux/ethtool.h>
0011 #include <linux/slab.h>
0012 #include <linux/device.h>
0013 #include <linux/skbuff.h>
0014 #include <linux/if_vlan.h>
0015 #include <linux/if_bridge.h>
0016 #include <linux/workqueue.h>
0017 #include <linux/jiffies.h>
0018 #include <linux/bitops.h>
0019 #include <linux/list.h>
0020 #include <linux/notifier.h>
0021 #include <linux/dcbnl.h>
0022 #include <linux/inetdevice.h>
0023 #include <linux/netlink.h>
0024 #include <linux/jhash.h>
0025 #include <linux/log2.h>
0026 #include <linux/refcount.h>
0027 #include <linux/rhashtable.h>
0028 #include <net/switchdev.h>
0029 #include <net/pkt_cls.h>
0030 #include <net/netevent.h>
0031 #include <net/addrconf.h>
0032 #include <linux/ptp_classify.h>
0033
0034 #include "spectrum.h"
0035 #include "pci.h"
0036 #include "core.h"
0037 #include "core_env.h"
0038 #include "reg.h"
0039 #include "port.h"
0040 #include "trap.h"
0041 #include "txheader.h"
0042 #include "spectrum_cnt.h"
0043 #include "spectrum_dpipe.h"
0044 #include "spectrum_acl_flex_actions.h"
0045 #include "spectrum_span.h"
0046 #include "spectrum_ptp.h"
0047 #include "spectrum_trap.h"
0048
0049 #define MLXSW_SP_FWREV_MINOR 2010
0050 #define MLXSW_SP_FWREV_SUBMINOR 1006
0051
0052 #define MLXSW_SP1_FWREV_MAJOR 13
0053 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
0054
0055 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
0056 .major = MLXSW_SP1_FWREV_MAJOR,
0057 .minor = MLXSW_SP_FWREV_MINOR,
0058 .subminor = MLXSW_SP_FWREV_SUBMINOR,
0059 .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
0060 };
0061
0062 #define MLXSW_SP1_FW_FILENAME \
0063 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
0064 "." __stringify(MLXSW_SP_FWREV_MINOR) \
0065 "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
0066
0067 #define MLXSW_SP2_FWREV_MAJOR 29
0068
0069 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
0070 .major = MLXSW_SP2_FWREV_MAJOR,
0071 .minor = MLXSW_SP_FWREV_MINOR,
0072 .subminor = MLXSW_SP_FWREV_SUBMINOR,
0073 };
0074
0075 #define MLXSW_SP2_FW_FILENAME \
0076 "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
0077 "." __stringify(MLXSW_SP_FWREV_MINOR) \
0078 "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
0079
0080 #define MLXSW_SP3_FWREV_MAJOR 30
0081
0082 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
0083 .major = MLXSW_SP3_FWREV_MAJOR,
0084 .minor = MLXSW_SP_FWREV_MINOR,
0085 .subminor = MLXSW_SP_FWREV_SUBMINOR,
0086 };
0087
0088 #define MLXSW_SP3_FW_FILENAME \
0089 "mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
0090 "." __stringify(MLXSW_SP_FWREV_MINOR) \
0091 "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
0092
0093 #define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
0094 "mellanox/lc_ini_bundle_" \
0095 __stringify(MLXSW_SP_FWREV_MINOR) "_" \
0096 __stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin"
0097
0098 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
0099 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
0100 static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
0101 static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4";
0102
0103 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
0104 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
0105 };
0106 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
0107 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
0108 };
0109
0110
0111
0112
0113
0114 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
0115
0116
0117
0118
0119
0120
0121 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
0122
0123
0124
0125
0126 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
0127
0128
0129
0130
0131 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
0132
0133
0134
0135
0136
0137 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
0138
0139
0140
0141
0142 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
0143
0144
0145
0146
0147
0148 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
0149
0150
0151
0152
0153 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
0154
0155
0156
0157
0158
0159
0160
0161
0162
0163 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
0164
0165
0166
0167
0168
0169
0170 MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);
0171
0172
0173
0174
0175
0176 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
0177
0178 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
0179 unsigned int counter_index, u64 *packets,
0180 u64 *bytes)
0181 {
0182 char mgpc_pl[MLXSW_REG_MGPC_LEN];
0183 int err;
0184
0185 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
0186 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
0187 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
0188 if (err)
0189 return err;
0190 if (packets)
0191 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
0192 if (bytes)
0193 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
0194 return 0;
0195 }
0196
0197 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
0198 unsigned int counter_index)
0199 {
0200 char mgpc_pl[MLXSW_REG_MGPC_LEN];
0201
0202 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
0203 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
0204 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
0205 }
0206
0207 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
0208 unsigned int *p_counter_index)
0209 {
0210 int err;
0211
0212 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
0213 p_counter_index);
0214 if (err)
0215 return err;
0216 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
0217 if (err)
0218 goto err_counter_clear;
0219 return 0;
0220
0221 err_counter_clear:
0222 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
0223 *p_counter_index);
0224 return err;
0225 }
0226
0227 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
0228 unsigned int counter_index)
0229 {
0230 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
0231 counter_index);
0232 }
0233
/* mlxsw_sp_txhdr_construct() - prepend a control-packet TX header to an skb.
 * @skb: packet to transmit; the caller must guarantee MLXSW_TXHDR_LEN of
 *       headroom (skb_push() is used directly, no reallocation here).
 * @tx_info: provides the egress local port.
 */
void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
			      const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	/* Start from an all-zero header; fields not set below remain zero. */
	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	/* For a control packet, port_mid carries the egress local port. */
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
0249
/* mlxsw_sp_txhdr_ptp_data_construct() - prepend a data-packet TX header,
 * used for packets that require a hardware TX timestamp (PTP).
 *
 * On any failure the skb is consumed: the port's tx_dropped counter is
 * bumped and the skb freed, so the caller must NOT free it again.
 * Returns 0 on success or a negative errno.
 */
int
mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	char *txhdr;
	u16 max_fid;
	int err;

	/* Ensure there is writable headroom for the TX header. */
	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		err = -ENOMEM;
		goto err_skb_cow_head;
	}

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
		err = -EIO;
		goto err_res_valid;
	}
	max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);

	txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
	mlxsw_tx_hdr_fid_valid_set(txhdr, true);
	/* A per-port FID just above the regular FID range (max_fid +
	 * local_port - 1); presumably reserved for this purpose - confirm
	 * against the FID allocation scheme in spectrum_fid.
	 */
	mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
	return 0;

err_res_valid:
err_skb_cow_head:
	this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
	dev_kfree_skb_any(skb);
	return err;
}
0288
0289 static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
0290 {
0291 unsigned int type;
0292
0293 if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
0294 return false;
0295
0296 type = ptp_classify_raw(skb);
0297 return !!ptp_parse_header(skb, type);
0298 }
0299
/* mlxsw_sp_txhdr_handle() - choose and build the proper TX header.
 *
 * Packets that need a hardware timestamp are routed through the
 * ASIC-specific PTP txhdr constructor (data header); everything else gets
 * the regular control header. On failure the skb has been consumed
 * (tx_dropped bumped and skb freed), so the caller must not free it.
 */
static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
				 struct mlxsw_sp_port *mlxsw_sp_port,
				 struct sk_buff *skb,
				 const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	/* PTP path: the per-ASIC op also handles its own error cleanup. */
	if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
		return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
							  mlxsw_sp_port, skb,
							  tx_info);

	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	mlxsw_sp_txhdr_construct(skb, tx_info);
	return 0;
}
0325
0326 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
0327 {
0328 switch (state) {
0329 case BR_STATE_FORWARDING:
0330 return MLXSW_REG_SPMS_STATE_FORWARDING;
0331 case BR_STATE_LEARNING:
0332 return MLXSW_REG_SPMS_STATE_LEARNING;
0333 case BR_STATE_LISTENING:
0334 case BR_STATE_DISABLED:
0335 case BR_STATE_BLOCKING:
0336 return MLXSW_REG_SPMS_STATE_DISCARDING;
0337 default:
0338 BUG();
0339 }
0340 }
0341
0342 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
0343 u8 state)
0344 {
0345 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
0346 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
0347 char *spms_pl;
0348 int err;
0349
0350 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
0351 if (!spms_pl)
0352 return -ENOMEM;
0353 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
0354 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
0355
0356 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
0357 kfree(spms_pl);
0358 return err;
0359 }
0360
0361 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
0362 {
0363 char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
0364 int err;
0365
0366 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
0367 if (err)
0368 return err;
0369 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
0370 return 0;
0371 }
0372
0373 int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
0374 bool is_up)
0375 {
0376 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
0377 char paos_pl[MLXSW_REG_PAOS_LEN];
0378
0379 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
0380 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
0381 MLXSW_PORT_ADMIN_STATUS_DOWN);
0382 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
0383 }
0384
0385 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
0386 const unsigned char *addr)
0387 {
0388 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
0389 char ppad_pl[MLXSW_REG_PPAD_LEN];
0390
0391 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
0392 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
0393 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
0394 }
0395
0396 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
0397 {
0398 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
0399
0400 eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
0401 mlxsw_sp_port->local_port);
0402 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
0403 mlxsw_sp_port->dev->dev_addr);
0404 }
0405
0406 static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
0407 {
0408 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
0409 char pmtu_pl[MLXSW_REG_PMTU_LEN];
0410 int err;
0411
0412 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
0413 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
0414 if (err)
0415 return err;
0416
0417 *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
0418 return 0;
0419 }
0420
0421 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
0422 {
0423 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
0424 char pmtu_pl[MLXSW_REG_PMTU_LEN];
0425
0426 mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
0427 if (mtu > mlxsw_sp_port->max_mtu)
0428 return -EINVAL;
0429
0430 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
0431 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
0432 }
0433
0434 static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
0435 u16 local_port, u8 swid)
0436 {
0437 char pspa_pl[MLXSW_REG_PSPA_LEN];
0438
0439 mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
0440 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
0441 }
0442
0443 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
0444 {
0445 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
0446 char svpe_pl[MLXSW_REG_SVPE_LEN];
0447
0448 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
0449 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
0450 }
0451
0452 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
0453 bool learn_enable)
0454 {
0455 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
0456 char *spvmlr_pl;
0457 int err;
0458
0459 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
0460 if (!spvmlr_pl)
0461 return -ENOMEM;
0462 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
0463 learn_enable);
0464 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
0465 kfree(spvmlr_pl);
0466 return err;
0467 }
0468
0469 int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
0470 {
0471 switch (ethtype) {
0472 case ETH_P_8021Q:
0473 *p_sver_type = 0;
0474 break;
0475 case ETH_P_8021AD:
0476 *p_sver_type = 1;
0477 break;
0478 default:
0479 return -EINVAL;
0480 }
0481
0482 return 0;
0483 }
0484
0485 int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
0486 u16 ethtype)
0487 {
0488 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
0489 char spevet_pl[MLXSW_REG_SPEVET_LEN];
0490 u8 sver_type;
0491 int err;
0492
0493 err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
0494 if (err)
0495 return err;
0496
0497 mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
0498 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
0499 }
0500
0501 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
0502 u16 vid, u16 ethtype)
0503 {
0504 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
0505 char spvid_pl[MLXSW_REG_SPVID_LEN];
0506 u8 sver_type;
0507 int err;
0508
0509 err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
0510 if (err)
0511 return err;
0512
0513 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
0514 sver_type);
0515
0516 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
0517 }
0518
0519 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
0520 bool allow)
0521 {
0522 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
0523 char spaft_pl[MLXSW_REG_SPAFT_LEN];
0524
0525 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
0526 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
0527 }
0528
/* mlxsw_sp_port_pvid_set() - set the port's PVID and cache it.
 *
 * VID 0 means "no PVID": untagged frames are disallowed instead of being
 * assigned a VID. For a non-zero VID the PVID is programmed first and
 * untagged frames are then allowed; if the latter fails, the previously
 * cached PVID is restored in hardware (best effort) before returning.
 */
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			   u16 ethtype)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	/* Only cache the new PVID after hardware accepted it. */
	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	/* Roll back to the still-cached previous PVID. */
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
	return err;
}
0554
0555 static int
0556 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
0557 {
0558 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
0559 char sspr_pl[MLXSW_REG_SSPR_LEN];
0560
0561 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
0562 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
0563 }
0564
/* mlxsw_sp_port_module_info_parse() - extract and validate a port's module
 * mapping from a queried PMLP payload.
 *
 * The driver only supports mappings where all lanes of the port use the
 * same module and slot, TX and RX lane numbers match, lane numbers are
 * sequential starting from the first lane, and the width is a power of 2.
 * On success the validated mapping is stored in @port_mapping.
 * Returns 0 or -EINVAL for an unsupported configuration.
 */
static int
mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
				u16 local_port, char *pmlp_pl,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	bool separate_rxtx;
	u8 first_lane;
	u8 slot_index;
	u8 module;
	u8 width;
	int i;

	/* Lane 0 provides the reference values the other lanes must match. */
	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
	first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);

	/* width == 0 (unmapped port) is accepted; the loop below is a no-op. */
	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
				local_port);
			return -EINVAL;
		}
		/* TX/RX lane equality is only meaningful when the device
		 * reports separate RX/TX lane assignments.
		 */
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->slot_index = slot_index;
	port_mapping->width = width;
	port_mapping->module_width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
0621
0622 static int
0623 mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
0624 struct mlxsw_sp_port_mapping *port_mapping)
0625 {
0626 char pmlp_pl[MLXSW_REG_PMLP_LEN];
0627 int err;
0628
0629 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
0630 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
0631 if (err)
0632 return err;
0633 return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
0634 pmlp_pl, port_mapping);
0635 }
0636
/* mlxsw_sp_port_module_map() - map a port onto its module lanes.
 *
 * First registers the port with the core environment module accounting,
 * then programs the lane assignment via PMLP. If the register write fails,
 * the core-env mapping is undone so map/unmap stay balanced.
 */
static int
mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
			 const struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i, err;

	mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
				  port_mapping->module);

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
	/* All lanes use the same slot/module; lane numbers are sequential
	 * starting from the mapping's first lane.
	 */
	for (i = 0; i < port_mapping->width; i++) {
		mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
					      port_mapping->slot_index);
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i);
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		goto err_pmlp_write;
	return 0;

err_pmlp_write:
	mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
				    port_mapping->module);
	return err;
}
0666
0667 static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
0668 u8 slot_index, u8 module)
0669 {
0670 char pmlp_pl[MLXSW_REG_PMLP_LEN];
0671
0672 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
0673 mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
0674 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
0675 mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
0676 }
0677
/* ndo_open: bring the port up.
 *
 * Order matters: the transceiver module is powered up first, then the
 * port's administrative state is set to up, and only then is the TX queue
 * started. On failure of the admin-state change the module is powered
 * back down, mirroring mlxsw_sp_port_stop().
 */
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	err = mlxsw_env_module_port_up(mlxsw_sp->core,
				       mlxsw_sp_port->mapping.slot_index,
				       mlxsw_sp_port->mapping.module);
	if (err)
		return err;
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_admin_status_set;
	netif_start_queue(dev);
	return 0;

err_port_admin_status_set:
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.slot_index,
				   mlxsw_sp_port->mapping.module);
	return err;
}
0701
0702 static int mlxsw_sp_port_stop(struct net_device *dev)
0703 {
0704 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
0705 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
0706
0707 netif_stop_queue(dev);
0708 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
0709 mlxsw_env_module_port_down(mlxsw_sp->core,
0710 mlxsw_sp_port->mapping.slot_index,
0711 mlxsw_sp_port->mapping.module);
0712 return 0;
0713 }
0714
/* ndo_start_xmit: transmit one skb through the switch core.
 *
 * Note the skb ownership rules on this path: eth_skb_pad() frees the skb
 * on failure, mlxsw_sp_txhdr_handle() consumes it on error, and a
 * successful mlxsw_core_skb_transmit() takes ownership - hence the packet
 * length is captured *before* the transmit call.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	/* The control block is reused by the core; start from a clean state. */
	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* eth_skb_pad() frees the skb on failure. */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	/* On error the skb was already dropped and counted. */
	err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
				    &tx_info);
	if (err)
		return NETDEV_TX_OK;

	/* Account the wire length without the TX header; must be sampled
	 * before transmit since the skb may no longer be ours afterwards.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
0765
/* ndo_set_rx_mode: intentionally empty - nothing to program here. */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
0769
0770 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
0771 {
0772 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
0773 struct sockaddr *addr = p;
0774 int err;
0775
0776 if (!is_valid_ether_addr(addr->sa_data))
0777 return -EADDRNOTAVAIL;
0778
0779 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
0780 if (err)
0781 return err;
0782 eth_hw_addr_set(dev, addr->sa_data);
0783 return 0;
0784 }
0785
/* ndo_change_mtu: change a port's MTU.
 *
 * The port headroom (buffer sizes) is reconfigured for the new MTU first,
 * then the MTU itself is programmed. If the MTU write fails, the original
 * headroom configuration is restored so hardware and software state stay
 * consistent.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom hdroom;
	int err;

	/* Keep a copy for rollback. */
	orig_hdroom = *mlxsw_sp_port->hdroom;

	hdroom = orig_hdroom;
	hdroom.mtu = mtu;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
	return err;
}
0815
/* Sum the per-CPU software (CPU-path) counters of a port into @stats.
 *
 * The packet/byte counters are read under the u64_stats seqcount retry
 * loop so 64-bit values are consistent on 32-bit hosts. tx_dropped is
 * updated with this_cpu_inc() outside the syncp elsewhere in this file,
 * so it is read without the loop.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;

		/* Not protected by syncp - see comment above. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}
0847
0848 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
0849 {
0850 switch (attr_id) {
0851 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
0852 return true;
0853 }
0854
0855 return false;
0856 }
0857
0858 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
0859 void *sp)
0860 {
0861 switch (attr_id) {
0862 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
0863 return mlxsw_sp_port_get_sw_stats64(dev, sp);
0864 }
0865
0866 return -EINVAL;
0867 }
0868
0869 int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
0870 int prio, char *ppcnt_pl)
0871 {
0872 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
0873 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
0874
0875 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
0876 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
0877 }
0878
/* Read the IEEE 802.3 counter group from hardware and translate it into
 * rtnl_link_stats64 fields. rx_errors/rx_length_errors are aggregates
 * derived from the individual error counters.
 */
static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	/* Length errors: in-range, out-of-range and too-long combined. */
	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}
0917
/* Read extended hardware statistics (ECN, per-TC and per-priority groups)
 * into @xstats. Individual query failures are tolerated: the affected
 * fields simply keep their previous contents. The 'tc_cnt' label lets the
 * per-TC counters still be read when the per-TC congestion group fails.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_CNT,
						  i, ppcnt_pl);
		if (err)
			goto tc_cnt;

		xstats->wred_drop[i] =
			mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
		xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);

tc_cnt:
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}
0963
/* Periodic worker that refreshes the cached hardware statistics used by
 * ndo_get_stats64. Skips the hardware reads while the carrier is down
 * (the cached values are kept), and always reschedules itself.
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Link is down - keep the last snapshot and retry later. */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}
0985
0986
0987
0988
0989 static void
0990 mlxsw_sp_port_get_stats64(struct net_device *dev,
0991 struct rtnl_link_stats64 *stats)
0992 {
0993 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
0994
0995 memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
0996 }
0997
0998 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
0999 u16 vid_begin, u16 vid_end,
1000 bool is_member, bool untagged)
1001 {
1002 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1003 char *spvm_pl;
1004 int err;
1005
1006 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
1007 if (!spvm_pl)
1008 return -ENOMEM;
1009
1010 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
1011 vid_end, is_member, untagged);
1012 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
1013 kfree(spvm_pl);
1014 return err;
1015 }
1016
1017 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
1018 u16 vid_end, bool is_member, bool untagged)
1019 {
1020 u16 vid, vid_e;
1021 int err;
1022
1023 for (vid = vid_begin; vid <= vid_end;
1024 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
1025 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
1026 vid_end);
1027
1028 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
1029 is_member, untagged);
1030 if (err)
1031 return err;
1032 }
1033
1034 return 0;
1035 }
1036
1037 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1038 bool flush_default)
1039 {
1040 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
1041
1042 list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
1043 &mlxsw_sp_port->vlans_list, list) {
1044 if (!flush_default &&
1045 mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
1046 continue;
1047 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1048 }
1049 }
1050
1051 static void
1052 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1053 {
1054 if (mlxsw_sp_port_vlan->bridge_port)
1055 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1056 else if (mlxsw_sp_port_vlan->fid)
1057 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
1058 }
1059
/* mlxsw_sp_port_vlan_create() - create a {port, VID} object.
 *
 * Refuses duplicates (-EEXIST). Hardware VLAN membership is installed
 * first; if the subsequent allocation fails, the membership is removed
 * again. The default VID egresses untagged. Returns the new object or an
 * ERR_PTR().
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	/* Roll back the hardware VLAN membership. */
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}
1091
1092 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1093 {
1094 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1095 u16 vid = mlxsw_sp_port_vlan->vid;
1096
1097 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
1098 list_del(&mlxsw_sp_port_vlan->list);
1099 kfree(mlxsw_sp_port_vlan);
1100 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1101 }
1102
1103 static int mlxsw_sp_port_add_vid(struct net_device *dev,
1104 __be16 __always_unused proto, u16 vid)
1105 {
1106 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1107
1108
1109
1110
1111 if (!vid)
1112 return 0;
1113
1114 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
1115 }
1116
1117 static int mlxsw_sp_port_kill_vid(struct net_device *dev,
1118 __be16 __always_unused proto, u16 vid)
1119 {
1120 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1121 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1122
1123
1124
1125
1126 if (!vid)
1127 return 0;
1128
1129 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1130 if (!mlxsw_sp_port_vlan)
1131 return 0;
1132 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1133
1134 return 0;
1135 }
1136
1137 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
1138 struct flow_block_offload *f)
1139 {
1140 switch (f->binder_type) {
1141 case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
1142 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
1143 case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
1144 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
1145 case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
1146 return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
1147 case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
1148 return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
1149 default:
1150 return -EOPNOTSUPP;
1151 }
1152 }
1153
1154 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
1155 void *type_data)
1156 {
1157 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1158
1159 switch (type) {
1160 case TC_SETUP_BLOCK:
1161 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
1162 case TC_SETUP_QDISC_RED:
1163 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
1164 case TC_SETUP_QDISC_PRIO:
1165 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
1166 case TC_SETUP_QDISC_ETS:
1167 return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
1168 case TC_SETUP_QDISC_TBF:
1169 return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
1170 case TC_SETUP_QDISC_FIFO:
1171 return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
1172 default:
1173 return -EOPNOTSUPP;
1174 }
1175 }
1176
1177 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
1178 {
1179 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1180
1181 if (!enable) {
1182 if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
1183 mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
1184 netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1185 return -EINVAL;
1186 }
1187 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
1188 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
1189 } else {
1190 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
1191 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
1192 }
1193 return 0;
1194 }
1195
/* Toggle the NETIF_F_LOOPBACK feature via the PPLR register.
 *
 * The port is taken administratively down around the register write and
 * brought back up afterwards if the netdev was running. netif_running()
 * is re-evaluated after the write rather than cached, preserving the
 * original call order.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}
1214
1215 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
1216
1217 static int mlxsw_sp_handle_feature(struct net_device *dev,
1218 netdev_features_t wanted_features,
1219 netdev_features_t feature,
1220 mlxsw_sp_feature_handler feature_handler)
1221 {
1222 netdev_features_t changes = wanted_features ^ dev->features;
1223 bool enable = !!(wanted_features & feature);
1224 int err;
1225
1226 if (!(changes & feature))
1227 return 0;
1228
1229 err = feature_handler(dev, enable);
1230 if (err) {
1231 netdev_err(dev, "%s feature %pNF failed, err %d\n",
1232 enable ? "Enable" : "Disable", &feature, err);
1233 return err;
1234 }
1235
1236 if (enable)
1237 dev->features |= feature;
1238 else
1239 dev->features &= ~feature;
1240
1241 return 0;
1242 }
1243 static int mlxsw_sp_set_features(struct net_device *dev,
1244 netdev_features_t features)
1245 {
1246 netdev_features_t oper_features = dev->features;
1247 int err = 0;
1248
1249 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
1250 mlxsw_sp_feature_hw_tc);
1251 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
1252 mlxsw_sp_feature_loopback);
1253
1254 if (err) {
1255 dev->features = oper_features;
1256 return -EINVAL;
1257 }
1258
1259 return 0;
1260 }
1261
1262 static struct devlink_port *
1263 mlxsw_sp_port_get_devlink_port(struct net_device *dev)
1264 {
1265 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1266 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1267
1268 return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
1269 mlxsw_sp_port->local_port);
1270 }
1271
1272 static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
1273 struct ifreq *ifr)
1274 {
1275 struct hwtstamp_config config;
1276 int err;
1277
1278 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1279 return -EFAULT;
1280
1281 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
1282 &config);
1283 if (err)
1284 return err;
1285
1286 if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1287 return -EFAULT;
1288
1289 return 0;
1290 }
1291
1292 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
1293 struct ifreq *ifr)
1294 {
1295 struct hwtstamp_config config;
1296 int err;
1297
1298 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
1299 &config);
1300 if (err)
1301 return err;
1302
1303 if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1304 return -EFAULT;
1305
1306 return 0;
1307 }
1308
/* Reset the port's hardware timestamping to the all-zero config.
 * The hwtstamp_set return value is deliberately ignored: this runs on
 * best-effort teardown paths where there is no way to recover anyway.
 */
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}
1315
1316 static int
1317 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1318 {
1319 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1320
1321 switch (cmd) {
1322 case SIOCSHWTSTAMP:
1323 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
1324 case SIOCGHWTSTAMP:
1325 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
1326 default:
1327 return -EOPNOTSUPP;
1328 }
1329 }
1330
/* netdev operations shared by all Spectrum front-panel ports. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_eth_ioctl		= mlxsw_sp_port_ioctl,
};
1348
/* Advertise all link modes supported by the port in its current lane
 * configuration: query PTYS for the capability mask, mask it through the
 * ASIC-specific filter, then write it back as the admin (advertised) set.
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap_masked;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* First query the supported capability mask (proto_admin = 0,
	 * autoneg not requested).
	 */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
	/* Then advertise the masked capability set. */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap_masked,
			       mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
1378
1379 int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
1380 {
1381 const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
1382 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1383 char ptys_pl[MLXSW_REG_PTYS_LEN];
1384 u32 eth_proto_oper;
1385 int err;
1386
1387 port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
1388 port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
1389 mlxsw_sp_port->local_port, 0,
1390 false);
1391 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1392 if (err)
1393 return err;
1394 port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
1395 ð_proto_oper);
1396 *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
1397 return 0;
1398 }
1399
1400 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1401 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1402 bool dwrr, u8 dwrr_weight)
1403 {
1404 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1405 char qeec_pl[MLXSW_REG_QEEC_LEN];
1406
1407 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1408 next_index);
1409 mlxsw_reg_qeec_de_set(qeec_pl, true);
1410 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1411 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1412 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1413 }
1414
1415 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
1416 enum mlxsw_reg_qeec_hr hr, u8 index,
1417 u8 next_index, u32 maxrate, u8 burst_size)
1418 {
1419 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1420 char qeec_pl[MLXSW_REG_QEEC_LEN];
1421
1422 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1423 next_index);
1424 mlxsw_reg_qeec_mase_set(qeec_pl, true);
1425 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
1426 mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
1427 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1428 }
1429
1430 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
1431 enum mlxsw_reg_qeec_hr hr, u8 index,
1432 u8 next_index, u32 minrate)
1433 {
1434 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1435 char qeec_pl[MLXSW_REG_QEEC_LEN];
1436
1437 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1438 next_index);
1439 mlxsw_reg_qeec_mise_set(qeec_pl, true);
1440 mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);
1441
1442 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1443 }
1444
1445 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1446 u8 switch_prio, u8 tclass)
1447 {
1448 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1449 char qtct_pl[MLXSW_REG_QTCT_LEN];
1450
1451 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
1452 tclass);
1453 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
1454 }
1455
/* Initialize the port's ETS scheduling hierarchy:
 *  - build the element tree (group -> subgroups -> TCs, with TCs 8..15
 *    attached as DWRR elements under the corresponding subgroup),
 *  - disable the max shaper on every element that supports it,
 *  - set the min shaper on the upper (multicast) TCs 8..15,
 *  - map all switch priorities to traffic class 0.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the scheduling element hierarchy. */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		/* TC i + 8 shares subgroup i and is scheduled by DWRR with
		 * weight 100.
		 */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Disable the max shaper on the port element and on every
	 * subgroup and TC element.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper on the upper TCs (8..15). */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1541
1542 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
1543 bool enable)
1544 {
1545 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1546 char qtctm_pl[MLXSW_REG_QTCTM_LEN];
1547
1548 mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
1549 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
1550 }
1551
1552 static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
1553 {
1554 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1555 u8 slot_index = mlxsw_sp_port->mapping.slot_index;
1556 u8 module = mlxsw_sp_port->mapping.module;
1557 u64 overheat_counter;
1558 int err;
1559
1560 err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index,
1561 module, &overheat_counter);
1562 if (err)
1563 return err;
1564
1565 mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
1566 return 0;
1567 }
1568
1569 int
1570 mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
1571 bool is_8021ad_tagged,
1572 bool is_8021q_tagged)
1573 {
1574 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1575 char spvc_pl[MLXSW_REG_SPVC_LEN];
1576
1577 mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
1578 is_8021ad_tagged, is_8021q_tagged);
1579 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
1580 }
1581
1582 static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
1583 u16 local_port, u8 *port_number,
1584 u8 *split_port_subnumber,
1585 u8 *slot_index)
1586 {
1587 char pllp_pl[MLXSW_REG_PLLP_LEN];
1588 int err;
1589
1590 mlxsw_reg_pllp_pack(pllp_pl, local_port);
1591 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
1592 if (err)
1593 return err;
1594 mlxsw_reg_pllp_unpack(pllp_pl, port_number,
1595 split_port_subnumber, slot_index);
1596 return 0;
1597 }
1598
/* Create a front-panel port: map its module, configure it in hardware,
 * allocate and register the backing netdev.
 *
 * @mlxsw_sp: switch instance
 * @local_port: local port number
 * @split: whether this port is a member of a split configuration
 * @port_mapping: module/lane mapping for the port
 *
 * Returns 0 on success, negative errno otherwise. On failure every step
 * already performed is undone via the goto ladder at the end, in reverse
 * order of initialization.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				bool split,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	u8 split_port_subnumber;
	struct net_device *dev;
	u8 port_number;
	u8 slot_index;
	bool splittable;
	int err;

	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			local_port);
		return err;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
					   &split_port_subnumber, &slot_index);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
			local_port);
		goto err_port_label_info_get;
	}

	/* A multi-lane, unsplit port can be split further. */
	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
				   port_number, split, split_port_subnumber,
				   splittable, lanes, mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		goto err_core_port_init;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Reserve headroom for the Tx header prepended on transmit. */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_max_mtu_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
				     ETH_P_8021Q);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	/* Classify only 802.1q-tagged packets as tagged; 802.1ad tagging is
	 * disabled (arguments: is_8021ad_tagged = false, is_8021q_tagged =
	 * true).
	 */
	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
			local_port);
		goto err_port_vlan_classification_set;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;

	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
			mlxsw_sp_port->local_port);
		goto err_port_overheat_init_val_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

	/* Error unwind: each label undoes everything initialized before the
	 * failing step, in reverse order.
	 */
err_register_netdev:
err_port_overheat_init_val_set:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
err_port_vlan_classification_set:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
err_core_port_init:
err_port_label_info_get:
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port,
				   port_mapping->slot_index,
				   port_mapping->module);
	return err;
}
1891
/* Tear down a port created by mlxsw_sp_port_create(). The steps mirror
 * the create path in reverse order; the mapping fields are cached up
 * front because the port struct is freed before the final unmap.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
	u8 module = mlxsw_sp_port->mapping.module;

	/* Stop deferred work before unregistering the netdev. */
	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
	mlxsw_sp->ports[local_port] = NULL;
	/* flush_default = true: the default VID entry goes away as well. */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module);
}
1920
1921 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
1922 {
1923 struct mlxsw_sp_port *mlxsw_sp_port;
1924 int err;
1925
1926 mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
1927 if (!mlxsw_sp_port)
1928 return -ENOMEM;
1929
1930 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1931 mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;
1932
1933 err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
1934 mlxsw_sp_port,
1935 mlxsw_sp->base_mac,
1936 sizeof(mlxsw_sp->base_mac));
1937 if (err) {
1938 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
1939 goto err_core_cpu_port_init;
1940 }
1941
1942 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
1943 return 0;
1944
1945 err_core_cpu_port_init:
1946 kfree(mlxsw_sp_port);
1947 return err;
1948 }
1949
1950 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
1951 {
1952 struct mlxsw_sp_port *mlxsw_sp_port =
1953 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
1954
1955 mlxsw_core_cpu_port_fini(mlxsw_sp->core);
1956 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
1957 kfree(mlxsw_sp_port);
1958 }
1959
1960 static bool mlxsw_sp_local_port_valid(u16 local_port)
1961 {
1962 return local_port != MLXSW_PORT_CPU_PORT;
1963 }
1964
1965 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
1966 {
1967 if (!mlxsw_sp_local_port_valid(local_port))
1968 return false;
1969 return mlxsw_sp->ports[local_port] != NULL;
1970 }
1971
1972 static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
1973 u16 local_port, bool enable)
1974 {
1975 char pmecr_pl[MLXSW_REG_PMECR_LEN];
1976
1977 mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
1978 enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT :
1979 MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
1980 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
1981 }
1982
/* One queued PMLP (port mapping) event: the raw register payload, linked
 * on the events queue and consumed by the port-mapping events work item.
 */
struct mlxsw_sp_port_mapping_event {
	struct list_head list;		/* entry on events->queue */
	char pmlp_pl[MLXSW_REG_PMLP_LEN]; /* raw PMLP payload as received */
};
1987
/* Work item that drains the queued PMLP events and creates the ports they
 * describe. Events are spliced off the shared queue under queue_lock and
 * then processed without the lock held; port creation itself is done
 * under the devlink instance lock.
 */
static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
{
	struct mlxsw_sp_port_mapping_event *event, *next_event;
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp *mlxsw_sp;
	struct devlink *devlink;
	LIST_HEAD(event_queue);
	u16 local_port;
	int err;

	events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
	mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
	devlink = priv_to_devlink(mlxsw_sp->core);

	/* Grab the whole pending queue in one shot. */
	spin_lock_bh(&events->queue_lock);
	list_splice_init(&events->queue, &event_queue);
	spin_unlock_bh(&events->queue_lock);

	list_for_each_entry_safe(event, next_event, &event_queue, list) {
		local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
		err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
						      event->pmlp_pl, &port_mapping);
		if (err)
			goto out;

		if (WARN_ON_ONCE(!port_mapping.width))
			goto out;

		devl_lock(devlink);

		/* A mapping event for an already-created port is unexpected. */
		if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
			mlxsw_sp_port_create(mlxsw_sp, local_port,
					     false, &port_mapping);
		else
			WARN_ON_ONCE(1);

		devl_unlock(devlink);

		mlxsw_sp->port_mapping[local_port] = port_mapping;

out:
		/* "out" is inside the loop: skip this event but keep
		 * draining the rest; each event is freed exactly once.
		 */
		kfree(event);
	}
}
2033
/* PMLP event listener. Runs in a context where sleeping is not allowed
 * (GFP_ATOMIC allocation, plain spin_lock): it only copies the payload,
 * queues it, and kicks the events work item to do the real processing.
 * On allocation failure the event is silently dropped.
 */
static void
mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
				    char *pmlp_pl, void *priv)
{
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping_event *event;
	struct mlxsw_sp *mlxsw_sp = priv;
	u16 local_port;

	local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;

	events = &mlxsw_sp->port_mapping_events;
	event = kmalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
	spin_lock(&events->queue_lock);
	list_add_tail(&event->list, &events->queue);
	spin_unlock(&events->queue_lock);
	mlxsw_core_schedule_work(&events->work);
}
2057
2058 static void
2059 __mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
2060 {
2061 struct mlxsw_sp_port_mapping_event *event, *next_event;
2062 struct mlxsw_sp_port_mapping_events *events;
2063
2064 events = &mlxsw_sp->port_mapping_events;
2065
2066
2067 cancel_work_sync(&events->work);
2068 list_for_each_entry_safe(event, next_event, &events->queue, list) {
2069 list_del(&event->list);
2070 kfree(event);
2071 }
2072 }
2073
/* Remove all ports: first disable mapping events and flush/free any
 * pending ones so no new port can be created concurrently, then remove
 * every created port, the CPU port, and the ports array itself.
 */
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	int i;

	for (i = 1; i < max_ports; i++)
		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);

	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);

	for (i = 1; i < max_ports; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
}
2091
2092 static void
2093 mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
2094 bool (*selector)(void *priv, u16 local_port),
2095 void *priv)
2096 {
2097 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2098 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
2099 int i;
2100
2101 for (i = 1; i < max_ports; i++)
2102 if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i))
2103 mlxsw_sp_port_remove(mlxsw_sp, i);
2104 }
2105
/* Create the CPU port and a netdev for every mapped front-panel port.
 * Also arms PMLPE (port mapping change) event generation per port and
 * sets up the deferred-event machinery. Returns 0 or -errno.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	/* Local-port-indexed pointer array; slot 0 is unused because
	 * local ports are 1-based.
	 */
	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	/* Queue used to defer PMLPE events from trap context to process
	 * context via a workqueue.
	 */
	events = &mlxsw_sp->port_mapping_events;
	INIT_LIST_HEAD(&events->queue);
	spin_lock_init(&events->queue_lock);
	INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
		if (err)
			goto err_event_enable;
	}

	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	/* Instantiate only ports that have lanes mapped (width != 0). */
	for (i = 1; i < max_ports; i++) {
		port_mapping = &mlxsw_sp->port_mapping[i];
		if (!port_mapping->width)
			continue;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	/* Reset i so the loop below disables mapping events for ALL ports,
	 * since all of them were enabled before the failure.
	 */
	i = max_ports;
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
err_event_enable:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
	/* No PMLPE events can be generated anymore; flush the queue. */
	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
	return err;
}
2161
2162 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
2163 {
2164 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2165 struct mlxsw_sp_port_mapping *port_mapping;
2166 int i;
2167 int err;
2168
2169 mlxsw_sp->port_mapping = kcalloc(max_ports,
2170 sizeof(struct mlxsw_sp_port_mapping),
2171 GFP_KERNEL);
2172 if (!mlxsw_sp->port_mapping)
2173 return -ENOMEM;
2174
2175 for (i = 1; i < max_ports; i++) {
2176 port_mapping = &mlxsw_sp->port_mapping[i];
2177 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
2178 if (err)
2179 goto err_port_module_info_get;
2180 }
2181 return 0;
2182
2183 err_port_module_info_get:
2184 kfree(mlxsw_sp->port_mapping);
2185 return err;
2186 }
2187
/* Release the cached per-port module/lane mapping table. */
static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->port_mapping);
}
2192
/* Create the split ports listed in the PMTDB response. Each split port
 * gets an equal share of the parent's lanes (width / count). Returns 0 or
 * -errno; already-created split ports are removed on failure.
 */
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, const char *pmtdb_pl)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		/* PMTDB may report invalid entries; skip them. */
		if (!mlxsw_sp_local_port_valid(s_local_port))
			continue;

		err = mlxsw_sp_port_create(mlxsw_sp, s_local_port,
					   true, &split_port_mapping);
		if (err)
			goto err_port_create;
		/* Next split port starts right after this one's lanes. */
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	/* Unwind: remove the split ports created so far. */
	for (i--; i >= 0; i--) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}
	return err;
}
2227
2228 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
2229 unsigned int count,
2230 const char *pmtdb_pl)
2231 {
2232 struct mlxsw_sp_port_mapping *port_mapping;
2233 int i;
2234
2235
2236 for (i = 0; i < count; i++) {
2237 u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
2238
2239 port_mapping = &mlxsw_sp->port_mapping[local_port];
2240 if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port))
2241 continue;
2242 mlxsw_sp_port_create(mlxsw_sp, local_port,
2243 false, port_mapping);
2244 }
2245 }
2246
2247 static struct mlxsw_sp_port *
2248 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port)
2249 {
2250 if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
2251 return mlxsw_sp->ports[local_port];
2252 return NULL;
2253 }
2254
/* devlink port split handler: split @local_port into @count sub-ports.
 * Queries PMTDB for the set of affected local ports, removes them and
 * recreates them as split ports. Returns 0 or -errno; on failure the
 * original unsplit ports are restored best-effort.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pmtdb_status status;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	/* A split port cannot be split again. */
	if (mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port is already split");
		return -EINVAL;
	}

	/* Ask firmware which local ports the split would produce and
	 * whether the configuration is supported.
	 */
	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	status = mlxsw_reg_pmtdb_status_get(pmtdb_pl);
	if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration");
		return -EINVAL;
	}

	/* Copy the mapping before removing the port it belongs to. */
	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping,
					 count, pmtdb_pl);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Restore the original unsplit ports we removed above. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return err;
}
2319
/* devlink port unsplit handler: undo a previous split of @local_port.
 * Removes the split ports reported by PMTDB and recreates the original
 * unsplit ports. Returns 0 or -errno.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	unsigned int count;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	/* Derive the split count from how the module's lanes were divided
	 * among the split ports.
	 */
	count = mlxsw_sp_port->mapping.module_width /
		mlxsw_sp_port->mapping.width;

	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	/* Remove all split ports belonging to this module group. */
	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return 0;
}
2367
2368 static void
2369 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
2370 {
2371 int i;
2372
2373 for (i = 0; i < TC_MAX_QUEUE; i++)
2374 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
2375 }
2376
/* PUDE (port up/down event) trap handler: mirror the port's hardware
 * operational state into its netdev carrier state.
 */
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u16 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);

	/* Out-of-range local ports should never be reported by firmware. */
	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
		/* Re-apply the PTP shaper now that the port is up. */
		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
		/* Momentary queue stats are meaningless on a down port. */
		mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
	}
}
2404
/* Common handler for SP1 PTP timestamp FIFO events: unpack every MTPPTR
 * record and hand the timestamps to the PTP code for matching against
 * queued packets. @ingress selects RX vs. TX FIFO semantics.
 */
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u16 local_port;
	u8 num_rec;
	int i;

	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
	for (i = 0; i < num_rec; i++) {
		u8 domain_number;
		u8 message_type;
		u16 sequence_id;
		u64 timestamp;

		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
					&domain_number, &sequence_id,
					&timestamp);
		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
					    message_type, domain_number,
					    sequence_id, timestamp);
	}
}
2428
2429 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
2430 char *mtpptr_pl, void *priv)
2431 {
2432 struct mlxsw_sp *mlxsw_sp = priv;
2433
2434 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
2435 }
2436
2437 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
2438 char *mtpptr_pl, void *priv)
2439 {
2440 struct mlxsw_sp *mlxsw_sp = priv;
2441
2442 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
2443 }
2444
/* Baseline RX trap listener: account the packet in per-CPU statistics and
 * inject it into the networking stack via the receiving port's netdev,
 * without setting any offload-forwarded marks.
 */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
				       u16 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	/* The port may have been removed between trap and delivery. */
	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
2469
2470 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
2471 void *priv)
2472 {
2473 skb->offload_fwd_mark = 1;
2474 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2475 }
2476
2477 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
2478 u16 local_port, void *priv)
2479 {
2480 skb->offload_l3_fwd_mark = 1;
2481 skb->offload_fwd_mark = 1;
2482 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2483 }
2484
/* Dispatch a received PTP packet to the ASIC-generation specific handler. */
void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			  u16 local_port)
{
	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}
2490
/* RX listener without offload_fwd_mark: trapped packets will be forwarded
 * again by the software data path.
 */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* RX listener that sets offload_fwd_mark: packet was already L2-forwarded
 * by the ASIC.
 */
#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* RX listener that additionally sets offload_l3_fwd_mark: packet was
 * already L3-routed by the ASIC.
 */
#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* Event listener registered in the dedicated SP_EVENT trap group. */
#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2505
/* Traps and events common to all Spectrum generations. */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};
2531
/* Spectrum-1 specific listeners: PTP timestamp FIFO events. */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};
2537
/* Spectrum-2 and later specific listeners: PMLPE port mapping events. */
static const struct mlxsw_listener mlxsw_sp2_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
};
2542
/* Configure the CPU policers (QPCR) that rate-limit trapped traffic for
 * the trap groups this file owns. Policer index equals the trap group id.
 * Returns 0 or -errno.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			rate = 1024;
			burst_size = 7;
			break;
		default:
			/* Trap groups not listed above are configured
			 * elsewhere (e.g. by devlink-trap).
			 */
			continue;
		}

		/* Mark the policer as consumed so devlink-trap will not
		 * hand it out again.
		 */
		__set_bit(i, mlxsw_sp->trap->policers_usage);
		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}
2583
/* Bind each trap group this file owns (HTGT) to a traffic class, priority
 * and its policer. Events get no policer at all. Returns 0 or -errno.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		/* By convention the policer index matches the trap group
		 * id, unless overridden below.
		 */
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			/* Events are not rate-limited. */
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			/* Remaining groups are configured elsewhere. */
			continue;
		}

		/* Sanity: the chosen policer must exist in hardware. */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}
2630
/* Allocate the trap context (including the policer-usage bitmap), program
 * CPU policers and trap groups, and register both the common and the
 * ASIC-specific trap listeners. Returns 0 or -errno.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	/* Trailing flexible bitmap tracks which policers are in use. */
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	/* Listeners common to all Spectrum generations. */
	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener,
					ARRAY_SIZE(mlxsw_sp_listener),
					mlxsw_sp);
	if (err)
		goto err_traps_register;

	/* Generation-specific listeners set up by the per-ASIC init. */
	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners,
					mlxsw_sp->listeners_count, mlxsw_sp);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener),
				    mlxsw_sp);
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}
2678
/* Unregister trap listeners in reverse order of registration and free the
 * trap context.
 */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners,
				    mlxsw_sp->listeners_count,
				    mlxsw_sp);
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp);
	kfree(mlxsw_sp->trap);
}
2688
#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

/* Configure the LAG hash (SLCR) with a per-device seed derived from the
 * base MAC, and allocate the LAG bookkeeping array. Returns 0 or -errno.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	/* Seed from the base MAC so different switches hash differently. */
	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}
2724
/* Release the LAG bookkeeping array allocated by mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}
2729
/* PTP operations for Spectrum-1 (FIFO-based timestamping). */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
	.txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
2746
/* PTP operations for Spectrum-2/3. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
	.txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
};
2763
/* PTP operations for Spectrum-4: same as SP2 except for TX header
 * construction, which uses the generic variant instead of the SP2 one.
 */
static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
	.txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
2780
/* One entry in the packet-sampling trigger hash table; refcounted so the
 * same trigger can be shared by multiple users with matching parameters.
 */
struct mlxsw_sp_sample_trigger_node {
	struct mlxsw_sp_sample_trigger trigger;	/* hash key */
	struct mlxsw_sp_sample_params params;	/* sampling configuration */
	struct rhash_head ht_node;
	struct rcu_head rcu;			/* for deferred free */
	refcount_t refcount;
};
2788
/* rhashtable layout: keyed on the whole mlxsw_sp_sample_trigger struct. */
static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
	.head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_sample_trigger),
	.automatic_shrinking = true,
};
2795
/* Build a canonical lookup key from @trigger. The memset matters: the
 * struct (including any padding) is hashed whole as the rhashtable key,
 * so all bytes must be deterministically zeroed first.
 */
static void
mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
				 const struct mlxsw_sp_sample_trigger *trigger)
{
	memset(key, 0, sizeof(*key));
	key->type = trigger->type;
	key->local_port = trigger->local_port;
}
2804
2805
2806 struct mlxsw_sp_sample_params *
2807 mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
2808 const struct mlxsw_sp_sample_trigger *trigger)
2809 {
2810 struct mlxsw_sp_sample_trigger_node *trigger_node;
2811 struct mlxsw_sp_sample_trigger key;
2812
2813 mlxsw_sp_sample_trigger_key_init(&key, trigger);
2814 trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
2815 mlxsw_sp_sample_trigger_ht_params);
2816 if (!trigger_node)
2817 return NULL;
2818
2819 return &trigger_node->params;
2820 }
2821
2822 static int
2823 mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
2824 const struct mlxsw_sp_sample_trigger *trigger,
2825 const struct mlxsw_sp_sample_params *params)
2826 {
2827 struct mlxsw_sp_sample_trigger_node *trigger_node;
2828 int err;
2829
2830 trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
2831 if (!trigger_node)
2832 return -ENOMEM;
2833
2834 trigger_node->trigger = *trigger;
2835 trigger_node->params = *params;
2836 refcount_set(&trigger_node->refcount, 1);
2837
2838 err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
2839 &trigger_node->ht_node,
2840 mlxsw_sp_sample_trigger_ht_params);
2841 if (err)
2842 goto err_rhashtable_insert;
2843
2844 return 0;
2845
2846 err_rhashtable_insert:
2847 kfree(trigger_node);
2848 return err;
2849 }
2850
/* Remove a trigger node from the hash table and free it after an RCU
 * grace period, since lookups may run under RCU (rhashtable_lookup).
 */
static void
mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_sample_trigger_node *trigger_node)
{
	rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
			       &trigger_node->ht_node,
			       mlxsw_sp_sample_trigger_ht_params);
	kfree_rcu(trigger_node, rcu);
}
2860
/* Register sampling parameters for @trigger. If the trigger already
 * exists, it may only be shared (refcount bumped) for non-port triggers
 * and only when the parameters match exactly. Caller must hold RTNL.
 * Returns 0 or -errno.
 */
int
mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_sample_trigger *trigger,
				   const struct mlxsw_sp_sample_params *params,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key,
							 params);

	/* Per-port triggers cannot be shared. */
	if (trigger_node->trigger.local_port) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port");
		return -EINVAL;
	}

	if (trigger_node->params.psample_group != params->psample_group ||
	    trigger_node->params.truncate != params->truncate ||
	    trigger_node->params.rate != params->rate ||
	    trigger_node->params.trunc_size != params->trunc_size) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger");
		return -EINVAL;
	}

	refcount_inc(&trigger_node->refcount);

	return 0;
}
2898
2899 void
2900 mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
2901 const struct mlxsw_sp_sample_trigger *trigger)
2902 {
2903 struct mlxsw_sp_sample_trigger_node *trigger_node;
2904 struct mlxsw_sp_sample_trigger key;
2905
2906 ASSERT_RTNL();
2907
2908 mlxsw_sp_sample_trigger_key_init(&key, trigger);
2909
2910 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
2911 &key,
2912 mlxsw_sp_sample_trigger_ht_params);
2913 if (!trigger_node)
2914 return;
2915
2916 if (!refcount_dec_and_test(&trigger_node->refcount))
2917 return;
2918
2919 mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node);
2920 }
2921
/* Forward declaration; defined further below in this file. */
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);

/* Packet-header parsing depth in bytes (default and the deeper setting
 * used when tunnels require it), and the default UDP destination port
 * used to classify VxLAN packets.
 */
#define MLXSW_SP_DEFAULT_PARSING_DEPTH 96
#define MLXSW_SP_INCREASED_PARSING_DEPTH 128
#define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789
2928
2929 static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
2930 {
2931 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
2932 mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
2933 mutex_init(&mlxsw_sp->parsing.lock);
2934 }
2935
/* Tear down the parsing-configuration lock. */
static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->parsing.lock);
}
2940
/* Refcounted cache entry mapping an IPv6 address to the KVDL entry that
 * holds it in hardware, so the same address is programmed only once.
 */
struct mlxsw_sp_ipv6_addr_node {
	struct in6_addr key;	/* hash key: the IPv6 address */
	struct rhash_head ht_node;
	u32 kvdl_index;		/* hardware KVDL entry holding the address */
	refcount_t refcount;
};
2947
/* rhashtable layout: keyed on the raw in6_addr. */
static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key),
	.head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node),
	.key_len = sizeof(struct in6_addr),
	.automatic_shrinking = true,
};
2954
/* Allocate a KVDL entry, program @addr6 into it via RIPS and insert a
 * refcounted cache node for it. On success *p_kvdl_index holds the new
 * entry's index. Returns 0 or -errno. Caller holds ipv6_addr_ht_lock.
 */
static int
mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6,
			u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	char rips_pl[MLXSW_REG_RIPS_LEN];
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
				  MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
				  p_kvdl_index);
	if (err)
		return err;

	/* Write the address into the allocated KVDL entry. */
	mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
	if (err)
		goto err_rips_write;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		err = -ENOMEM;
		goto err_node_alloc;
	}

	node->key = *addr6;
	node->kvdl_index = *p_kvdl_index;
	refcount_set(&node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht,
				     &node->ht_node,
				     mlxsw_sp_ipv6_addr_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	kfree(node);
err_node_alloc:
err_rips_write:
	/* All error paths end up releasing the KVDL entry. */
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   *p_kvdl_index);
	return err;
}
3000
/* Remove a cache node and release its KVDL entry. The index is saved
 * before kfree() since it is needed after the node is gone.
 */
static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipv6_addr_node *node)
{
	u32 kvdl_index = node->kvdl_index;

	rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
			       mlxsw_sp_ipv6_addr_ht_params);
	kfree(node);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   kvdl_index);
}
3012
/* Get (or create) the KVDL index holding @addr6, taking a reference on
 * the cache entry. Pair with mlxsw_sp_ipv6_addr_put(). Returns 0 or
 * -errno.
 */
int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
				      const struct in6_addr *addr6,
				      u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	int err = 0;

	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
				      mlxsw_sp_ipv6_addr_ht_params);
	if (node) {
		/* Address already programmed; just share it. */
		refcount_inc(&node->refcount);
		*p_kvdl_index = node->kvdl_index;
		goto out_unlock;
	}

	err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index);

out_unlock:
	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
	return err;
}
3035
/* Drop one reference on @addr6's cache entry; frees the KVDL entry when
 * the last user goes away. Counterpart of
 * mlxsw_sp_ipv6_addr_kvdl_index_get().
 */
void
mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6)
{
	struct mlxsw_sp_ipv6_addr_node *node;

	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
				      mlxsw_sp_ipv6_addr_ht_params);
	/* Unbalanced put: the address was never registered. */
	if (WARN_ON(!node))
		goto out_unlock;

	if (!refcount_dec_and_test(&node->refcount))
		goto out_unlock;

	mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node);

out_unlock:
	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
}
3055
3056 static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp)
3057 {
3058 int err;
3059
3060 err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht,
3061 &mlxsw_sp_ipv6_addr_ht_params);
3062 if (err)
3063 return err;
3064
3065 mutex_init(&mlxsw_sp->ipv6_addr_ht_lock);
3066 return 0;
3067 }
3068
/* Tear down the IPv6 address cache in reverse order of initialization. */
static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock);
	rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht);
}
3074
/* Common initialization path shared by all Spectrum generations. Called
 * from the per-ASIC init functions (mlxsw_sp1_init() etc.) after the
 * generation-specific ops pointers have been set up on @mlxsw_sp.
 *
 * Subsystems are initialized in strict dependency order; on failure each
 * goto label unwinds exactly the subsystems initialized before it, in
 * reverse order. mlxsw_sp_fini() must mirror this order.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	mlxsw_sp_parsing_init(mlxsw_sp);
	mlxsw_core_emad_string_tlv_enable(mlxsw_core);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_pgt_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n");
		goto err_pgt_init;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_policers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
		goto err_policers_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* NOTE(review): SPAN must come before ACL/switchdev users of
	 * mirroring sessions; presumably that ordering is why it sits
	 * here — confirm against spectrum_span.c before reordering.
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n");
		goto err_ipv6_addr_ht_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_clock_capable) {
		/* PTP hardware clock is only created on buses that can
		 * read the free-running clock.
		 */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* PTP packet-handling state depends on the clock above. */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Register the netdevice notifier after the router and SPAN are
	 * up, so the event handler can safely use their state.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
			      &mlxsw_sp_sample_trigger_ht_params);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
		goto err_sample_trigger_init;
	}

	/* Ports come last: once created they can immediately generate
	 * events that rely on everything initialized above.
	 */
	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
err_sample_trigger_init:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	/* ptp_state/clock only exist when a clock was created above. */
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
err_ipv6_addr_ht_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_pgt_fini(mlxsw_sp);
err_pgt_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
	return err;
}
3303
/* Spectrum-1 init: wire up the SP1-specific ops tables and parameters,
 * then run the common mlxsw_sp_init() path.
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp1_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
	/* SP1 is the only generation with a valid SMPE index in PGT. */
	mlxsw_sp->pgt_smpe_index_valid = true;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
3336
/* Spectrum-2 init: wire up the SP2-specific ops tables and parameters,
 * then run the common mlxsw_sp_init() path.
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	/* Bloom filter ops exist only on SP2 and later (A-TCAM). */
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
3370
/* Spectrum-3 init: mostly reuses SP2 ops; only shared-buffer, SPAN and
 * shaper parameters differ from SP2.
 */
static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	/* SP3-specific shared buffer ops. */
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	/* SP3-specific SPAN ops. */
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
3404
/* Spectrum-4 init: reuses SP2/SP3 ops except for flex keys, ACL Bloom
 * filter, PTP and the lowest shaper burst size.
 */
static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	/* SP4-specific flex key ops. */
	mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	/* SP4-specific ACL Bloom filter ops. */
	mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	/* SP4-specific PTP ops. */
	mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
3438
/* Common teardown path for all Spectrum generations. Subsystems are
 * destroyed in exactly the reverse order of mlxsw_sp_init(); keep the
 * two functions in sync when adding or removing a subsystem.
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	/* PTP state and clock are only present when a clock was created
	 * during init (read_clock_capable buses).
	 */
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_pgt_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
}
3471
/* Firmware configuration profile for Spectrum-1. Unlike later
 * generations, SP1 partitions the KVD (linear/hash-single/hash-double),
 * hence the used_kvd_sizes fields.
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_flood_mode		= 1,
	.flood_mode			= MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.used_kvd_sizes			= 1,
	/* Hash KVD split ratio between single and double entries. */
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
3492
/* Firmware configuration profile shared by Spectrum-2 and later. No KVD
 * partitioning; CQE timestamps are requested in UTC format.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_flood_mode		= 1,
	.flood_mode			= MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type	= 1,
	.cqe_time_stamp_type		= MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
};
3511
/* Fill in the devlink size-parameter limits for the KVD and its three
 * partitions. Each partition may grow up to the total KVD size minus
 * the minimum sizes of the other two; all sizes move in units of
 * MLXSW_SP_KVD_GRANULARITY entries.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	/* Total KVD size is fixed: min == max == the device's KVD_SIZE. */
	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}
3547
/* Register the Spectrum-1 KVD devlink resource tree: a top-level KVD
 * resource with linear, hash-double and hash-single children. Default
 * partition sizes come from the SP1 config profile; the hash space is
 * split by the profile's single/double parts ratio.
 *
 * Returns 0 on success or a negative errno. Partially registered
 * resources are left for the caller (mlxsw_sp1_resources_register) to
 * unregister on failure.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
				     kvd_size, MLXSW_SP_RESOURCE_KVD,
				     DEVLINK_RESOURCE_ID_PARENT_TOP,
				     &kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
				     linear_size,
				     MLXSW_SP_RESOURCE_KVD_LINEAR,
				     MLXSW_SP_RESOURCE_KVD,
				     &linear_size_params);
	if (err)
		return err;

	/* Sub-resources of the linear partition. */
	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	/* Split what is left after the linear part between double and
	 * single hash entries according to the profile ratio, rounded
	 * down to the KVD granularity.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
				     double_size,
				     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
				     MLXSW_SP_RESOURCE_KVD,
				     &hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
				     single_size,
				     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
				     MLXSW_SP_RESOURCE_KVD,
				     &hash_single_size_params);
	if (err)
		return err;

	return 0;
}
3613
3614 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3615 {
3616 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3617 struct devlink_resource_size_params kvd_size_params;
3618 u32 kvd_size;
3619
3620 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3621 return -EIO;
3622
3623 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3624 devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
3625 MLXSW_SP_KVD_GRANULARITY,
3626 DEVLINK_RESOURCE_UNIT_ENTRY);
3627
3628 return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3629 kvd_size, MLXSW_SP_RESOURCE_KVD,
3630 DEVLINK_RESOURCE_ID_PARENT_TOP,
3631 &kvd_size_params);
3632 }
3633
3634 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
3635 {
3636 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3637 struct devlink_resource_size_params span_size_params;
3638 u32 max_span;
3639
3640 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
3641 return -EIO;
3642
3643 max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
3644 devlink_resource_size_params_init(&span_size_params, max_span, max_span,
3645 1, DEVLINK_RESOURCE_UNIT_ENTRY);
3646
3647 return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
3648 max_span, MLXSW_SP_RESOURCE_SPAN,
3649 DEVLINK_RESOURCE_ID_PARENT_TOP,
3650 &span_size_params);
3651 }
3652
3653 static int
3654 mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
3655 {
3656 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3657 struct devlink_resource_size_params size_params;
3658 u8 max_rif_mac_profiles;
3659
3660 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES))
3661 max_rif_mac_profiles = 1;
3662 else
3663 max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
3664 MAX_RIF_MAC_PROFILES);
3665 devlink_resource_size_params_init(&size_params, max_rif_mac_profiles,
3666 max_rif_mac_profiles, 1,
3667 DEVLINK_RESOURCE_UNIT_ENTRY);
3668
3669 return devl_resource_register(devlink,
3670 "rif_mac_profiles",
3671 max_rif_mac_profiles,
3672 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
3673 DEVLINK_RESOURCE_ID_PARENT_TOP,
3674 &size_params);
3675 }
3676
3677 static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core)
3678 {
3679 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3680 struct devlink_resource_size_params size_params;
3681 u64 max_rifs;
3682
3683 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS))
3684 return -EIO;
3685
3686 max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS);
3687 devlink_resource_size_params_init(&size_params, max_rifs, max_rifs,
3688 1, DEVLINK_RESOURCE_UNIT_ENTRY);
3689
3690 return devl_resource_register(devlink, "rifs", max_rifs,
3691 MLXSW_SP_RESOURCE_RIFS,
3692 DEVLINK_RESOURCE_ID_PARENT_TOP,
3693 &size_params);
3694 }
3695
/* Register all Spectrum-1 devlink resources. On any failure all
 * previously registered resources are removed in one shot via
 * devl_resources_unregister(), which is why all the error labels fall
 * through to the same cleanup.
 */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_policer_resources_register;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_resources_rif_mac_profile_register;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_resources_rifs_register;

	return 0;

err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}
3734
/* Register all devlink resources for Spectrum-2 and later. Identical to
 * mlxsw_sp1_resources_register() except for the simpler, unpartitioned
 * KVD registration. All error labels fall through to a single
 * devl_resources_unregister() cleanup.
 */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_policer_resources_register;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_resources_rif_mac_profile_register;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_resources_rifs_register;

	return 0;

err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}
3773
/* Compute the KVD partition sizes (single, double, linear) to program
 * into the firmware. User-configured sizes are taken from devlink when
 * available; otherwise defaults are derived from the config profile the
 * same way mlxsw_sp1_resources_kvd_register() derives them.
 *
 * Returns 0 on success, -EIO if the device resources are missing or the
 * resulting sizes violate the device minimums.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash area is what is left of the KVD after the linear
	 * part. It is split into single and double entries by the parts
	 * ratio from the profile, rounded to the KVD granularity. If
	 * the user provided sizes via devlink, those take precedence.
	 */
	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_LINEAR,
				     p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
				     p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
				     p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Sanity-check the final sizes against the device minimums. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}
3828
3829 static int
3830 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
3831 struct devlink_param_gset_ctx *ctx)
3832 {
3833 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3834 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3835
3836 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
3837 return 0;
3838 }
3839
3840 static int
3841 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
3842 struct devlink_param_gset_ctx *ctx)
3843 {
3844 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3845 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3846
3847 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
3848 }
3849
/* devlink parameters exposed on Spectrum-2 and later. Only the runtime
 * configuration mode is supported for the ACL region rehash interval.
 */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};
3859
3860 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
3861 {
3862 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3863 union devlink_param_value value;
3864 int err;
3865
3866 err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
3867 ARRAY_SIZE(mlxsw_sp2_devlink_params));
3868 if (err)
3869 return err;
3870
3871 value.vu32 = 0;
3872 devlink_param_driverinit_value_set(devlink,
3873 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
3874 value);
3875 return 0;
3876 }
3877
3878 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
3879 {
3880 devlink_params_unregister(priv_to_devlink(mlxsw_core),
3881 mlxsw_sp2_devlink_params,
3882 ARRAY_SIZE(mlxsw_sp2_devlink_params));
3883 }
3884
/* Core callback invoked when a PTP packet has been transmitted. Strips
 * the mlxsw TX header before handing the skb to the generation-specific
 * PTP handler for timestamping.
 */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u16 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}
3893
/* mlxsw core driver description for Spectrum-1. Unlike later
 * generations it supplies kvd_sizes_get (partitioned KVD) and has no
 * devlink params or selective port removal.
 */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind				= mlxsw_sp1_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp1_fw_rev,
	.fw_filename			= MLXSW_SP1_FW_FILENAME,
	.init				= mlxsw_sp1_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp1_resources_register,
	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp1_config_profile,
	.sdq_supports_cqe_v2		= false,
};
3930
/* mlxsw core driver description for Spectrum-2. */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind				= mlxsw_sp2_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp2_fw_rev,
	.fw_filename			= MLXSW_SP2_FW_FILENAME,
	.init				= mlxsw_sp2_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.ports_remove_selected		= mlxsw_sp_ports_remove_selected,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp2_params_register,
	.params_unregister		= mlxsw_sp2_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.sdq_supports_cqe_v2		= true,
};
3969
/* mlxsw core driver description for Spectrum-3; shares the SP2 resource,
 * params and config-profile callbacks.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind				= mlxsw_sp3_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp3_fw_rev,
	.fw_filename			= MLXSW_SP3_FW_FILENAME,
	.init				= mlxsw_sp3_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.ports_remove_selected		= mlxsw_sp_ports_remove_selected,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp2_params_register,
	.params_unregister		= mlxsw_sp2_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.sdq_supports_cqe_v2		= true,
};
4008
/* mlxsw core driver description for Spectrum-4. Note: no fw_req_rev or
 * fw_filename — unlike SP1-SP3 the driver does not flash firmware here.
 */
static struct mlxsw_driver mlxsw_sp4_driver = {
	.kind				= mlxsw_sp4_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp4_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.ports_remove_selected		= mlxsw_sp_ports_remove_selected,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp2_params_register,
	.params_unregister		= mlxsw_sp2_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.sdq_supports_cqe_v2		= true,
};
4045
4046 bool mlxsw_sp_port_dev_check(const struct net_device *dev)
4047 {
4048 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
4049 }
4050
4051 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
4052 struct netdev_nested_priv *priv)
4053 {
4054 int ret = 0;
4055
4056 if (mlxsw_sp_port_dev_check(lower_dev)) {
4057 priv->data = (void *)netdev_priv(lower_dev);
4058 ret = 1;
4059 }
4060
4061 return ret;
4062 }
4063
4064 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
4065 {
4066 struct netdev_nested_priv priv = {
4067 .data = NULL,
4068 };
4069
4070 if (mlxsw_sp_port_dev_check(dev))
4071 return netdev_priv(dev);
4072
4073 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);
4074
4075 return (struct mlxsw_sp_port *)priv.data;
4076 }
4077
4078 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
4079 {
4080 struct mlxsw_sp_port *mlxsw_sp_port;
4081
4082 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
4083 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
4084 }
4085
4086 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
4087 {
4088 struct netdev_nested_priv priv = {
4089 .data = NULL,
4090 };
4091
4092 if (mlxsw_sp_port_dev_check(dev))
4093 return netdev_priv(dev);
4094
4095 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
4096 &priv);
4097
4098 return (struct mlxsw_sp_port *)priv.data;
4099 }
4100
4101 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
4102 {
4103 struct mlxsw_sp_port *mlxsw_sp_port;
4104
4105 rcu_read_lock();
4106 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
4107 if (mlxsw_sp_port)
4108 dev_hold(mlxsw_sp_port->dev);
4109 rcu_read_unlock();
4110 return mlxsw_sp_port;
4111 }
4112
/* Drop the netdev reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
4117
/* Take a reference on the increased parsing depth, programming the MPRS
 * register the first time. Serialized by parsing.lock so that the register
 * write and the refcount transition 0->1 are atomic with respect to other
 * inc/dec/dport-set callers. Returns 0 or a register-write error.
 */
int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err = 0;

	mutex_lock(&mlxsw_sp->parsing.lock);

	/* Depth already increased by another user - just add a reference. */
	if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	/* First user: program the increased depth, preserving the currently
	 * configured VxLAN UDP destination port.
	 */
	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH;
	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}
4141
/* Release a reference taken by mlxsw_sp_parsing_depth_inc(); the last user
 * restores the default parsing depth. The register-write result is ignored
 * as this is best-effort teardown with no way to report failure.
 */
void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];

	mutex_lock(&mlxsw_sp->parsing.lock);

	/* Other users still need the increased depth. */
	if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
}
4159
/* Program the VxLAN UDP destination port used for parsing, keeping the
 * currently configured parsing depth. The cached host-order copy is only
 * updated after the register write succeeds. Returns 0 or a write error.
 */
int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp,
					 __be16 udp_dport)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err;

	mutex_lock(&mlxsw_sp->parsing.lock);

	mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth,
			    be16_to_cpu(udp_dport));
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}
4180
/* Make the port leave every bridge it is a member of through @lag_dev:
 * the LAG itself when it is directly enslaved to a bridge, plus any upper
 * devices of the LAG (e.g. VLAN uppers) that are bridge ports.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		/* Leave the bridge that this upper is enslaved to. */
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}
4199
4200 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
4201 {
4202 char sldr_pl[MLXSW_REG_SLDR_LEN];
4203
4204 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
4205 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4206 }
4207
4208 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
4209 {
4210 char sldr_pl[MLXSW_REG_SLDR_LEN];
4211
4212 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
4213 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4214 }
4215
4216 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4217 u16 lag_id, u8 port_index)
4218 {
4219 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4220 char slcor_pl[MLXSW_REG_SLCOR_LEN];
4221
4222 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
4223 lag_id, port_index);
4224 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4225 }
4226
4227 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4228 u16 lag_id)
4229 {
4230 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4231 char slcor_pl[MLXSW_REG_SLCOR_LEN];
4232
4233 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
4234 lag_id);
4235 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4236 }
4237
4238 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
4239 u16 lag_id)
4240 {
4241 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4242 char slcor_pl[MLXSW_REG_SLCOR_LEN];
4243
4244 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
4245 lag_id);
4246 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4247 }
4248
4249 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
4250 u16 lag_id)
4251 {
4252 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4253 char slcor_pl[MLXSW_REG_SLCOR_LEN];
4254
4255 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
4256 lag_id);
4257 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4258 }
4259
4260 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4261 struct net_device *lag_dev,
4262 u16 *p_lag_id)
4263 {
4264 struct mlxsw_sp_upper *lag;
4265 int free_lag_id = -1;
4266 u64 max_lag;
4267 int i;
4268
4269 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
4270 for (i = 0; i < max_lag; i++) {
4271 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
4272 if (lag->ref_count) {
4273 if (lag->dev == lag_dev) {
4274 *p_lag_id = i;
4275 return 0;
4276 }
4277 } else if (free_lag_id < 0) {
4278 free_lag_id = i;
4279 }
4280 }
4281 if (free_lag_id < 0)
4282 return -EBUSY;
4283 *p_lag_id = free_lag_id;
4284 return 0;
4285 }
4286
/* Validate that @lag_dev can be offloaded: a LAG index must be available
 * and the LAG must use hash Tx. On failure an extack message is set and
 * false is returned.
 */
static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}
4305
4306 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4307 u16 lag_id, u8 *p_port_index)
4308 {
4309 u64 max_lag_members;
4310 int i;
4311
4312 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
4313 MAX_LAG_MEMBERS);
4314 for (i = 0; i < max_lag_members; i++) {
4315 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
4316 *p_port_index = i;
4317 return 0;
4318 }
4319 }
4320 return -EBUSY;
4321 }
4322
/* Enslave the port to @lag_dev in hardware: create the LAG record if this
 * is its first member, add the port as a collector, and move the default
 * VLAN's router interface from the port to the LAG. The error unwind
 * reverses these steps in exact reverse order.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		/* First member - create the LAG record in hardware. */
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* As a LAG member the port can no longer serve as a router
	 * interface on its own.
	 */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	/* Join a router interface configured on the LAG, if any. */
	err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan,
					     lag_dev, extack);
	if (err)
		goto err_router_join;

	return 0;

err_router_join:
	lag->ref_count--;
	mlxsw_sp_port->lagged = 0;
	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
4380
/* Release the port from @lag_dev: remove it from the collector, flush its
 * VLANs, leave any bridges reached through the LAG, and destroy the LAG
 * record when this was the last member. No-op if the port is not lagged.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid. */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);

	/* Make the LAG and its directly linked uppers leave the bridges
	 * they are members of.
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress again. */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
			       ETH_P_8021Q);
}
4415
4416 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4417 u16 lag_id)
4418 {
4419 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4420 char sldr_pl[MLXSW_REG_SLDR_LEN];
4421
4422 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
4423 mlxsw_sp_port->local_port);
4424 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4425 }
4426
4427 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4428 u16 lag_id)
4429 {
4430 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4431 char sldr_pl[MLXSW_REG_SLDR_LEN];
4432
4433 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
4434 mlxsw_sp_port->local_port);
4435 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4436 }
4437
4438 static int
4439 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
4440 {
4441 int err;
4442
4443 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
4444 mlxsw_sp_port->lag_id);
4445 if (err)
4446 return err;
4447
4448 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4449 if (err)
4450 goto err_dist_port_add;
4451
4452 return 0;
4453
4454 err_dist_port_add:
4455 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4456 return err;
4457 }
4458
4459 static int
4460 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
4461 {
4462 int err;
4463
4464 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4465 mlxsw_sp_port->lag_id);
4466 if (err)
4467 return err;
4468
4469 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
4470 mlxsw_sp_port->lag_id);
4471 if (err)
4472 goto err_col_port_disable;
4473
4474 return 0;
4475
4476 err_col_port_disable:
4477 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4478 return err;
4479 }
4480
4481 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
4482 struct netdev_lag_lower_state_info *info)
4483 {
4484 if (info->tx_enabled)
4485 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
4486 else
4487 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
4488 }
4489
4490 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4491 bool enable)
4492 {
4493 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4494 enum mlxsw_reg_spms_state spms_state;
4495 char *spms_pl;
4496 u16 vid;
4497 int err;
4498
4499 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
4500 MLXSW_REG_SPMS_STATE_DISCARDING;
4501
4502 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
4503 if (!spms_pl)
4504 return -ENOMEM;
4505 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
4506
4507 for (vid = 0; vid < VLAN_N_VID; vid++)
4508 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
4509
4510 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
4511 kfree(spms_pl);
4512 return err;
4513 }
4514
/* Prepare the port for OVS: switch to virtual port (VLAN-aware) mode, set
 * all VLANs to forwarding, add VIDs 1..4094 as egress-tagged members and
 * disable learning on each. On failure, learning is re-enabled only for
 * VIDs that were actually disabled (vid-- skips the failing VID), then the
 * earlier steps are undone in reverse order.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}
4549
4550 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4551 {
4552 u16 vid;
4553
4554 for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
4555 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4556 vid, true);
4557
4558 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
4559 false, false);
4560 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4561 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
4562 }
4563
4564 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
4565 {
4566 unsigned int num_vxlans = 0;
4567 struct net_device *dev;
4568 struct list_head *iter;
4569
4570 netdev_for_each_lower_dev(br_dev, dev, iter) {
4571 if (netif_is_vxlan(dev))
4572 num_vxlans++;
4573 }
4574
4575 return num_vxlans > 1;
4576 }
4577
4578 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
4579 {
4580 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
4581 struct net_device *dev;
4582 struct list_head *iter;
4583
4584 netdev_for_each_lower_dev(br_dev, dev, iter) {
4585 u16 pvid;
4586 int err;
4587
4588 if (!netif_is_vxlan(dev))
4589 continue;
4590
4591 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
4592 if (err || !pvid)
4593 continue;
4594
4595 if (test_and_set_bit(pvid, vlans))
4596 return false;
4597 }
4598
4599 return true;
4600 }
4601
4602 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
4603 struct netlink_ext_ack *extack)
4604 {
4605 if (br_multicast_enabled(br_dev)) {
4606 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
4607 return false;
4608 }
4609
4610 if (!br_vlan_enabled(br_dev) &&
4611 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
4612 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
4613 return false;
4614 }
4615
4616 if (br_vlan_enabled(br_dev) &&
4617 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
4618 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
4619 return false;
4620 }
4621
4622 return true;
4623 }
4624
/* Handle PRECHANGEUPPER/CHANGEUPPER notifications for an mlxsw port @dev.
 * PRECHANGEUPPER validates that the requested topology can be offloaded
 * (rejecting with an extack message otherwise); CHANGEUPPER applies the
 * join/leave to bridges, LAGs, OVS masters and cleans up after macvlan
 * and VLAN uppers. @lower_dev is the device the notification originally
 * targeted (the port itself, or a LAG above it).
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;
	u16 proto;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only these upper device types can be offloaded. */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Unlinking is always allowed. */
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		if (netif_is_bridge_master(upper_dev)) {
			/* Only 802.1Q and 802.1AD VLAN-aware bridges are
			 * supported, and 802.1AD only without VLAN uppers
			 * on the enslaved port.
			 */
			br_vlan_get_proto(upper_dev, &proto);
			if (br_vlan_enabled(upper_dev) &&
			    proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
				return -EOPNOTSUPP;
			}
			if (vlan_uses_dev(lower_dev) &&
			    br_vlan_enabled(upper_dev) &&
			    proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
				return -EOPNOTSUPP;
			}
		}
		if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
			struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);

			if (br_vlan_enabled(br_dev)) {
				br_vlan_get_proto(br_dev, &proto);
				if (proto == ETH_P_8021AD) {
					NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
					return -EOPNOTSUPP;
				}
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev, extack);
			} else {
				/* Stop Tx/Rx on the LAG before leaving it. */
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			/* Only VLAN uppers that are bridge ports require
			 * cleanup, and only on unlinking.
			 */
			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}
4770
4771 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4772 unsigned long event, void *ptr)
4773 {
4774 struct netdev_notifier_changelowerstate_info *info;
4775 struct mlxsw_sp_port *mlxsw_sp_port;
4776 int err;
4777
4778 mlxsw_sp_port = netdev_priv(dev);
4779 info = ptr;
4780
4781 switch (event) {
4782 case NETDEV_CHANGELOWERSTATE:
4783 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4784 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4785 info->lower_state_info);
4786 if (err)
4787 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4788 }
4789 break;
4790 }
4791
4792 return 0;
4793 }
4794
4795 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
4796 struct net_device *port_dev,
4797 unsigned long event, void *ptr)
4798 {
4799 switch (event) {
4800 case NETDEV_PRECHANGEUPPER:
4801 case NETDEV_CHANGEUPPER:
4802 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
4803 event, ptr);
4804 case NETDEV_CHANGELOWERSTATE:
4805 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
4806 ptr);
4807 }
4808
4809 return 0;
4810 }
4811
4812 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4813 unsigned long event, void *ptr)
4814 {
4815 struct net_device *dev;
4816 struct list_head *iter;
4817 int ret;
4818
4819 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4820 if (mlxsw_sp_port_dev_check(dev)) {
4821 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
4822 ptr);
4823 if (ret)
4824 return ret;
4825 }
4826 }
4827
4828 return 0;
4829 }
4830
/* Handle PRECHANGEUPPER/CHANGEUPPER for a VLAN device @vlan_dev whose real
 * device is the mlxsw port @dev (VID @vid). Validates that only bridge,
 * macvlan and VRF uppers are placed on the VLAN, then joins/leaves the
 * bridge or cleans up after the macvlan accordingly.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Unlinking is always allowed. */
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		}
		break;
	}

	return err;
}
4895
4896 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
4897 struct net_device *lag_dev,
4898 unsigned long event,
4899 void *ptr, u16 vid)
4900 {
4901 struct net_device *dev;
4902 struct list_head *iter;
4903 int ret;
4904
4905 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4906 if (mlxsw_sp_port_dev_check(dev)) {
4907 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
4908 event, ptr,
4909 vid);
4910 if (ret)
4911 return ret;
4912 }
4913 }
4914
4915 return 0;
4916 }
4917
/* Handle upper-device events for a VLAN device on top of a bridge that is
 * offloaded by this driver: only macvlan and VRF uppers are accepted, and
 * macvlan state is cleaned up on unlinking. No-op when the bridge is not
 * ours (mlxsw_sp lookup fails).
 */
static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		/* Unlinking is always allowed. */
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}
4960
4961 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4962 unsigned long event, void *ptr)
4963 {
4964 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4965 u16 vid = vlan_dev_vlan_id(vlan_dev);
4966
4967 if (mlxsw_sp_port_dev_check(real_dev))
4968 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
4969 event, ptr, vid);
4970 else if (netif_is_lag_master(real_dev))
4971 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
4972 real_dev, event,
4973 ptr, vid);
4974 else if (netif_is_bridge_master(real_dev))
4975 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
4976 event, ptr, vid);
4977
4978 return 0;
4979 }
4980
/* Handle upper-device events for a bridge offloaded by this driver: only
 * 802.1q VLAN, macvlan and VRF uppers are accepted (none on an 802.1ad
 * bridge), and VLAN/macvlan state is torn down on unlinking. No-op when
 * the bridge is not ours.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	u16 proto;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		/* Unlinking is always allowed. */
		if (!info->linking)
			break;
		if (br_vlan_enabled(br_dev)) {
			br_vlan_get_proto(br_dev, &proto);
			if (proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
				return -EOPNOTSUPP;
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		/* Tear down router state associated with the removed upper. */
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}
5037
5038 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
5039 unsigned long event, void *ptr)
5040 {
5041 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
5042 struct netdev_notifier_changeupper_info *info = ptr;
5043 struct netlink_ext_ack *extack;
5044 struct net_device *upper_dev;
5045
5046 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
5047 return 0;
5048
5049 extack = netdev_notifier_info_to_extack(&info->info);
5050 upper_dev = info->upper_dev;
5051
5052 if (!netif_is_l3_master(upper_dev)) {
5053 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5054 return -EOPNOTSUPP;
5055 }
5056
5057 return 0;
5058 }
5059
/* Handle events on a VxLAN device @dev relevant to bridge offload: join
 * the bridge's hardware representation when the VxLAN is linked to an
 * offloaded bridge or brought up under one, and leave it on unlinking or
 * NETDEV_DOWN. For VLAN-aware bridges the join/leave is deferred to VLAN
 * configuration, so those cases return early here.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		/* Ignore bridges that are not backed by our ports. */
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			if (!netif_running(dev))
				return 0;
			/* In a VLAN-aware bridge the VxLAN joins per-VLAN;
			 * handled when VLANs are configured, not here.
			 * NOTE(review): matches the br_vlan_enabled() early
			 * return below - confirm against the VLAN handlers.
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLAN-aware case: the leave already happened via
			 * VLAN teardown - presumably, verify against the
			 * bridge VLAN code.
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}
5127
5128 static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
5129 unsigned long event, void *ptr)
5130 {
5131 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5132 struct mlxsw_sp_span_entry *span_entry;
5133 struct mlxsw_sp *mlxsw_sp;
5134 int err = 0;
5135
5136 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
5137 if (event == NETDEV_UNREGISTER) {
5138 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
5139 if (span_entry)
5140 mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
5141 }
5142 mlxsw_sp_span_respin(mlxsw_sp);
5143
5144 if (netif_is_vxlan(dev))
5145 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
5146 else if (mlxsw_sp_port_dev_check(dev))
5147 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
5148 else if (netif_is_lag_master(dev))
5149 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
5150 else if (is_vlan_dev(dev))
5151 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
5152 else if (netif_is_bridge_master(dev))
5153 err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
5154 else if (netif_is_macvlan(dev))
5155 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);
5156
5157 return notifier_from_errno(err);
5158 }
5159
/* IPv4/IPv6 address validator notifiers; registered unconditionally in
 * mlxsw_sp_module_init() (i.e. for the lifetime of the module, not per
 * device instance) and torn down in mlxsw_sp_module_exit().
 */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};
5167
/* PCI device ID tables and skeleton PCI drivers, one pair per Spectrum
 * ASIC generation (SP1..SP4). Each driver carries only a name and an ID
 * table; they are registered through mlxsw_pci_driver_register() in
 * mlxsw_sp_module_init(), which supplies the common probe machinery.
 */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

static const struct pci_device_id mlxsw_sp4_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0},
	{0, },
};

static struct pci_driver mlxsw_sp4_pci_driver = {
	.name = mlxsw_sp4_driver_name,
	.id_table = mlxsw_sp4_pci_id_table,
};
5207
/* Module entry point: registers the address validator notifiers, then the
 * four per-generation core drivers, then the four PCI drivers. On any
 * failure, everything registered so far is unwound in reverse order via
 * the goto ladder. Returns 0 on success or a negative errno.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp4_driver);
	if (err)
		goto err_sp4_core_driver_register;

	/* PCI drivers go last: a successful PCI registration may probe
	 * devices immediately, which requires the core drivers above.
	 */
	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver);
	if (err)
		goto err_sp4_pci_driver_register;

	return 0;

err_sp4_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
err_sp4_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}
5268
/* Module teardown: unregisters everything in exact reverse order of
 * mlxsw_sp_module_init() — PCI drivers first, then core drivers, then
 * the validator notifiers.
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}
5282
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
/* One PCI alias table per supported ASIC generation so the module is
 * auto-loaded for any of them.
 */
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table);
/* Advertise the firmware images (flashed on demand) so initramfs
 * tooling can bundle them with the module.
 */
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME);