0001
0002
0003
0004 #include <linux/kernel.h>
0005 #include <linux/module.h>
0006 #include <linux/device.h>
0007 #include <linux/export.h>
0008 #include <linux/err.h>
0009 #include <linux/if_link.h>
0010 #include <linux/netdevice.h>
0011 #include <linux/completion.h>
0012 #include <linux/skbuff.h>
0013 #include <linux/etherdevice.h>
0014 #include <linux/types.h>
0015 #include <linux/string.h>
0016 #include <linux/gfp.h>
0017 #include <linux/random.h>
0018 #include <linux/jiffies.h>
0019 #include <linux/mutex.h>
0020 #include <linux/rcupdate.h>
0021 #include <linux/slab.h>
0022 #include <linux/workqueue.h>
0023 #include <linux/firmware.h>
0024 #include <asm/byteorder.h>
0025 #include <net/devlink.h>
0026 #include <trace/events/devlink.h>
0027
0028 #include "core.h"
0029 #include "core_env.h"
0030 #include "item.h"
0031 #include "cmd.h"
0032 #include "port.h"
0033 #include "trap.h"
0034 #include "emad.h"
0035 #include "reg.h"
0036 #include "resources.h"
0037 #include "../mlxfw/mlxfw.h"
0038
0039 static LIST_HEAD(mlxsw_core_driver_list);
0040 static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
0041
0042 static const char mlxsw_core_driver_name[] = "mlxsw_core";
0043
0044 static struct workqueue_struct *mlxsw_wq;
0045 static struct workqueue_struct *mlxsw_owq;
0046
0047 struct mlxsw_core_port {
0048 struct devlink_port devlink_port;
0049 void *port_driver_priv;
0050 u16 local_port;
0051 struct mlxsw_linecard *linecard;
0052 };
0053
0054 void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
0055 {
0056 return mlxsw_core_port->port_driver_priv;
0057 }
0058 EXPORT_SYMBOL(mlxsw_core_port_driver_priv);
0059
0060 static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
0061 {
0062 return mlxsw_core_port->port_driver_priv != NULL;
0063 }
0064
0065 struct mlxsw_core {
0066 struct mlxsw_driver *driver;
0067 const struct mlxsw_bus *bus;
0068 void *bus_priv;
0069 const struct mlxsw_bus_info *bus_info;
0070 struct workqueue_struct *emad_wq;
0071 struct list_head rx_listener_list;
0072 struct list_head event_listener_list;
0073 struct {
0074 atomic64_t tid;
0075 struct list_head trans_list;
0076 spinlock_t trans_list_lock;
0077 bool use_emad;
0078 bool enable_string_tlv;
0079 } emad;
0080 struct {
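/* lag_id + port_index to local_port mapping */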
0081 u16 *mapping;
0082 } lag;
0083 struct mlxsw_res res;
0084 struct mlxsw_hwmon *hwmon;
0085 struct mlxsw_thermal *thermal;
0086 struct mlxsw_linecards *linecards;
0087 struct mlxsw_core_port *ports;
0088 unsigned int max_ports;
0089 atomic_t active_ports_count;
0090 bool fw_flash_in_progress;
0091 struct {
0092 struct devlink_health_reporter *fw_fatal;
0093 } health;
0094 struct mlxsw_env *env;
0095 unsigned long driver_priv[];
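/* driver_priv has to be always the last item */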
0096
0097 };
0098
0099 struct mlxsw_linecards *mlxsw_core_linecards(struct mlxsw_core *mlxsw_core)
0100 {
0101 return mlxsw_core->linecards;
0102 }
0103
0104 void mlxsw_core_linecards_set(struct mlxsw_core *mlxsw_core,
0105 struct mlxsw_linecards *linecards)
0106 {
0107 mlxsw_core->linecards = linecards;
0108 }
0109
0110 #define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40
0111
0112 static u64 mlxsw_ports_occ_get(void *priv)
0113 {
0114 struct mlxsw_core *mlxsw_core = priv;
0115
0116 return atomic_read(&mlxsw_core->active_ports_count);
0117 }
0118
0119 static int mlxsw_core_resources_ports_register(struct mlxsw_core *mlxsw_core)
0120 {
0121 struct devlink *devlink = priv_to_devlink(mlxsw_core);
0122 struct devlink_resource_size_params ports_num_params;
0123 u32 max_ports;
0124
0125 max_ports = mlxsw_core->max_ports - 1;
0126 devlink_resource_size_params_init(&ports_num_params, max_ports,
0127 max_ports, 1,
0128 DEVLINK_RESOURCE_UNIT_ENTRY);
0129
0130 return devl_resource_register(devlink,
0131 DEVLINK_RESOURCE_GENERIC_NAME_PORTS,
0132 max_ports, MLXSW_CORE_RESOURCE_PORTS,
0133 DEVLINK_RESOURCE_ID_PARENT_TOP,
0134 &ports_num_params);
0135 }
0136
0137 static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core, bool reload)
0138 {
0139 struct devlink *devlink = priv_to_devlink(mlxsw_core);
0140 int err;
0141
0142
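/* Switch ports are numbered from 1 to the queried value */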
0143 if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
0144 mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
0145 MAX_SYSTEM_PORT) + 1;
0146 else
0147 mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;
0148
0149 mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
0150 sizeof(struct mlxsw_core_port), GFP_KERNEL);
0151 if (!mlxsw_core->ports)
0152 return -ENOMEM;
0153
0154 if (!reload) {
0155 err = mlxsw_core_resources_ports_register(mlxsw_core);
0156 if (err)
0157 goto err_resources_ports_register;
0158 }
0159 atomic_set(&mlxsw_core->active_ports_count, 0);
0160 devl_resource_occ_get_register(devlink, MLXSW_CORE_RESOURCE_PORTS,
0161 mlxsw_ports_occ_get, mlxsw_core);
0162
0163 return 0;
0164
0165 err_resources_ports_register:
0166 kfree(mlxsw_core->ports);
0167 return err;
0168 }
0169
0170 static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core, bool reload)
0171 {
0172 struct devlink *devlink = priv_to_devlink(mlxsw_core);
0173
0174 devl_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS);
0175 if (!reload)
0176 devl_resources_unregister(priv_to_devlink(mlxsw_core));
0177
0178 kfree(mlxsw_core->ports);
0179 }
0180
0181 unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
0182 {
0183 return mlxsw_core->max_ports;
0184 }
0185 EXPORT_SYMBOL(mlxsw_core_max_ports);
0186
0187 void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
0188 {
0189 return mlxsw_core->driver_priv;
0190 }
0191 EXPORT_SYMBOL(mlxsw_core_driver_priv);
0192
0193 bool
0194 mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
0195 const struct mlxsw_fw_rev *req_rev)
0196 {
0197 return rev->minor > req_rev->minor ||
0198 (rev->minor == req_rev->minor &&
0199 rev->subminor >= req_rev->subminor);
0200 }
0201 EXPORT_SYMBOL(mlxsw_core_fw_rev_minor_subminor_validate);
0202
0203 struct mlxsw_rx_listener_item {
0204 struct list_head list;
0205 struct mlxsw_rx_listener rxl;
0206 void *priv;
0207 bool enabled;
0208 };
0209
0210 struct mlxsw_event_listener_item {
0211 struct list_head list;
0212 struct mlxsw_core *mlxsw_core;
0213 struct mlxsw_event_listener el;
0214 void *priv;
0215 };
0216
0217 static const u8 mlxsw_core_trap_groups[] = {
0218 MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
0219 MLXSW_REG_HTGT_TRAP_GROUP_CORE_EVENT,
0220 };
0221
0222 static int mlxsw_core_trap_groups_set(struct mlxsw_core *mlxsw_core)
0223 {
0224 char htgt_pl[MLXSW_REG_HTGT_LEN];
0225 int err;
0226 int i;
0227
0228 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
0229 return 0;
0230
0231 for (i = 0; i < ARRAY_SIZE(mlxsw_core_trap_groups); i++) {
0232 mlxsw_reg_htgt_pack(htgt_pl, mlxsw_core_trap_groups[i],
0233 MLXSW_REG_HTGT_INVALID_POLICER,
0234 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
0235 MLXSW_REG_HTGT_DEFAULT_TC);
0236 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
0237 if (err)
0238 return err;
0239 }
0240 return 0;
0241 }
0242
0250
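/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header; set to MLXSW_EMAD_EH_DMAC.
 */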
0251 MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);
0252
0253
0254
0255
0256
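/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header; set to MLXSW_EMAD_EH_SMAC.
 */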
0257 MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);
0258
0259
0260
0261
0262
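/* emad_eth_hdr_ethertype
 * Ethertype of EMAD packets; set to MLXSW_EMAD_EH_ETHERTYPE.
 */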
0263 MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);
0264
0265
0266
0267
0268
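/* emad_eth_hdr_mlx_proto
 * Mellanox protocol field; set to MLXSW_EMAD_EH_MLX_PROTO.
 */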
0269 MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);
0270
0271
0272
0273
0274
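/* emad_eth_hdr_ver
 * EMAD protocol version; set to MLXSW_EMAD_EH_PROTO_VERSION.
 */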
0275 MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);
0276
0277
0278
0279
0280
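/* emad_op_tlv_type
 * Type of the TLV; MLXSW_EMAD_TLV_TYPE_OP for the operation TLV.
 */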
0281 MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);
0282
0283
0284
0285
0286
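/* emad_op_tlv_len
 * Length of the operation TLV in u32 units (MLXSW_EMAD_OP_TLV_LEN).
 */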
0287 MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);
0294
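/* emad_op_tlv_dr
 * Direct route bit; cleared by mlxsw_emad_pack_op_tlv(), as direct route
 * EMADs are not used.
 */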
0295 MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);
0311
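/* emad_op_tlv_status
 * Status of the operation. Zero in requests and in successful responses;
 * error statuses are translated by mlxsw_emad_process_status().
 */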
0312 MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);
0313
0314
0315
0316
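/* emad_op_tlv_register_id
 * ID of the register being accessed.
 */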
0317 MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);
0318
0319
0320
0321
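/* emad_op_tlv_r
 * Request/response bit; MLXSW_EMAD_OP_TLV_REQUEST in requests,
 * MLXSW_EMAD_OP_TLV_RESPONSE in responses.
 */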
0322 MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);
0330
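/* emad_op_tlv_method
 * Access method; MLXSW_EMAD_OP_TLV_METHOD_QUERY or
 * MLXSW_EMAD_OP_TLV_METHOD_WRITE.
 */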
0331 MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);
0332
0333
0334
0335
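/* emad_op_tlv_class
 * Operation class; set to MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS.
 */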
0336 MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
0337
0338
0339
0340
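/* emad_op_tlv_tid
 * Transaction ID, used to match a response to its pending transaction.
 */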
0341 MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
0342
0343
0344
0345
0346
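/* emad_string_tlv_type
 * Type of the TLV; MLXSW_EMAD_TLV_TYPE_STRING for the string TLV.
 */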
0347 MLXSW_ITEM32(emad, string_tlv, type, 0x00, 27, 5);
0348
0349
0350
0351
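/* emad_string_tlv_len
 * Length of the string TLV in u32 units (MLXSW_EMAD_STRING_TLV_LEN).
 */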
0352 MLXSW_ITEM32(emad, string_tlv, len, 0x00, 16, 11);
0353
0354 #define MLXSW_EMAD_STRING_TLV_STRING_LEN 128
0355
0356
0357
0358
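/* emad_string_tlv_string
 * Error string returned by the firmware when a register access fails.
 */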
0359 MLXSW_ITEM_BUF(emad, string_tlv, string, 0x04,
0360 MLXSW_EMAD_STRING_TLV_STRING_LEN);
0361
0362
0363
0364
0365
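/* emad_reg_tlv_type
 * Type of the TLV; MLXSW_EMAD_TLV_TYPE_REG for the register TLV.
 */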
0366 MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);
0367
0368
0369
0370
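/* emad_reg_tlv_len
 * Length of the register TLV in u32 units, including the TLV header.
 */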
0371 MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);
0372
0373
0374
0375
0376
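/* emad_end_tlv_type
 * Type of the TLV; MLXSW_EMAD_TLV_TYPE_END for the end TLV.
 */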
0377 MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);
0378
0379
0380
0381
0382
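/* emad_end_tlv_len
 * Length of the end TLV in u32 units (MLXSW_EMAD_END_TLV_LEN).
 */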
0383 MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
0384
0385 enum mlxsw_core_reg_access_type {
0386 MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
0387 MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
0388 };
0389
0390 static inline const char *
0391 mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
0392 {
0393 switch (type) {
0394 case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
0395 return "query";
0396 case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
0397 return "write";
0398 }
0399 BUG();
0400 }
0401
0402 static void mlxsw_emad_pack_end_tlv(char *end_tlv)
0403 {
0404 mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
0405 mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
0406 }
0407
0408 static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
0409 const struct mlxsw_reg_info *reg,
0410 char *payload)
0411 {
0412 mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
0413 mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
0414 memcpy(reg_tlv + sizeof(u32), payload, reg->len);
0415 }
0416
0417 static void mlxsw_emad_pack_string_tlv(char *string_tlv)
0418 {
0419 mlxsw_emad_string_tlv_type_set(string_tlv, MLXSW_EMAD_TLV_TYPE_STRING);
0420 mlxsw_emad_string_tlv_len_set(string_tlv, MLXSW_EMAD_STRING_TLV_LEN);
0421 }
0422
0423 static void mlxsw_emad_pack_op_tlv(char *op_tlv,
0424 const struct mlxsw_reg_info *reg,
0425 enum mlxsw_core_reg_access_type type,
0426 u64 tid)
0427 {
0428 mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
0429 mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
0430 mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
0431 mlxsw_emad_op_tlv_status_set(op_tlv, 0);
0432 mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
0433 mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
0434 if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
0435 mlxsw_emad_op_tlv_method_set(op_tlv,
0436 MLXSW_EMAD_OP_TLV_METHOD_QUERY);
0437 else
0438 mlxsw_emad_op_tlv_method_set(op_tlv,
0439 MLXSW_EMAD_OP_TLV_METHOD_WRITE);
0440 mlxsw_emad_op_tlv_class_set(op_tlv,
0441 MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
0442 mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
0443 }
0444
0445 static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
0446 {
0447 char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);
0448
0449 mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
0450 mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
0451 mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
0452 mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
0453 mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);
0454
0455 skb_reset_mac_header(skb);
0456
0457 return 0;
0458 }
0459
0460 static void mlxsw_emad_construct(struct sk_buff *skb,
0461 const struct mlxsw_reg_info *reg,
0462 char *payload,
0463 enum mlxsw_core_reg_access_type type,
0464 u64 tid, bool enable_string_tlv)
0465 {
0466 char *buf;
0467
0468 buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
0469 mlxsw_emad_pack_end_tlv(buf);
0470
0471 buf = skb_push(skb, reg->len + sizeof(u32));
0472 mlxsw_emad_pack_reg_tlv(buf, reg, payload);
0473
0474 if (enable_string_tlv) {
0475 buf = skb_push(skb, MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32));
0476 mlxsw_emad_pack_string_tlv(buf);
0477 }
0478
0479 buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
0480 mlxsw_emad_pack_op_tlv(buf, reg, type, tid);
0481
0482 mlxsw_emad_construct_eth_hdr(skb);
0483 }
0484
0485 struct mlxsw_emad_tlv_offsets {
0486 u16 op_tlv;
0487 u16 string_tlv;
0488 u16 reg_tlv;
0489 };
0490
0491 static bool mlxsw_emad_tlv_is_string_tlv(const char *tlv)
0492 {
0493 u8 tlv_type = mlxsw_emad_string_tlv_type_get(tlv);
0494
0495 return tlv_type == MLXSW_EMAD_TLV_TYPE_STRING;
0496 }
0497
0498 static void mlxsw_emad_tlv_parse(struct sk_buff *skb)
0499 {
0500 struct mlxsw_emad_tlv_offsets *offsets =
0501 (struct mlxsw_emad_tlv_offsets *) skb->cb;
0502
0503 offsets->op_tlv = MLXSW_EMAD_ETH_HDR_LEN;
0504 offsets->string_tlv = 0;
0505 offsets->reg_tlv = MLXSW_EMAD_ETH_HDR_LEN +
0506 MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
0507
0508
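/* If present, the string TLV follows the operation TLV. */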
0509 if (mlxsw_emad_tlv_is_string_tlv(skb->data + offsets->reg_tlv)) {
0510 offsets->string_tlv = offsets->reg_tlv;
0511 offsets->reg_tlv += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
0512 }
0513 }
0514
0515 static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
0516 {
0517 struct mlxsw_emad_tlv_offsets *offsets =
0518 (struct mlxsw_emad_tlv_offsets *) skb->cb;
0519
0520 return ((char *) (skb->data + offsets->op_tlv));
0521 }
0522
0523 static char *mlxsw_emad_string_tlv(const struct sk_buff *skb)
0524 {
0525 struct mlxsw_emad_tlv_offsets *offsets =
0526 (struct mlxsw_emad_tlv_offsets *) skb->cb;
0527
0528 if (!offsets->string_tlv)
0529 return NULL;
0530
0531 return ((char *) (skb->data + offsets->string_tlv));
0532 }
0533
0534 static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
0535 {
0536 struct mlxsw_emad_tlv_offsets *offsets =
0537 (struct mlxsw_emad_tlv_offsets *) skb->cb;
0538
0539 return ((char *) (skb->data + offsets->reg_tlv));
0540 }
0541
0542 static char *mlxsw_emad_reg_payload(const char *reg_tlv)
0543 {
0544 return ((char *) (reg_tlv + sizeof(u32)));
0545 }
0546
0547 static char *mlxsw_emad_reg_payload_cmd(const char *mbox)
0548 {
0549 return ((char *) (mbox + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
0550 }
0551
0552 static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
0553 {
0554 char *op_tlv;
0555
0556 op_tlv = mlxsw_emad_op_tlv(skb);
0557 return mlxsw_emad_op_tlv_tid_get(op_tlv);
0558 }
0559
0560 static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
0561 {
0562 char *op_tlv;
0563
0564 op_tlv = mlxsw_emad_op_tlv(skb);
0565 return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
0566 }
0567
0568 static int mlxsw_emad_process_status(char *op_tlv,
0569 enum mlxsw_emad_op_tlv_status *p_status)
0570 {
0571 *p_status = mlxsw_emad_op_tlv_status_get(op_tlv);
0572
0573 switch (*p_status) {
0574 case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
0575 return 0;
0576 case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
0577 case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
0578 return -EAGAIN;
0579 case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
0580 case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
0581 case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
0582 case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
0583 case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
0584 case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
0585 case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
0586 case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
0587 default:
0588 return -EIO;
0589 }
0590 }
0591
0592 static int
0593 mlxsw_emad_process_status_skb(struct sk_buff *skb,
0594 enum mlxsw_emad_op_tlv_status *p_status)
0595 {
0596 return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
0597 }
0598
0599 struct mlxsw_reg_trans {
0600 struct list_head list;
0601 struct list_head bulk_list;
0602 struct mlxsw_core *core;
0603 struct sk_buff *tx_skb;
0604 struct mlxsw_tx_info tx_info;
0605 struct delayed_work timeout_dw;
0606 unsigned int retries;
0607 u64 tid;
0608 struct completion completion;
0609 atomic_t active;
0610 mlxsw_reg_trans_cb_t *cb;
0611 unsigned long cb_priv;
0612 const struct mlxsw_reg_info *reg;
0613 enum mlxsw_core_reg_access_type type;
0614 int err;
0615 char *emad_err_string;
0616 enum mlxsw_emad_op_tlv_status emad_status;
0617 struct rcu_head rcu;
0618 };
0619
0620 static void mlxsw_emad_process_string_tlv(const struct sk_buff *skb,
0621 struct mlxsw_reg_trans *trans)
0622 {
0623 char *string_tlv;
0624 char *string;
0625
0626 string_tlv = mlxsw_emad_string_tlv(skb);
0627 if (!string_tlv)
0628 return;
0629
0630 trans->emad_err_string = kzalloc(MLXSW_EMAD_STRING_TLV_STRING_LEN,
0631 GFP_ATOMIC);
0632 if (!trans->emad_err_string)
0633 return;
0634
0635 string = mlxsw_emad_string_tlv_string_data(string_tlv);
0636 strlcpy(trans->emad_err_string, string,
0637 MLXSW_EMAD_STRING_TLV_STRING_LEN);
0638 }
0639
0640 #define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS 3000
0641 #define MLXSW_EMAD_TIMEOUT_MS 200
0642
0643 static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
0644 {
0645 unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
0646
0647 if (trans->core->fw_flash_in_progress)
0648 timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
0649
0650 queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw,
0651 timeout << trans->retries);
0652 }
0653
0654 static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
0655 struct mlxsw_reg_trans *trans)
0656 {
0657 struct sk_buff *skb;
0658 int err;
0659
0660 skb = skb_clone(trans->tx_skb, GFP_KERNEL);
0661 if (!skb)
0662 return -ENOMEM;
0663
0664 trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
0665 skb->data + mlxsw_core->driver->txhdr_len,
0666 skb->len - mlxsw_core->driver->txhdr_len);
0667
0668 atomic_set(&trans->active, 1);
0669 err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
0670 if (err) {
0671 dev_kfree_skb(skb);
0672 return err;
0673 }
0674 mlxsw_emad_trans_timeout_schedule(trans);
0675 return 0;
0676 }
0677
0678 static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
0679 {
0680 struct mlxsw_core *mlxsw_core = trans->core;
0681
0682 dev_kfree_skb(trans->tx_skb);
0683 spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
0684 list_del_rcu(&trans->list);
0685 spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
0686 trans->err = err;
0687 complete(&trans->completion);
0688 }
0689
0690 static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
0691 struct mlxsw_reg_trans *trans)
0692 {
0693 int err;
0694
0695 if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
0696 trans->retries++;
0697 err = mlxsw_emad_transmit(trans->core, trans);
0698 if (err == 0)
0699 return;
0700
0701 if (!atomic_dec_and_test(&trans->active))
0702 return;
0703 } else {
0704 err = -EIO;
0705 }
0706 mlxsw_emad_trans_finish(trans, err);
0707 }
0708
0709 static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
0710 {
0711 struct mlxsw_reg_trans *trans = container_of(work,
0712 struct mlxsw_reg_trans,
0713 timeout_dw.work);
0714
0715 if (!atomic_dec_and_test(&trans->active))
0716 return;
0717
0718 mlxsw_emad_transmit_retry(trans->core, trans);
0719 }
0720
0721 static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
0722 struct mlxsw_reg_trans *trans,
0723 struct sk_buff *skb)
0724 {
0725 int err;
0726
0727 if (!atomic_dec_and_test(&trans->active))
0728 return;
0729
0730 err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
0731 if (err == -EAGAIN) {
0732 mlxsw_emad_transmit_retry(mlxsw_core, trans);
0733 } else {
0734 if (err == 0) {
0735 char *reg_tlv = mlxsw_emad_reg_tlv(skb);
0736
0737 if (trans->cb)
0738 trans->cb(mlxsw_core,
0739 mlxsw_emad_reg_payload(reg_tlv),
0740 trans->reg->len, trans->cb_priv);
0741 } else {
0742 mlxsw_emad_process_string_tlv(skb, trans);
0743 }
0744 mlxsw_emad_trans_finish(trans, err);
0745 }
0746 }
0747
0748
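/* called with rcu read lock held */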
0749 static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u16 local_port,
0750 void *priv)
0751 {
0752 struct mlxsw_core *mlxsw_core = priv;
0753 struct mlxsw_reg_trans *trans;
0754
0755 trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
0756 skb->data, skb->len);
0757
0758 mlxsw_emad_tlv_parse(skb);
0759
0760 if (!mlxsw_emad_is_resp(skb))
0761 goto free_skb;
0762
0763 list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
0764 if (mlxsw_emad_get_tid(skb) == trans->tid) {
0765 mlxsw_emad_process_response(mlxsw_core, trans, skb);
0766 break;
0767 }
0768 }
0769
0770 free_skb:
0771 dev_kfree_skb(skb);
0772 }
0773
0774 static const struct mlxsw_listener mlxsw_emad_rx_listener =
0775 MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
0776 EMAD, DISCARD);
0777
0778 static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
0779 {
0780 struct workqueue_struct *emad_wq;
0781 u64 tid;
0782 int err;
0783
0784 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
0785 return 0;
0786
0787 emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
0788 if (!emad_wq)
0789 return -ENOMEM;
0790 mlxsw_core->emad_wq = emad_wq;
0791
0792
0793
0794
0795
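/* Use a random number for the upper 32 bits of the transaction ID, so
 * that responses to requests issued by a previous core instance are not
 * mistaken for responses to current requests.
 */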
0796 get_random_bytes(&tid, 4);
0797 tid <<= 32;
0798 atomic64_set(&mlxsw_core->emad.tid, tid);
0799
0800 INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
0801 spin_lock_init(&mlxsw_core->emad.trans_list_lock);
0802
0803 err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
0804 mlxsw_core);
0805 if (err)
0806 goto err_trap_register;
0807
0808 mlxsw_core->emad.use_emad = true;
0809
0810 return 0;
0811
0812 err_trap_register:
0813 destroy_workqueue(mlxsw_core->emad_wq);
0814 return err;
0815 }
0816
0817 static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
0818 {
0819
0820 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
0821 return;
0822
0823 mlxsw_core->emad.use_emad = false;
0824 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
0825 mlxsw_core);
0826 destroy_workqueue(mlxsw_core->emad_wq);
0827 }
0828
0829 static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
0830 u16 reg_len, bool enable_string_tlv)
0831 {
0832 struct sk_buff *skb;
0833 u16 emad_len;
0834
0835 emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
0836 (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
0837 sizeof(u32) + mlxsw_core->driver->txhdr_len);
0838 if (enable_string_tlv)
0839 emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
0840 if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
0841 return NULL;
0842
0843 skb = netdev_alloc_skb(NULL, emad_len);
0844 if (!skb)
0845 return NULL;
0846 memset(skb->data, 0, emad_len);
0847 skb_reserve(skb, emad_len);
0848
0849 return skb;
0850 }
0851
0852 static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
0853 const struct mlxsw_reg_info *reg,
0854 char *payload,
0855 enum mlxsw_core_reg_access_type type,
0856 struct mlxsw_reg_trans *trans,
0857 struct list_head *bulk_list,
0858 mlxsw_reg_trans_cb_t *cb,
0859 unsigned long cb_priv, u64 tid)
0860 {
0861 bool enable_string_tlv;
0862 struct sk_buff *skb;
0863 int err;
0864
0865 dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
0866 tid, reg->id, mlxsw_reg_id_str(reg->id),
0867 mlxsw_core_reg_access_type_str(type));
0868
0869
0870
0871
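/* Since this can be changed during emad_reg_access, read it once and
 * use the value all the way.
 */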
0872 enable_string_tlv = mlxsw_core->emad.enable_string_tlv;
0873
0874 skb = mlxsw_emad_alloc(mlxsw_core, reg->len, enable_string_tlv);
0875 if (!skb)
0876 return -ENOMEM;
0877
0878 list_add_tail(&trans->bulk_list, bulk_list);
0879 trans->core = mlxsw_core;
0880 trans->tx_skb = skb;
0881 trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
0882 trans->tx_info.is_emad = true;
0883 INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
0884 trans->tid = tid;
0885 init_completion(&trans->completion);
0886 trans->cb = cb;
0887 trans->cb_priv = cb_priv;
0888 trans->reg = reg;
0889 trans->type = type;
0890
0891 mlxsw_emad_construct(skb, reg, payload, type, trans->tid,
0892 enable_string_tlv);
0893 mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
0894
0895 spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
0896 list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
0897 spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
0898 err = mlxsw_emad_transmit(mlxsw_core, trans);
0899 if (err)
0900 goto err_out;
0901 return 0;
0902
0903 err_out:
0904 spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
0905 list_del_rcu(&trans->list);
0906 spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
0907 list_del(&trans->bulk_list);
0908 dev_kfree_skb(trans->tx_skb);
0909 return err;
0910 }
0911
0912
0913
0914
0915
0916 int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
0917 {
0918 spin_lock(&mlxsw_core_driver_list_lock);
0919 list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
0920 spin_unlock(&mlxsw_core_driver_list_lock);
0921 return 0;
0922 }
0923 EXPORT_SYMBOL(mlxsw_core_driver_register);
0924
0925 void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
0926 {
0927 spin_lock(&mlxsw_core_driver_list_lock);
0928 list_del(&mlxsw_driver->list);
0929 spin_unlock(&mlxsw_core_driver_list_lock);
0930 }
0931 EXPORT_SYMBOL(mlxsw_core_driver_unregister);
0932
0933 static struct mlxsw_driver *__driver_find(const char *kind)
0934 {
0935 struct mlxsw_driver *mlxsw_driver;
0936
0937 list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
0938 if (strcmp(mlxsw_driver->kind, kind) == 0)
0939 return mlxsw_driver;
0940 }
0941 return NULL;
0942 }
0943
0944 static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
0945 {
0946 struct mlxsw_driver *mlxsw_driver;
0947
0948 spin_lock(&mlxsw_core_driver_list_lock);
0949 mlxsw_driver = __driver_find(kind);
0950 spin_unlock(&mlxsw_core_driver_list_lock);
0951 return mlxsw_driver;
0952 }
0953
0954 int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core,
0955 struct mlxfw_dev *mlxfw_dev,
0956 const struct firmware *firmware,
0957 struct netlink_ext_ack *extack)
0958 {
0959 int err;
0960
0961 mlxsw_core->fw_flash_in_progress = true;
0962 err = mlxfw_firmware_flash(mlxfw_dev, firmware, extack);
0963 mlxsw_core->fw_flash_in_progress = false;
0964
0965 return err;
0966 }
0967
0968 struct mlxsw_core_fw_info {
0969 struct mlxfw_dev mlxfw_dev;
0970 struct mlxsw_core *mlxsw_core;
0971 };
0972
0973 static int mlxsw_core_fw_component_query(struct mlxfw_dev *mlxfw_dev,
0974 u16 component_index, u32 *p_max_size,
0975 u8 *p_align_bits, u16 *p_max_write_size)
0976 {
0977 struct mlxsw_core_fw_info *mlxsw_core_fw_info =
0978 container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
0979 struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
0980 char mcqi_pl[MLXSW_REG_MCQI_LEN];
0981 int err;
0982
0983 mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
0984 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcqi), mcqi_pl);
0985 if (err)
0986 return err;
0987 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, p_max_write_size);
0988
0989 *p_align_bits = max_t(u8, *p_align_bits, 2);
0990 *p_max_write_size = min_t(u16, *p_max_write_size, MLXSW_REG_MCDA_MAX_DATA_LEN);
0991 return 0;
0992 }
0993
0994 static int mlxsw_core_fw_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
0995 {
0996 struct mlxsw_core_fw_info *mlxsw_core_fw_info =
0997 container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
0998 struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
0999 char mcc_pl[MLXSW_REG_MCC_LEN];
1000 u8 control_state;
1001 int err;
1002
1003 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
1004 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
1005 if (err)
1006 return err;
1007
1008 mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
1009 if (control_state != MLXFW_FSM_STATE_IDLE)
1010 return -EBUSY;
1011
1012 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 0, *fwhandle, 0);
1013 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
1014 }
1015
1016 static int mlxsw_core_fw_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
1017 u16 component_index, u32 component_size)
1018 {
1019 struct mlxsw_core_fw_info *mlxsw_core_fw_info =
1020 container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
1021 struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
1022 char mcc_pl[MLXSW_REG_MCC_LEN];
1023
1024 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
1025 component_index, fwhandle, component_size);
1026 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
1027 }
1028
1029 static int mlxsw_core_fw_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
1030 u8 *data, u16 size, u32 offset)
1031 {
1032 struct mlxsw_core_fw_info *mlxsw_core_fw_info =
1033 container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
1034 struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
1035 char mcda_pl[MLXSW_REG_MCDA_LEN];
1036
1037 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
1038 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcda), mcda_pl);
1039 }
1040
1041 static int mlxsw_core_fw_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
1042 u16 component_index)
1043 {
1044 struct mlxsw_core_fw_info *mlxsw_core_fw_info =
1045 container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
1046 struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
1047 char mcc_pl[MLXSW_REG_MCC_LEN];
1048
1049 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
1050 component_index, fwhandle, 0);
1051 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
1052 }
1053
1054 static int mlxsw_core_fw_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
1055 {
1056 struct mlxsw_core_fw_info *mlxsw_core_fw_info =
1057 container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
1058 struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
1059 char mcc_pl[MLXSW_REG_MCC_LEN];
1060
1061 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, fwhandle, 0);
1062 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
1063 }
1064
1065 static int mlxsw_core_fw_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
1066 enum mlxfw_fsm_state *fsm_state,
1067 enum mlxfw_fsm_state_err *fsm_state_err)
1068 {
1069 struct mlxsw_core_fw_info *mlxsw_core_fw_info =
1070 container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
1071 struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
1072 char mcc_pl[MLXSW_REG_MCC_LEN];
1073 u8 control_state;
1074 u8 error_code;
1075 int err;
1076
1077 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
1078 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
1079 if (err)
1080 return err;
1081
1082 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
1083 *fsm_state = control_state;
1084 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, MLXFW_FSM_STATE_ERR_MAX);
1085 return 0;
1086 }
1087
1088 static void mlxsw_core_fw_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
1089 {
1090 struct mlxsw_core_fw_info *mlxsw_core_fw_info =
1091 container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
1092 struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
1093 char mcc_pl[MLXSW_REG_MCC_LEN];
1094
1095 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, fwhandle, 0);
1096 mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
1097 }
1098
1099 static void mlxsw_core_fw_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
1100 {
1101 struct mlxsw_core_fw_info *mlxsw_core_fw_info =
1102 container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
1103 struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
1104 char mcc_pl[MLXSW_REG_MCC_LEN];
1105
1106 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, fwhandle, 0);
1107 mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
1108 }
1109
1110 static const struct mlxfw_dev_ops mlxsw_core_fw_mlxsw_dev_ops = {
1111 .component_query = mlxsw_core_fw_component_query,
1112 .fsm_lock = mlxsw_core_fw_fsm_lock,
1113 .fsm_component_update = mlxsw_core_fw_fsm_component_update,
1114 .fsm_block_download = mlxsw_core_fw_fsm_block_download,
1115 .fsm_component_verify = mlxsw_core_fw_fsm_component_verify,
1116 .fsm_activate = mlxsw_core_fw_fsm_activate,
1117 .fsm_query_state = mlxsw_core_fw_fsm_query_state,
1118 .fsm_cancel = mlxsw_core_fw_fsm_cancel,
1119 .fsm_release = mlxsw_core_fw_fsm_release,
1120 };
1121
1122 static int mlxsw_core_dev_fw_flash(struct mlxsw_core *mlxsw_core,
1123 const struct firmware *firmware,
1124 struct netlink_ext_ack *extack)
1125 {
1126 struct mlxsw_core_fw_info mlxsw_core_fw_info = {
1127 .mlxfw_dev = {
1128 .ops = &mlxsw_core_fw_mlxsw_dev_ops,
1129 .psid = mlxsw_core->bus_info->psid,
1130 .psid_size = strlen(mlxsw_core->bus_info->psid),
1131 .devlink = priv_to_devlink(mlxsw_core),
1132 },
1133 .mlxsw_core = mlxsw_core
1134 };
1135
1136 return mlxsw_core_fw_flash(mlxsw_core, &mlxsw_core_fw_info.mlxfw_dev,
1137 firmware, extack);
1138 }
1139
1140 static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
1141 const struct mlxsw_bus_info *mlxsw_bus_info,
1142 const struct mlxsw_fw_rev *req_rev,
1143 const char *filename)
1144 {
1145 const struct mlxsw_fw_rev *rev = &mlxsw_bus_info->fw_rev;
1146 union devlink_param_value value;
1147 const struct firmware *firmware;
1148 int err;
1149
1150
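/* Don't check if the driver does not require a specific FW version */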
1151 if (!req_rev || !filename)
1152 return 0;
1153
1154
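/* Don't check if the devlink 'fw_load_policy' param is set to 'flash' */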
1155 err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_core),
1156 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
1157 &value);
1158 if (err)
1159 return err;
1160 if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
1161 return 0;
1162
1163
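/* Validate that the driver and the running firmware are compatible */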
1164 if (rev->major != req_rev->major) {
1165 WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
1166 rev->major, req_rev->major);
1167 return -EINVAL;
1168 }
1169 if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
1170 return 0;
1171
1172 dev_err(mlxsw_bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
1173 rev->major, rev->minor, rev->subminor, req_rev->major,
1174 req_rev->minor, req_rev->subminor);
1175 dev_info(mlxsw_bus_info->dev, "Flashing firmware using file %s\n", filename);
1176
1177 err = request_firmware_direct(&firmware, filename, mlxsw_bus_info->dev);
1178 if (err) {
1179 dev_err(mlxsw_bus_info->dev, "Could not request firmware file %s\n", filename);
1180 return err;
1181 }
1182
1183 err = mlxsw_core_dev_fw_flash(mlxsw_core, firmware, NULL);
1184 release_firmware(firmware);
1185 if (err)
1186 dev_err(mlxsw_bus_info->dev, "Could not upgrade firmware\n");
1187
1188
1189
1190
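/* If the running firmware supports a post-flash reset, return -EAGAIN so
 * the caller resets the device and activates the new firmware.
 */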
1191 if (rev->minor >= req_rev->can_reset_minor)
1192 return err ? err : -EAGAIN;
1193 else
1194 return 0;
1195 }
1196
1197 static int mlxsw_core_fw_flash_update(struct mlxsw_core *mlxsw_core,
1198 struct devlink_flash_update_params *params,
1199 struct netlink_ext_ack *extack)
1200 {
1201 return mlxsw_core_dev_fw_flash(mlxsw_core, params->fw, extack);
1202 }
1203
1204 static int mlxsw_core_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
1205 union devlink_param_value val,
1206 struct netlink_ext_ack *extack)
1207 {
1208 if (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER &&
1209 val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) {
1210 NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
1211 return -EINVAL;
1212 }
1213
1214 return 0;
1215 }
1216
1217 static const struct devlink_param mlxsw_core_fw_devlink_params[] = {
1218 DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
1219 mlxsw_core_devlink_param_fw_load_policy_validate),
1220 };
1221
1222 static int mlxsw_core_fw_params_register(struct mlxsw_core *mlxsw_core)
1223 {
1224 struct devlink *devlink = priv_to_devlink(mlxsw_core);
1225 union devlink_param_value value;
1226 int err;
1227
1228 err = devlink_params_register(devlink, mlxsw_core_fw_devlink_params,
1229 ARRAY_SIZE(mlxsw_core_fw_devlink_params));
1230 if (err)
1231 return err;
1232
1233 value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
1234 devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, value);
1235 return 0;
1236 }
1237
1238 static void mlxsw_core_fw_params_unregister(struct mlxsw_core *mlxsw_core)
1239 {
1240 devlink_params_unregister(priv_to_devlink(mlxsw_core), mlxsw_core_fw_devlink_params,
1241 ARRAY_SIZE(mlxsw_core_fw_devlink_params));
1242 }
1243
1244 static void *__dl_port(struct devlink_port *devlink_port)
1245 {
1246 return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
1247 }
1248
1249 static int mlxsw_devlink_port_split(struct devlink *devlink,
1250 struct devlink_port *port,
1251 unsigned int count,
1252 struct netlink_ext_ack *extack)
1253 {
1254 struct mlxsw_core_port *mlxsw_core_port = __dl_port(port);
1255 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1256
1257 if (!mlxsw_core->driver->port_split)
1258 return -EOPNOTSUPP;
1259 return mlxsw_core->driver->port_split(mlxsw_core,
1260 mlxsw_core_port->local_port,
1261 count, extack);
1262 }
1263
1264 static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
1265 struct devlink_port *port,
1266 struct netlink_ext_ack *extack)
1267 {
1268 struct mlxsw_core_port *mlxsw_core_port = __dl_port(port);
1269 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1270
1271 if (!mlxsw_core->driver->port_unsplit)
1272 return -EOPNOTSUPP;
1273 return mlxsw_core->driver->port_unsplit(mlxsw_core,
1274 mlxsw_core_port->local_port,
1275 extack);
1276 }
1277
1278 static int
1279 mlxsw_devlink_sb_pool_get(struct devlink *devlink,
1280 unsigned int sb_index, u16 pool_index,
1281 struct devlink_sb_pool_info *pool_info)
1282 {
1283 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1284 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1285
1286 if (!mlxsw_driver->sb_pool_get)
1287 return -EOPNOTSUPP;
1288 return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
1289 pool_index, pool_info);
1290 }
1291
1292 static int
1293 mlxsw_devlink_sb_pool_set(struct devlink *devlink,
1294 unsigned int sb_index, u16 pool_index, u32 size,
1295 enum devlink_sb_threshold_type threshold_type,
1296 struct netlink_ext_ack *extack)
1297 {
1298 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1299 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1300
1301 if (!mlxsw_driver->sb_pool_set)
1302 return -EOPNOTSUPP;
1303 return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
1304 pool_index, size, threshold_type,
1305 extack);
1306 }
1307
1308 static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
1309 enum devlink_port_type port_type)
1310 {
1311 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1312 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1313 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1314
1315 if (!mlxsw_driver->port_type_set)
1316 return -EOPNOTSUPP;
1317
1318 return mlxsw_driver->port_type_set(mlxsw_core,
1319 mlxsw_core_port->local_port,
1320 port_type);
1321 }
1322
1323 static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
1324 unsigned int sb_index, u16 pool_index,
1325 u32 *p_threshold)
1326 {
1327 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1328 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1329 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1330
1331 if (!mlxsw_driver->sb_port_pool_get ||
1332 !mlxsw_core_port_check(mlxsw_core_port))
1333 return -EOPNOTSUPP;
1334 return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
1335 pool_index, p_threshold);
1336 }
1337
1338 static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
1339 unsigned int sb_index, u16 pool_index,
1340 u32 threshold,
1341 struct netlink_ext_ack *extack)
1342 {
1343 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1344 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1345 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1346
1347 if (!mlxsw_driver->sb_port_pool_set ||
1348 !mlxsw_core_port_check(mlxsw_core_port))
1349 return -EOPNOTSUPP;
1350 return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
1351 pool_index, threshold, extack);
1352 }
1353
1354 static int
1355 mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
1356 unsigned int sb_index, u16 tc_index,
1357 enum devlink_sb_pool_type pool_type,
1358 u16 *p_pool_index, u32 *p_threshold)
1359 {
1360 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1361 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1362 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1363
1364 if (!mlxsw_driver->sb_tc_pool_bind_get ||
1365 !mlxsw_core_port_check(mlxsw_core_port))
1366 return -EOPNOTSUPP;
1367 return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
1368 tc_index, pool_type,
1369 p_pool_index, p_threshold);
1370 }
1371
1372 static int
1373 mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
1374 unsigned int sb_index, u16 tc_index,
1375 enum devlink_sb_pool_type pool_type,
1376 u16 pool_index, u32 threshold,
1377 struct netlink_ext_ack *extack)
1378 {
1379 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1380 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1381 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1382
1383 if (!mlxsw_driver->sb_tc_pool_bind_set ||
1384 !mlxsw_core_port_check(mlxsw_core_port))
1385 return -EOPNOTSUPP;
1386 return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
1387 tc_index, pool_type,
1388 pool_index, threshold, extack);
1389 }
1390
1391 static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
1392 unsigned int sb_index)
1393 {
1394 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1395 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1396
1397 if (!mlxsw_driver->sb_occ_snapshot)
1398 return -EOPNOTSUPP;
1399 return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
1400 }
1401
1402 static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
1403 unsigned int sb_index)
1404 {
1405 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1406 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1407
1408 if (!mlxsw_driver->sb_occ_max_clear)
1409 return -EOPNOTSUPP;
1410 return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
1411 }
1412
1413 static int
1414 mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
1415 unsigned int sb_index, u16 pool_index,
1416 u32 *p_cur, u32 *p_max)
1417 {
1418 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1419 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1420 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1421
1422 if (!mlxsw_driver->sb_occ_port_pool_get ||
1423 !mlxsw_core_port_check(mlxsw_core_port))
1424 return -EOPNOTSUPP;
1425 return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
1426 pool_index, p_cur, p_max);
1427 }
1428
1429 static int
1430 mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
1431 unsigned int sb_index, u16 tc_index,
1432 enum devlink_sb_pool_type pool_type,
1433 u32 *p_cur, u32 *p_max)
1434 {
1435 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
1436 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1437 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1438
1439 if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
1440 !mlxsw_core_port_check(mlxsw_core_port))
1441 return -EOPNOTSUPP;
1442 return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
1443 sb_index, tc_index,
1444 pool_type, p_cur, p_max);
1445 }
1446
1447 static int
1448 mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
1449 struct netlink_ext_ack *extack)
1450 {
1451 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1452 char fw_info_psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
1453 u32 hw_rev, fw_major, fw_minor, fw_sub_minor;
1454 char mgir_pl[MLXSW_REG_MGIR_LEN];
1455 char buf[32];
1456 int err;
1457
1458 err = devlink_info_driver_name_put(req,
1459 mlxsw_core->bus_info->device_kind);
1460 if (err)
1461 return err;
1462
1463 mlxsw_reg_mgir_pack(mgir_pl);
1464 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl);
1465 if (err)
1466 return err;
1467 mlxsw_reg_mgir_unpack(mgir_pl, &hw_rev, fw_info_psid, &fw_major,
1468 &fw_minor, &fw_sub_minor);
1469
1470 sprintf(buf, "%X", hw_rev);
1471 err = devlink_info_version_fixed_put(req, "hw.revision", buf);
1472 if (err)
1473 return err;
1474
1475 err = devlink_info_version_fixed_put(req,
1476 DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
1477 fw_info_psid);
1478 if (err)
1479 return err;
1480
1481 sprintf(buf, "%d.%d.%d", fw_major, fw_minor, fw_sub_minor);
1482 err = devlink_info_version_running_put(req, "fw.version", buf);
1483 if (err)
1484 return err;
1485
1486 return devlink_info_version_running_put(req,
1487 DEVLINK_INFO_VERSION_GENERIC_FW,
1488 buf);
1489 }
1490
1491 static int
1492 mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink,
1493 bool netns_change, enum devlink_reload_action action,
1494 enum devlink_reload_limit limit,
1495 struct netlink_ext_ack *extack)
1496 {
1497 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1498
1499 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET))
1500 return -EOPNOTSUPP;
1501
1502 mlxsw_core_bus_device_unregister(mlxsw_core, true);
1503 return 0;
1504 }
1505
1506 static int
1507 mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink, enum devlink_reload_action action,
1508 enum devlink_reload_limit limit, u32 *actions_performed,
1509 struct netlink_ext_ack *extack)
1510 {
1511 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1512 int err;
1513
1514 *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
1515 BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
1516 err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
1517 mlxsw_core->bus,
1518 mlxsw_core->bus_priv, true,
1519 devlink, extack);
1520 return err;
1521 }
1522
1523 static int mlxsw_devlink_flash_update(struct devlink *devlink,
1524 struct devlink_flash_update_params *params,
1525 struct netlink_ext_ack *extack)
1526 {
1527 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1528
1529 return mlxsw_core_fw_flash_update(mlxsw_core, params, extack);
1530 }
1531
1532 static int mlxsw_devlink_trap_init(struct devlink *devlink,
1533 const struct devlink_trap *trap,
1534 void *trap_ctx)
1535 {
1536 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1537 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1538
1539 if (!mlxsw_driver->trap_init)
1540 return -EOPNOTSUPP;
1541 return mlxsw_driver->trap_init(mlxsw_core, trap, trap_ctx);
1542 }
1543
1544 static void mlxsw_devlink_trap_fini(struct devlink *devlink,
1545 const struct devlink_trap *trap,
1546 void *trap_ctx)
1547 {
1548 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1549 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1550
1551 if (!mlxsw_driver->trap_fini)
1552 return;
1553 mlxsw_driver->trap_fini(mlxsw_core, trap, trap_ctx);
1554 }
1555
1556 static int mlxsw_devlink_trap_action_set(struct devlink *devlink,
1557 const struct devlink_trap *trap,
1558 enum devlink_trap_action action,
1559 struct netlink_ext_ack *extack)
1560 {
1561 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1562 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1563
1564 if (!mlxsw_driver->trap_action_set)
1565 return -EOPNOTSUPP;
1566 return mlxsw_driver->trap_action_set(mlxsw_core, trap, action, extack);
1567 }
1568
1569 static int
1570 mlxsw_devlink_trap_group_init(struct devlink *devlink,
1571 const struct devlink_trap_group *group)
1572 {
1573 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1574 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1575
1576 if (!mlxsw_driver->trap_group_init)
1577 return -EOPNOTSUPP;
1578 return mlxsw_driver->trap_group_init(mlxsw_core, group);
1579 }
1580
1581 static int
1582 mlxsw_devlink_trap_group_set(struct devlink *devlink,
1583 const struct devlink_trap_group *group,
1584 const struct devlink_trap_policer *policer,
1585 struct netlink_ext_ack *extack)
1586 {
1587 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1588 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1589
1590 if (!mlxsw_driver->trap_group_set)
1591 return -EOPNOTSUPP;
1592 return mlxsw_driver->trap_group_set(mlxsw_core, group, policer, extack);
1593 }
1594
1595 static int
1596 mlxsw_devlink_trap_policer_init(struct devlink *devlink,
1597 const struct devlink_trap_policer *policer)
1598 {
1599 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1600 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1601
1602 if (!mlxsw_driver->trap_policer_init)
1603 return -EOPNOTSUPP;
1604 return mlxsw_driver->trap_policer_init(mlxsw_core, policer);
1605 }
1606
1607 static void
1608 mlxsw_devlink_trap_policer_fini(struct devlink *devlink,
1609 const struct devlink_trap_policer *policer)
1610 {
1611 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1612 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1613
1614 if (!mlxsw_driver->trap_policer_fini)
1615 return;
1616 mlxsw_driver->trap_policer_fini(mlxsw_core, policer);
1617 }
1618
1619 static int
1620 mlxsw_devlink_trap_policer_set(struct devlink *devlink,
1621 const struct devlink_trap_policer *policer,
1622 u64 rate, u64 burst,
1623 struct netlink_ext_ack *extack)
1624 {
1625 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1626 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1627
1628 if (!mlxsw_driver->trap_policer_set)
1629 return -EOPNOTSUPP;
1630 return mlxsw_driver->trap_policer_set(mlxsw_core, policer, rate, burst,
1631 extack);
1632 }
1633
1634 static int
1635 mlxsw_devlink_trap_policer_counter_get(struct devlink *devlink,
1636 const struct devlink_trap_policer *policer,
1637 u64 *p_drops)
1638 {
1639 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1640 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1641
1642 if (!mlxsw_driver->trap_policer_counter_get)
1643 return -EOPNOTSUPP;
1644 return mlxsw_driver->trap_policer_counter_get(mlxsw_core, policer,
1645 p_drops);
1646 }
1647
1648 static const struct devlink_ops mlxsw_devlink_ops = {
1649 .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
1650 BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
1651 .reload_down = mlxsw_devlink_core_bus_device_reload_down,
1652 .reload_up = mlxsw_devlink_core_bus_device_reload_up,
1653 .port_type_set = mlxsw_devlink_port_type_set,
1654 .port_split = mlxsw_devlink_port_split,
1655 .port_unsplit = mlxsw_devlink_port_unsplit,
1656 .sb_pool_get = mlxsw_devlink_sb_pool_get,
1657 .sb_pool_set = mlxsw_devlink_sb_pool_set,
1658 .sb_port_pool_get = mlxsw_devlink_sb_port_pool_get,
1659 .sb_port_pool_set = mlxsw_devlink_sb_port_pool_set,
1660 .sb_tc_pool_bind_get = mlxsw_devlink_sb_tc_pool_bind_get,
1661 .sb_tc_pool_bind_set = mlxsw_devlink_sb_tc_pool_bind_set,
1662 .sb_occ_snapshot = mlxsw_devlink_sb_occ_snapshot,
1663 .sb_occ_max_clear = mlxsw_devlink_sb_occ_max_clear,
1664 .sb_occ_port_pool_get = mlxsw_devlink_sb_occ_port_pool_get,
1665 .sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
1666 .info_get = mlxsw_devlink_info_get,
1667 .flash_update = mlxsw_devlink_flash_update,
1668 .trap_init = mlxsw_devlink_trap_init,
1669 .trap_fini = mlxsw_devlink_trap_fini,
1670 .trap_action_set = mlxsw_devlink_trap_action_set,
1671 .trap_group_init = mlxsw_devlink_trap_group_init,
1672 .trap_group_set = mlxsw_devlink_trap_group_set,
1673 .trap_policer_init = mlxsw_devlink_trap_policer_init,
1674 .trap_policer_fini = mlxsw_devlink_trap_policer_fini,
1675 .trap_policer_set = mlxsw_devlink_trap_policer_set,
1676 .trap_policer_counter_get = mlxsw_devlink_trap_policer_counter_get,
1677 };
1678
1679 static int mlxsw_core_params_register(struct mlxsw_core *mlxsw_core)
1680 {
1681 int err;
1682
1683 err = mlxsw_core_fw_params_register(mlxsw_core);
1684 if (err)
1685 return err;
1686
1687 if (mlxsw_core->driver->params_register) {
1688 err = mlxsw_core->driver->params_register(mlxsw_core);
1689 if (err)
1690 goto err_params_register;
1691 }
1692 return 0;
1693
1694 err_params_register:
1695 mlxsw_core_fw_params_unregister(mlxsw_core);
1696 return err;
1697 }
1698
1699 static void mlxsw_core_params_unregister(struct mlxsw_core *mlxsw_core)
1700 {
1701 mlxsw_core_fw_params_unregister(mlxsw_core);
1702 if (mlxsw_core->driver->params_register)
1703 mlxsw_core->driver->params_unregister(mlxsw_core);
1704 }
1705
1706 struct mlxsw_core_health_event {
1707 struct mlxsw_core *mlxsw_core;
1708 char mfde_pl[MLXSW_REG_MFDE_LEN];
1709 struct work_struct work;
1710 };
1711
1712 static void mlxsw_core_health_event_work(struct work_struct *work)
1713 {
1714 struct mlxsw_core_health_event *event;
1715 struct mlxsw_core *mlxsw_core;
1716
1717 event = container_of(work, struct mlxsw_core_health_event, work);
1718 mlxsw_core = event->mlxsw_core;
1719 devlink_health_report(mlxsw_core->health.fw_fatal, "FW fatal event occurred",
1720 event->mfde_pl);
1721 kfree(event);
1722 }
1723
1724 static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg,
1725 char *mfde_pl, void *priv)
1726 {
1727 struct mlxsw_core_health_event *event;
1728 struct mlxsw_core *mlxsw_core = priv;
1729
1730 event = kmalloc(sizeof(*event), GFP_ATOMIC);
1731 if (!event)
1732 return;
1733 event->mlxsw_core = mlxsw_core;
1734 memcpy(event->mfde_pl, mfde_pl, sizeof(event->mfde_pl));
1735 INIT_WORK(&event->work, mlxsw_core_health_event_work);
1736 mlxsw_core_schedule_work(&event->work);
1737 }
1738
1739 static const struct mlxsw_listener mlxsw_core_health_listener =
1740 MLXSW_CORE_EVENTL(mlxsw_core_health_listener_func, MFDE);
1741
1742 static int
1743 mlxsw_core_health_fw_fatal_dump_fatal_cause(const char *mfde_pl,
1744 struct devlink_fmsg *fmsg)
1745 {
1746 u32 val, tile_v;
1747 int err;
1748
1749 val = mlxsw_reg_mfde_fatal_cause_id_get(mfde_pl);
1750 err = devlink_fmsg_u32_pair_put(fmsg, "cause_id", val);
1751 if (err)
1752 return err;
1753 tile_v = mlxsw_reg_mfde_fatal_cause_tile_v_get(mfde_pl);
1754 if (tile_v) {
1755 val = mlxsw_reg_mfde_fatal_cause_tile_index_get(mfde_pl);
1756 err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val);
1757 if (err)
1758 return err;
1759 }
1760
1761 return 0;
1762 }
1763
1764 static int
1765 mlxsw_core_health_fw_fatal_dump_fw_assert(const char *mfde_pl,
1766 struct devlink_fmsg *fmsg)
1767 {
1768 u32 val, tile_v;
1769 int err;
1770
1771 val = mlxsw_reg_mfde_fw_assert_var0_get(mfde_pl);
1772 err = devlink_fmsg_u32_pair_put(fmsg, "var0", val);
1773 if (err)
1774 return err;
1775 val = mlxsw_reg_mfde_fw_assert_var1_get(mfde_pl);
1776 err = devlink_fmsg_u32_pair_put(fmsg, "var1", val);
1777 if (err)
1778 return err;
1779 val = mlxsw_reg_mfde_fw_assert_var2_get(mfde_pl);
1780 err = devlink_fmsg_u32_pair_put(fmsg, "var2", val);
1781 if (err)
1782 return err;
1783 val = mlxsw_reg_mfde_fw_assert_var3_get(mfde_pl);
1784 err = devlink_fmsg_u32_pair_put(fmsg, "var3", val);
1785 if (err)
1786 return err;
1787 val = mlxsw_reg_mfde_fw_assert_var4_get(mfde_pl);
1788 err = devlink_fmsg_u32_pair_put(fmsg, "var4", val);
1789 if (err)
1790 return err;
1791 val = mlxsw_reg_mfde_fw_assert_existptr_get(mfde_pl);
1792 err = devlink_fmsg_u32_pair_put(fmsg, "existptr", val);
1793 if (err)
1794 return err;
1795 val = mlxsw_reg_mfde_fw_assert_callra_get(mfde_pl);
1796 err = devlink_fmsg_u32_pair_put(fmsg, "callra", val);
1797 if (err)
1798 return err;
1799 val = mlxsw_reg_mfde_fw_assert_oe_get(mfde_pl);
1800 err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
1801 if (err)
1802 return err;
1803 tile_v = mlxsw_reg_mfde_fw_assert_tile_v_get(mfde_pl);
1804 if (tile_v) {
1805 val = mlxsw_reg_mfde_fw_assert_tile_index_get(mfde_pl);
1806 err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val);
1807 if (err)
1808 return err;
1809 }
1810 val = mlxsw_reg_mfde_fw_assert_ext_synd_get(mfde_pl);
1811 err = devlink_fmsg_u32_pair_put(fmsg, "ext_synd", val);
1812 if (err)
1813 return err;
1814
1815 return 0;
1816 }
1817
1818 static int
1819 mlxsw_core_health_fw_fatal_dump_kvd_im_stop(const char *mfde_pl,
1820 struct devlink_fmsg *fmsg)
1821 {
1822 u32 val;
1823 int err;
1824
1825 val = mlxsw_reg_mfde_kvd_im_stop_oe_get(mfde_pl);
1826 err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
1827 if (err)
1828 return err;
1829 val = mlxsw_reg_mfde_kvd_im_stop_pipes_mask_get(mfde_pl);
1830 return devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val);
1831 }
1832
1833 static int
1834 mlxsw_core_health_fw_fatal_dump_crspace_to(const char *mfde_pl,
1835 struct devlink_fmsg *fmsg)
1836 {
1837 u32 val;
1838 int err;
1839
1840 val = mlxsw_reg_mfde_crspace_to_log_address_get(mfde_pl);
1841 err = devlink_fmsg_u32_pair_put(fmsg, "log_address", val);
1842 if (err)
1843 return err;
1844 val = mlxsw_reg_mfde_crspace_to_oe_get(mfde_pl);
1845 err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
1846 if (err)
1847 return err;
1848 val = mlxsw_reg_mfde_crspace_to_log_id_get(mfde_pl);
1849 err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val);
1850 if (err)
1851 return err;
1852 val = mlxsw_reg_mfde_crspace_to_log_ip_get(mfde_pl);
1853 err = devlink_fmsg_u64_pair_put(fmsg, "log_ip", val);
1854 if (err)
1855 return err;
1856
1857 return 0;
1858 }
1859
1860 static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *reporter,
1861 struct devlink_fmsg *fmsg, void *priv_ctx,
1862 struct netlink_ext_ack *extack)
1863 {
1864 char *mfde_pl = priv_ctx;
1865 char *val_str;
1866 u8 event_id;
1867 u32 val;
1868 int err;
1869
1870 if (!priv_ctx)
1871 /* User-triggered dumps without event context are not possible. */
1872 return -EOPNOTSUPP;
1873
1874 val = mlxsw_reg_mfde_irisc_id_get(mfde_pl);
1875 err = devlink_fmsg_u8_pair_put(fmsg, "irisc_id", val);
1876 if (err)
1877 return err;
1878 err = devlink_fmsg_arr_pair_nest_start(fmsg, "event");
1879 if (err)
1880 return err;
1881
1882 event_id = mlxsw_reg_mfde_event_id_get(mfde_pl);
1883 err = devlink_fmsg_u32_pair_put(fmsg, "id", event_id);
1884 if (err)
1885 return err;
1886 switch (event_id) {
1887 case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
1888 val_str = "CR space timeout";
1889 break;
1890 case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
1891 val_str = "KVD insertion machine stopped";
1892 break;
1893 case MLXSW_REG_MFDE_EVENT_ID_TEST:
1894 val_str = "Test";
1895 break;
1896 case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT:
1897 val_str = "FW assert";
1898 break;
1899 case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE:
1900 val_str = "Fatal cause";
1901 break;
1902 default:
1903 val_str = NULL;
1904 }
1905 if (val_str) {
1906 err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str);
1907 if (err)
1908 return err;
1909 }
1910
1911 err = devlink_fmsg_arr_pair_nest_end(fmsg);
1912 if (err)
1913 return err;
1914
1915 err = devlink_fmsg_arr_pair_nest_start(fmsg, "severity");
1916 if (err)
1917 return err;
1918
1919 val = mlxsw_reg_mfde_severity_get(mfde_pl);
1920 err = devlink_fmsg_u8_pair_put(fmsg, "id", val);
1921 if (err)
1922 return err;
1923 switch (val) {
1924 case MLXSW_REG_MFDE_SEVERITY_FATL:
1925 val_str = "Fatal";
1926 break;
1927 case MLXSW_REG_MFDE_SEVERITY_NRML:
1928 val_str = "Normal";
1929 break;
1930 case MLXSW_REG_MFDE_SEVERITY_INTR:
1931 val_str = "Debug";
1932 break;
1933 default:
1934 val_str = NULL;
1935 }
1936 if (val_str) {
1937 err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str);
1938 if (err)
1939 return err;
1940 }
1941
1942 err = devlink_fmsg_arr_pair_nest_end(fmsg);
1943 if (err)
1944 return err;
1945
1946 val = mlxsw_reg_mfde_method_get(mfde_pl);
1947 switch (val) {
1948 case MLXSW_REG_MFDE_METHOD_QUERY:
1949 val_str = "query";
1950 break;
1951 case MLXSW_REG_MFDE_METHOD_WRITE:
1952 val_str = "write";
1953 break;
1954 default:
1955 val_str = NULL;
1956 }
1957 if (val_str) {
1958 err = devlink_fmsg_string_pair_put(fmsg, "method", val_str);
1959 if (err)
1960 return err;
1961 }
1962
1963 val = mlxsw_reg_mfde_long_process_get(mfde_pl);
1964 err = devlink_fmsg_bool_pair_put(fmsg, "long_process", val);
1965 if (err)
1966 return err;
1967
1968 val = mlxsw_reg_mfde_command_type_get(mfde_pl);
1969 switch (val) {
1970 case MLXSW_REG_MFDE_COMMAND_TYPE_MAD:
1971 val_str = "mad";
1972 break;
1973 case MLXSW_REG_MFDE_COMMAND_TYPE_EMAD:
1974 val_str = "emad";
1975 break;
1976 case MLXSW_REG_MFDE_COMMAND_TYPE_CMDIF:
1977 val_str = "cmdif";
1978 break;
1979 default:
1980 val_str = NULL;
1981 }
1982 if (val_str) {
1983 err = devlink_fmsg_string_pair_put(fmsg, "command_type", val_str);
1984 if (err)
1985 return err;
1986 }
1987
1988 val = mlxsw_reg_mfde_reg_attr_id_get(mfde_pl);
1989 err = devlink_fmsg_u32_pair_put(fmsg, "reg_attr_id", val);
1990 if (err)
1991 return err;
1992
1993 switch (event_id) {
1994 case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
1995 return mlxsw_core_health_fw_fatal_dump_crspace_to(mfde_pl,
1996 fmsg);
1997 case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
1998 return mlxsw_core_health_fw_fatal_dump_kvd_im_stop(mfde_pl,
1999 fmsg);
2000 case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT:
2001 return mlxsw_core_health_fw_fatal_dump_fw_assert(mfde_pl, fmsg);
2002 case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE:
2003 return mlxsw_core_health_fw_fatal_dump_fatal_cause(mfde_pl,
2004 fmsg);
2005 }
2006
2007 return 0;
2008 }
2009
2010 static int
2011 mlxsw_core_health_fw_fatal_test(struct devlink_health_reporter *reporter,
2012 struct netlink_ext_ack *extack)
2013 {
2014 struct mlxsw_core *mlxsw_core = devlink_health_reporter_priv(reporter);
2015 char mfgd_pl[MLXSW_REG_MFGD_LEN];
2016 int err;
2017
2018 /* Query the register first so that only the test trigger bit is changed. */
2019 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
2020 if (err)
2021 return err;
2022 mlxsw_reg_mfgd_trigger_test_set(mfgd_pl, true);
2023 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
2024 }
2025
2026 static const struct devlink_health_reporter_ops
2027 mlxsw_core_health_fw_fatal_ops = {
2028 .name = "fw_fatal",
2029 .dump = mlxsw_core_health_fw_fatal_dump,
2030 .test = mlxsw_core_health_fw_fatal_test,
2031 };
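/* Usage note (illustrative, not part of the driver): once registered, the
 * "fw_fatal" reporter can be exercised from user space with the iproute2
 * devlink tool. The device address below is a made-up example and the exact
 * CLI syntax may differ between iproute2 versions:
 *
 *	# trigger a test event via the .test callback (MFGD trigger_test)
 *	devlink health test pci/0000:01:00.0 reporter fw_fatal
 *
 *	# show the dump produced by the .dump callback above
 *	devlink health dump show pci/0000:01:00.0 reporter fw_fatal
 */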
2032
2033 static int mlxsw_core_health_fw_fatal_config(struct mlxsw_core *mlxsw_core,
2034 bool enable)
2035 {
2036 char mfgd_pl[MLXSW_REG_MFGD_LEN];
2037 int err;
2038
2039 /* Query the register first so that only the fatal event mode bit is changed. */
2040 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
2041 if (err)
2042 return err;
2043 mlxsw_reg_mfgd_fatal_event_mode_set(mfgd_pl, enable);
2044 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
2045 }
2046
2047 static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core)
2048 {
2049 struct devlink *devlink = priv_to_devlink(mlxsw_core);
2050 struct devlink_health_reporter *fw_fatal;
2051 int err;
2052
2053 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
2054 return 0;
2055
2056 fw_fatal = devlink_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops,
2057 0, mlxsw_core);
2058 if (IS_ERR(fw_fatal)) {
2059 dev_err(mlxsw_core->bus_info->dev, "Failed to create fw fatal reporter");
2060 return PTR_ERR(fw_fatal);
2061 }
2062 mlxsw_core->health.fw_fatal = fw_fatal;
2063
2064 err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
2065 if (err)
2066 goto err_trap_register;
2067
2068 err = mlxsw_core_health_fw_fatal_config(mlxsw_core, true);
2069 if (err)
2070 goto err_fw_fatal_config;
2071
2072 return 0;
2073
2074 err_fw_fatal_config:
2075 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
2076 err_trap_register:
2077 devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
2078 return err;
2079 }
2080
2081 static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core)
2082 {
2083 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
2084 return;
2085
2086 mlxsw_core_health_fw_fatal_config(mlxsw_core, false);
2087 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
2088
2089 mlxsw_core_flush_owq();
2090 devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
2091 }
2092
2093 static int
2094 __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
2095 const struct mlxsw_bus *mlxsw_bus,
2096 void *bus_priv, bool reload,
2097 struct devlink *devlink,
2098 struct netlink_ext_ack *extack)
2099 {
2100 const char *device_kind = mlxsw_bus_info->device_kind;
2101 struct mlxsw_core *mlxsw_core;
2102 struct mlxsw_driver *mlxsw_driver;
2103 size_t alloc_size;
2104 int err;
2105
2106 mlxsw_driver = mlxsw_core_driver_get(device_kind);
2107 if (!mlxsw_driver)
2108 return -EINVAL;
2109
2110 if (!reload) {
2111 alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
2112 devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size,
2113 mlxsw_bus_info->dev);
2114 if (!devlink) {
2115 err = -ENOMEM;
2116 goto err_devlink_alloc;
2117 }
2118 devl_lock(devlink);
2119 }
2120
2121 mlxsw_core = devlink_priv(devlink);
2122 INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
2123 INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
2124 mlxsw_core->driver = mlxsw_driver;
2125 mlxsw_core->bus = mlxsw_bus;
2126 mlxsw_core->bus_priv = bus_priv;
2127 mlxsw_core->bus_info = mlxsw_bus_info;
2128
2129 err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
2130 &mlxsw_core->res);
2131 if (err)
2132 goto err_bus_init;
2133
2134 if (mlxsw_driver->resources_register && !reload) {
2135 err = mlxsw_driver->resources_register(mlxsw_core);
2136 if (err)
2137 goto err_register_resources;
2138 }
2139
2140 err = mlxsw_ports_init(mlxsw_core, reload);
2141 if (err)
2142 goto err_ports_init;
2143
2144 if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
2145 MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
2146 alloc_size = sizeof(*mlxsw_core->lag.mapping) *
2147 MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
2148 MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
2149 mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
2150 if (!mlxsw_core->lag.mapping) {
2151 err = -ENOMEM;
2152 goto err_alloc_lag_mapping;
2153 }
2154 }
2155
2156 err = mlxsw_core_trap_groups_set(mlxsw_core);
2157 if (err)
2158 goto err_trap_groups_set;
2159
2160 err = mlxsw_emad_init(mlxsw_core);
2161 if (err)
2162 goto err_emad_init;
2163
2164 if (!reload) {
2165 err = mlxsw_core_params_register(mlxsw_core);
2166 if (err)
2167 goto err_register_params;
2168 }
2169
2170 err = mlxsw_core_fw_rev_validate(mlxsw_core, mlxsw_bus_info, mlxsw_driver->fw_req_rev,
2171 mlxsw_driver->fw_filename);
2172 if (err)
2173 goto err_fw_rev_validate;
2174
2175 err = mlxsw_linecards_init(mlxsw_core, mlxsw_bus_info);
2176 if (err)
2177 goto err_linecards_init;
2178
2179 err = mlxsw_core_health_init(mlxsw_core);
2180 if (err)
2181 goto err_health_init;
2182
2183 err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
2184 if (err)
2185 goto err_hwmon_init;
2186
2187 err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
2188 &mlxsw_core->thermal);
2189 if (err)
2190 goto err_thermal_init;
2191
2192 err = mlxsw_env_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->env);
2193 if (err)
2194 goto err_env_init;
2195
2196 if (mlxsw_driver->init) {
2197 err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
2198 if (err)
2199 goto err_driver_init;
2200 }
2201
2202 if (!reload) {
2203 devlink_set_features(devlink, DEVLINK_F_RELOAD);
2204 devl_unlock(devlink);
2205 devlink_register(devlink);
2206 }
2207 return 0;
2208
2209 err_driver_init:
2210 mlxsw_env_fini(mlxsw_core->env);
2211 err_env_init:
2212 mlxsw_thermal_fini(mlxsw_core->thermal);
2213 err_thermal_init:
2214 mlxsw_hwmon_fini(mlxsw_core->hwmon);
2215 err_hwmon_init:
2216 mlxsw_core_health_fini(mlxsw_core);
2217 err_health_init:
2218 mlxsw_linecards_fini(mlxsw_core);
2219 err_linecards_init:
2220 err_fw_rev_validate:
2221 if (!reload)
2222 mlxsw_core_params_unregister(mlxsw_core);
2223 err_register_params:
2224 mlxsw_emad_fini(mlxsw_core);
2225 err_emad_init:
2226 err_trap_groups_set:
2227 kfree(mlxsw_core->lag.mapping);
2228 err_alloc_lag_mapping:
2229 mlxsw_ports_fini(mlxsw_core, reload);
2230 err_ports_init:
2231 if (!reload)
2232 devl_resources_unregister(devlink);
2233 err_register_resources:
2234 mlxsw_bus->fini(bus_priv);
2235 err_bus_init:
2236 if (!reload) {
2237 devl_unlock(devlink);
2238 devlink_free(devlink);
2239 }
2240 err_devlink_alloc:
2241 return err;
2242 }
2243
2244 int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
2245 const struct mlxsw_bus *mlxsw_bus,
2246 void *bus_priv, bool reload,
2247 struct devlink *devlink,
2248 struct netlink_ext_ack *extack)
2249 {
2250 bool called_again = false;
2251 int err;
2252
2253 again:
2254 err = __mlxsw_core_bus_device_register(mlxsw_bus_info, mlxsw_bus,
2255 bus_priv, reload,
2256 devlink, extack);
2257 /* -EAGAIN is returned when the firmware was just updated and the
2258  * device needs a reset, so try to register the bus device once
2259  * more before giving up.
2260  */
2261 if (err == -EAGAIN && !called_again) {
2262 called_again = true;
2263 goto again;
2264 }
2265
2266 return err;
2267 }
2268 EXPORT_SYMBOL(mlxsw_core_bus_device_register);
2269
2270 void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
2271 bool reload)
2272 {
2273 struct devlink *devlink = priv_to_devlink(mlxsw_core);
2274
2275 if (!reload) {
2276 devlink_unregister(devlink);
2277 devl_lock(devlink);
2278 }
2279
2280 if (devlink_is_reload_failed(devlink)) {
2281 if (!reload)
2282 /* Only the parts that were not de-initialized in the
2283  * failed reload attempt are de-initialized now.
2284  */
2285 goto reload_fail_deinit;
2286 else
2287 return;
2288 }
2289
2290 if (mlxsw_core->driver->fini)
2291 mlxsw_core->driver->fini(mlxsw_core);
2292 mlxsw_env_fini(mlxsw_core->env);
2293 mlxsw_thermal_fini(mlxsw_core->thermal);
2294 mlxsw_hwmon_fini(mlxsw_core->hwmon);
2295 mlxsw_core_health_fini(mlxsw_core);
2296 mlxsw_linecards_fini(mlxsw_core);
2297 if (!reload)
2298 mlxsw_core_params_unregister(mlxsw_core);
2299 mlxsw_emad_fini(mlxsw_core);
2300 kfree(mlxsw_core->lag.mapping);
2301 mlxsw_ports_fini(mlxsw_core, reload);
2302 if (!reload)
2303 devl_resources_unregister(devlink);
2304 mlxsw_core->bus->fini(mlxsw_core->bus_priv);
2305 if (!reload) {
2306 devl_unlock(devlink);
2307 devlink_free(devlink);
2308 }
2309
2310 return;
2311
2312 reload_fail_deinit:
2313 mlxsw_core_params_unregister(mlxsw_core);
2314 devl_resources_unregister(devlink);
2315 devl_unlock(devlink);
2316 devlink_free(devlink);
2317 }
2318 EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
2319
2320 bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
2321 const struct mlxsw_tx_info *tx_info)
2322 {
2323 return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
2324 tx_info);
2325 }
2326 EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
2327
2328 int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
2329 const struct mlxsw_tx_info *tx_info)
2330 {
2331 return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
2332 tx_info);
2333 }
2334 EXPORT_SYMBOL(mlxsw_core_skb_transmit);
2335
2336 void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
2337 struct sk_buff *skb, u16 local_port)
2338 {
2339 if (mlxsw_core->driver->ptp_transmitted)
2340 mlxsw_core->driver->ptp_transmitted(mlxsw_core, skb,
2341 local_port);
2342 }
2343 EXPORT_SYMBOL(mlxsw_core_ptp_transmitted);
2344
2345 static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
2346 const struct mlxsw_rx_listener *rxl_b)
2347 {
2348 return (rxl_a->func == rxl_b->func &&
2349 rxl_a->local_port == rxl_b->local_port &&
2350 rxl_a->trap_id == rxl_b->trap_id &&
2351 rxl_a->mirror_reason == rxl_b->mirror_reason);
2352 }
2353
2354 static struct mlxsw_rx_listener_item *
2355 __find_rx_listener_item(struct mlxsw_core *mlxsw_core,
2356 const struct mlxsw_rx_listener *rxl)
2357 {
2358 struct mlxsw_rx_listener_item *rxl_item;
2359
2360 list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
2361 if (__is_rx_listener_equal(&rxl_item->rxl, rxl))
2362 return rxl_item;
2363 }
2364 return NULL;
2365 }
2366
2367 int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
2368 const struct mlxsw_rx_listener *rxl,
2369 void *priv, bool enabled)
2370 {
2371 struct mlxsw_rx_listener_item *rxl_item;
2372
2373 rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
2374 if (rxl_item)
2375 return -EEXIST;
2376 rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
2377 if (!rxl_item)
2378 return -ENOMEM;
2379 rxl_item->rxl = *rxl;
2380 rxl_item->priv = priv;
2381 rxl_item->enabled = enabled;
2382
2383 list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
2384 return 0;
2385 }
2386 EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
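/* Example (a hypothetical sketch, not part of the driver): registering an RX
 * listener for a single trap. my_rx_func(), my_priv and MLXSW_TRAP_ID_EXAMPLE
 * are made-up names; the fields set below are the ones compared by
 * __is_rx_listener_equal() above.
 *
 *	static void my_rx_func(struct sk_buff *skb, u16 local_port, void *priv)
 *	{
 *		dev_kfree_skb(skb);
 *	}
 *
 *	static const struct mlxsw_rx_listener my_rxl = {
 *		.func		= my_rx_func,
 *		.local_port	= MLXSW_PORT_DONT_CARE,
 *		.trap_id	= MLXSW_TRAP_ID_EXAMPLE,
 *	};
 *
 *	err = mlxsw_core_rx_listener_register(mlxsw_core, &my_rxl, my_priv, true);
 *	...
 *	mlxsw_core_rx_listener_unregister(mlxsw_core, &my_rxl);
 */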
2387
2388 void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
2389 const struct mlxsw_rx_listener *rxl)
2390 {
2391 struct mlxsw_rx_listener_item *rxl_item;
2392
2393 rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
2394 if (!rxl_item)
2395 return;
2396 list_del_rcu(&rxl_item->list);
2397 synchronize_rcu();
2398 kfree(rxl_item);
2399 }
2400 EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
2401
2402 static void
2403 mlxsw_core_rx_listener_state_set(struct mlxsw_core *mlxsw_core,
2404 const struct mlxsw_rx_listener *rxl,
2405 bool enabled)
2406 {
2407 struct mlxsw_rx_listener_item *rxl_item;
2408
2409 rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
2410 if (WARN_ON(!rxl_item))
2411 return;
2412 rxl_item->enabled = enabled;
2413 }
2414
2415 static void mlxsw_core_event_listener_func(struct sk_buff *skb, u16 local_port,
2416 void *priv)
2417 {
2418 struct mlxsw_event_listener_item *event_listener_item = priv;
2419 struct mlxsw_core *mlxsw_core;
2420 struct mlxsw_reg_info reg;
2421 char *payload;
2422 char *reg_tlv;
2423 char *op_tlv;
2424
2425 mlxsw_core = event_listener_item->mlxsw_core;
2426 trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
2427 skb->data, skb->len);
2428
2429 mlxsw_emad_tlv_parse(skb);
2430 op_tlv = mlxsw_emad_op_tlv(skb);
2431 reg_tlv = mlxsw_emad_reg_tlv(skb);
2432
2433 reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
2434 reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
2435 payload = mlxsw_emad_reg_payload(reg_tlv);
2436 event_listener_item->el.func(&reg, payload, event_listener_item->priv);
2437 dev_kfree_skb(skb);
2438 }
2439
2440 static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
2441 const struct mlxsw_event_listener *el_b)
2442 {
2443 return (el_a->func == el_b->func &&
2444 el_a->trap_id == el_b->trap_id);
2445 }
2446
2447 static struct mlxsw_event_listener_item *
2448 __find_event_listener_item(struct mlxsw_core *mlxsw_core,
2449 const struct mlxsw_event_listener *el)
2450 {
2451 struct mlxsw_event_listener_item *el_item;
2452
2453 list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
2454 if (__is_event_listener_equal(&el_item->el, el))
2455 return el_item;
2456 }
2457 return NULL;
2458 }
2459
2460 int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
2461 const struct mlxsw_event_listener *el,
2462 void *priv)
2463 {
2464 int err;
2465 struct mlxsw_event_listener_item *el_item;
2466 const struct mlxsw_rx_listener rxl = {
2467 .func = mlxsw_core_event_listener_func,
2468 .local_port = MLXSW_PORT_DONT_CARE,
2469 .trap_id = el->trap_id,
2470 };
2471
2472 el_item = __find_event_listener_item(mlxsw_core, el);
2473 if (el_item)
2474 return -EEXIST;
2475 el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
2476 if (!el_item)
2477 return -ENOMEM;
2478 el_item->mlxsw_core = mlxsw_core;
2479 el_item->el = *el;
2480 el_item->priv = priv;
2481
2482 err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item, true);
2483 if (err)
2484 goto err_rx_listener_register;
2485
2486 /* The item is added to the list only after the RX listener
2487  * for it was registered successfully.
2488  */
2489 list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);
2490
2491 return 0;
2492
2493 err_rx_listener_register:
2494 kfree(el_item);
2495 return err;
2496 }
2497 EXPORT_SYMBOL(mlxsw_core_event_listener_register);
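/* Example (a hypothetical sketch): registering an event listener directly.
 * The handler signature is inferred from mlxsw_core_event_listener_func()
 * above; my_event_func(), my_priv and MLXSW_TRAP_ID_EXAMPLE are made-up names.
 *
 *	static void my_event_func(const struct mlxsw_reg_info *reg,
 *				  char *payload, void *priv)
 *	{
 *		pr_debug("event for register %x, len %u\n", reg->id, reg->len);
 *	}
 *
 *	static const struct mlxsw_event_listener my_el = {
 *		.func		= my_event_func,
 *		.trap_id	= MLXSW_TRAP_ID_EXAMPLE,
 *	};
 *
 *	err = mlxsw_core_event_listener_register(mlxsw_core, &my_el, my_priv);
 *	...
 *	mlxsw_core_event_listener_unregister(mlxsw_core, &my_el);
 */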
2498
2499 void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
2500 const struct mlxsw_event_listener *el)
2501 {
2502 struct mlxsw_event_listener_item *el_item;
2503 const struct mlxsw_rx_listener rxl = {
2504 .func = mlxsw_core_event_listener_func,
2505 .local_port = MLXSW_PORT_DONT_CARE,
2506 .trap_id = el->trap_id,
2507 };
2508
2509 el_item = __find_event_listener_item(mlxsw_core, el);
2510 if (!el_item)
2511 return;
2512 mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl);
2513 list_del(&el_item->list);
2514 kfree(el_item);
2515 }
2516 EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
2517
2518 static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
2519 const struct mlxsw_listener *listener,
2520 void *priv, bool enabled)
2521 {
2522 if (listener->is_event) {
2523 WARN_ON(!enabled);
2524 return mlxsw_core_event_listener_register(mlxsw_core,
2525 &listener->event_listener,
2526 priv);
2527 } else {
2528 return mlxsw_core_rx_listener_register(mlxsw_core,
2529 &listener->rx_listener,
2530 priv, enabled);
2531 }
2532 }
2533
2534 static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
2535 const struct mlxsw_listener *listener,
2536 void *priv)
2537 {
2538 if (listener->is_event)
2539 mlxsw_core_event_listener_unregister(mlxsw_core,
2540 &listener->event_listener);
2541 else
2542 mlxsw_core_rx_listener_unregister(mlxsw_core,
2543 &listener->rx_listener);
2544 }
2545
2546 int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
2547 const struct mlxsw_listener *listener, void *priv)
2548 {
2549 enum mlxsw_reg_htgt_trap_group trap_group;
2550 enum mlxsw_reg_hpkt_action action;
2551 char hpkt_pl[MLXSW_REG_HPKT_LEN];
2552 int err;
2553
2554 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
2555 return 0;
2556
2557 err = mlxsw_core_listener_register(mlxsw_core, listener, priv,
2558 listener->enabled_on_register);
2559 if (err)
2560 return err;
2561
2562 action = listener->enabled_on_register ? listener->en_action :
2563 listener->dis_action;
2564 trap_group = listener->enabled_on_register ? listener->en_trap_group :
2565 listener->dis_trap_group;
2566 mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
2567 trap_group, listener->is_ctrl);
2568 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
2569 if (err)
2570 goto err_trap_set;
2571
2572 return 0;
2573
2574 err_trap_set:
2575 mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
2576 return err;
2577 }
2578 EXPORT_SYMBOL(mlxsw_core_trap_register);
2579
2580 void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
2581 const struct mlxsw_listener *listener,
2582 void *priv)
2583 {
2584 char hpkt_pl[MLXSW_REG_HPKT_LEN];
2585
2586 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
2587 return;
2588
2589 if (!listener->is_event) {
2590 mlxsw_reg_hpkt_pack(hpkt_pl, listener->dis_action,
2591 listener->trap_id, listener->dis_trap_group,
2592 listener->is_ctrl);
2593 mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
2594 }
2595
2596 mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
2597 }
2598 EXPORT_SYMBOL(mlxsw_core_trap_unregister);
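/* Example (a hypothetical sketch): in practice an event listener is usually
 * wrapped in a struct mlxsw_listener and registered through the trap
 * register/unregister pair, mirroring mlxsw_core_health_listener above.
 * my_event_func(), my_priv and the EXAMPLE trap id suffix are made-up names.
 *
 *	static const struct mlxsw_listener my_listener =
 *		MLXSW_CORE_EVENTL(my_event_func, EXAMPLE);
 *
 *	err = mlxsw_core_trap_register(mlxsw_core, &my_listener, my_priv);
 *	...
 *	mlxsw_core_trap_unregister(mlxsw_core, &my_listener, my_priv);
 */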
2599
2600 int mlxsw_core_traps_register(struct mlxsw_core *mlxsw_core,
2601 const struct mlxsw_listener *listeners,
2602 size_t listeners_count, void *priv)
2603 {
2604 int i, err;
2605
2606 for (i = 0; i < listeners_count; i++) {
2607 err = mlxsw_core_trap_register(mlxsw_core,
2608 &listeners[i],
2609 priv);
2610 if (err)
2611 goto err_listener_register;
2612 }
2613 return 0;
2614
2615 err_listener_register:
2616 for (i--; i >= 0; i--) {
2617 mlxsw_core_trap_unregister(mlxsw_core,
2618 &listeners[i],
2619 priv);
2620 }
2621 return err;
2622 }
2623 EXPORT_SYMBOL(mlxsw_core_traps_register);
2624
2625 void mlxsw_core_traps_unregister(struct mlxsw_core *mlxsw_core,
2626 const struct mlxsw_listener *listeners,
2627 size_t listeners_count, void *priv)
2628 {
2629 int i;
2630
2631 for (i = 0; i < listeners_count; i++) {
2632 mlxsw_core_trap_unregister(mlxsw_core,
2633 &listeners[i],
2634 priv);
2635 }
2636 }
2637 EXPORT_SYMBOL(mlxsw_core_traps_unregister);
2638
2639 int mlxsw_core_trap_state_set(struct mlxsw_core *mlxsw_core,
2640 const struct mlxsw_listener *listener,
2641 bool enabled)
2642 {
2643 enum mlxsw_reg_htgt_trap_group trap_group;
2644 enum mlxsw_reg_hpkt_action action;
2645 char hpkt_pl[MLXSW_REG_HPKT_LEN];
2646 int err;
2647
2648 /* Toggling is not supported for event listeners. */
2649 if (WARN_ON(listener->is_event))
2650 return -EINVAL;
2651
2652 action = enabled ? listener->en_action : listener->dis_action;
2653 trap_group = enabled ? listener->en_trap_group :
2654 listener->dis_trap_group;
2655 mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
2656 trap_group, listener->is_ctrl);
2657 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
2658 if (err)
2659 return err;
2660
2661 mlxsw_core_rx_listener_state_set(mlxsw_core, &listener->rx_listener,
2662 enabled);
2663 return 0;
2664 }
2665 EXPORT_SYMBOL(mlxsw_core_trap_state_set);
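/* Example (a hypothetical sketch): temporarily muting a previously registered
 * non-event struct mlxsw_listener without unregistering it. my_listener is a
 * made-up name; event listeners cannot be toggled, as checked above.
 *
 *	err = mlxsw_core_trap_state_set(mlxsw_core, &my_listener, false);
 *	...
 *	err = mlxsw_core_trap_state_set(mlxsw_core, &my_listener, true);
 */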
2666
2667 static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
2668 {
2669 return atomic64_inc_return(&mlxsw_core->emad.tid);
2670 }
2671
2672 static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
2673 const struct mlxsw_reg_info *reg,
2674 char *payload,
2675 enum mlxsw_core_reg_access_type type,
2676 struct list_head *bulk_list,
2677 mlxsw_reg_trans_cb_t *cb,
2678 unsigned long cb_priv)
2679 {
2680 u64 tid = mlxsw_core_tid_get(mlxsw_core);
2681 struct mlxsw_reg_trans *trans;
2682 int err;
2683
2684 trans = kzalloc(sizeof(*trans), GFP_KERNEL);
2685 if (!trans)
2686 return -ENOMEM;
2687
2688 err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
2689 bulk_list, cb, cb_priv, tid);
2690 if (err) {
2691 kfree_rcu(trans, rcu);
2692 return err;
2693 }
2694 return 0;
2695 }
2696
2697 int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
2698 const struct mlxsw_reg_info *reg, char *payload,
2699 struct list_head *bulk_list,
2700 mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
2701 {
2702 return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
2703 MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
2704 bulk_list, cb, cb_priv);
2705 }
2706 EXPORT_SYMBOL(mlxsw_reg_trans_query);
2707
2708 int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
2709 const struct mlxsw_reg_info *reg, char *payload,
2710 struct list_head *bulk_list,
2711 mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
2712 {
2713 return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
2714 MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
2715 bulk_list, cb, cb_priv);
2716 }
2717 EXPORT_SYMBOL(mlxsw_reg_trans_write);
2718
2719 #define MLXSW_REG_TRANS_ERR_STRING_SIZE 256
2720
2721 static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
2722 {
2723 char err_string[MLXSW_REG_TRANS_ERR_STRING_SIZE];
2724 struct mlxsw_core *mlxsw_core = trans->core;
2725 int err;
2726
2727 wait_for_completion(&trans->completion);
2728 cancel_delayed_work_sync(&trans->timeout_dw);
2729 err = trans->err;
2730
2731 if (trans->retries)
2732 dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
2733 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
2734 if (err) {
2735 dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
2736 trans->tid, trans->reg->id,
2737 mlxsw_reg_id_str(trans->reg->id),
2738 mlxsw_core_reg_access_type_str(trans->type),
2739 trans->emad_status,
2740 mlxsw_emad_op_tlv_status_str(trans->emad_status));
2741
2742 snprintf(err_string, MLXSW_REG_TRANS_ERR_STRING_SIZE,
2743 "(tid=%llx,reg_id=%x(%s)) %s (%s)\n", trans->tid,
2744 trans->reg->id, mlxsw_reg_id_str(trans->reg->id),
2745 mlxsw_emad_op_tlv_status_str(trans->emad_status),
2746 trans->emad_err_string ? trans->emad_err_string : "");
2747
2748 trace_devlink_hwerr(priv_to_devlink(mlxsw_core),
2749 trans->emad_status, err_string);
2750
2751 kfree(trans->emad_err_string);
2752 }
2753
2754 list_del(&trans->bulk_list);
2755 kfree_rcu(trans, rcu);
2756 return err;
2757 }
2758
2759 int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
2760 {
2761 struct mlxsw_reg_trans *trans;
2762 struct mlxsw_reg_trans *tmp;
2763 int sum_err = 0;
2764 int err;
2765
2766 list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
2767 err = mlxsw_reg_trans_wait(trans);
2768 if (err && sum_err == 0)
2769 sum_err = err;
2770 }
2771 return sum_err;
2772 }
2773 EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
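/* Example (a hypothetical sketch): queuing EMAD transactions and waiting for
 * all of them at once. The MFGD register and its payload length are taken
 * from the health code above; my_query_cb() is a made-up callback following
 * the mlxsw_reg_trans_cb_t shape used by mlxsw_core_reg_access_cb() below.
 *
 *	static void my_query_cb(struct mlxsw_core *mlxsw_core, char *payload,
 *				size_t payload_len, unsigned long cb_priv)
 *	{
 *		... consume the queried payload ...
 *	}
 *
 *	char mfgd_pl[MLXSW_REG_MFGD_LEN];
 *	LIST_HEAD(bulk_list);
 *
 *	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl,
 *				    &bulk_list, my_query_cb, 0);
 *	if (err)
 *		return err;
 *	return mlxsw_reg_trans_bulk_wait(&bulk_list);
 */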
2774
2775 static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
2776 const struct mlxsw_reg_info *reg,
2777 char *payload,
2778 enum mlxsw_core_reg_access_type type)
2779 {
2780 enum mlxsw_emad_op_tlv_status status;
2781 int err, n_retry;
2782 bool reset_ok;
2783 char *in_mbox, *out_mbox, *tmp;
2784
2785 dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
2786 reg->id, mlxsw_reg_id_str(reg->id),
2787 mlxsw_core_reg_access_type_str(type));
2788
2789 in_mbox = mlxsw_cmd_mbox_alloc();
2790 if (!in_mbox)
2791 return -ENOMEM;
2792
2793 out_mbox = mlxsw_cmd_mbox_alloc();
2794 if (!out_mbox) {
2795 err = -ENOMEM;
2796 goto free_in_mbox;
2797 }
2798
2799 mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
2800 mlxsw_core_tid_get(mlxsw_core));
2801 tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
2802 mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
2803
2804 /* The MRSR register triggers a firmware reset. A write through the
2805  * command interface may then complete with a "running reset" status
2806  * instead of success, so that status must not be treated as an
2807  * error (see the reset_ok handling in mlxsw_cmd_exec()).
2808  */
2809 reset_ok = reg->id == MLXSW_REG_MRSR_ID;
2810
2811 n_retry = 0;
2812 retry:
2813 err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox);
2814 if (!err) {
2815 err = mlxsw_emad_process_status(out_mbox, &status);
2816 if (err) {
2817 if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
2818 goto retry;
2819 dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
2820 status, mlxsw_emad_op_tlv_status_str(status));
2821 }
2822 }
2823
2824 if (!err)
2825 memcpy(payload, mlxsw_emad_reg_payload_cmd(out_mbox),
2826 reg->len);
2827
2828 mlxsw_cmd_mbox_free(out_mbox);
2829 free_in_mbox:
2830 mlxsw_cmd_mbox_free(in_mbox);
2831 if (err)
2832 dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
2833 reg->id, mlxsw_reg_id_str(reg->id),
2834 mlxsw_core_reg_access_type_str(type));
2835 return err;
2836 }
2837
2838 static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
2839 char *payload, size_t payload_len,
2840 unsigned long cb_priv)
2841 {
2842 char *orig_payload = (char *) cb_priv;
2843
2844 memcpy(orig_payload, payload, payload_len);
2845 }
2846
2847 static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
2848 const struct mlxsw_reg_info *reg,
2849 char *payload,
2850 enum mlxsw_core_reg_access_type type)
2851 {
2852 LIST_HEAD(bulk_list);
2853 int err;
2854
2855 /* During driver initialization the EMAD interface is not yet
2856  * available, so register access defaults to the command interface
2857  * and switches to EMAD once emad.use_emad is set.
2858  */
2859 if (!mlxsw_core->emad.use_emad)
2860 return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
2861 payload, type);
2862
2863 err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
2864 payload, type, &bulk_list,
2865 mlxsw_core_reg_access_cb,
2866 (unsigned long) payload);
2867 if (err)
2868 return err;
2869 return mlxsw_reg_trans_bulk_wait(&bulk_list);
2870 }
2871
2872 int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
2873 const struct mlxsw_reg_info *reg, char *payload)
2874 {
2875 return mlxsw_core_reg_access(mlxsw_core, reg, payload,
2876 MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
2877 }
2878 EXPORT_SYMBOL(mlxsw_reg_query);
2879
2880 int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
2881 const struct mlxsw_reg_info *reg, char *payload)
2882 {
2883 return mlxsw_core_reg_access(mlxsw_core, reg, payload,
2884 MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
2885 }
2886 EXPORT_SYMBOL(mlxsw_reg_write);
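/* Example (a hypothetical sketch): the common synchronous read-modify-write
 * idiom built on mlxsw_reg_query()/mlxsw_reg_write(), mirroring
 * mlxsw_core_health_fw_fatal_config() above:
 *
 *	char mfgd_pl[MLXSW_REG_MFGD_LEN];
 *
 *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
 *	if (err)
 *		return err;
 *	mlxsw_reg_mfgd_fatal_event_mode_set(mfgd_pl, true);
 *	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
 */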
2887
2888 void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
2889 struct mlxsw_rx_info *rx_info)
2890 {
2891 struct mlxsw_rx_listener_item *rxl_item;
2892 const struct mlxsw_rx_listener *rxl;
2893 u16 local_port;
2894 bool found = false;
2895
2896 if (rx_info->is_lag) {
2897 dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
2898 __func__, rx_info->u.lag_id,
2899 rx_info->lag_port_index);
2900 /* Upper layers do not care whether the packet arrived through a
2901  * LAG, so resolve the LAG member's local port and pass that up.
2902  */
2903 local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
2904 rx_info->u.lag_id,
2905 rx_info->lag_port_index);
2906 } else {
2907 local_port = rx_info->u.sys_port;
2908 }
2909
2910 dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
2911 __func__, local_port, rx_info->trap_id);
2912
2913 if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
2914 (local_port >= mlxsw_core->max_ports))
2915 goto drop;
2916
2917 rcu_read_lock();
2918 list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
2919 rxl = &rxl_item->rxl;
2920 if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
2921 rxl->local_port == local_port) &&
2922 rxl->trap_id == rx_info->trap_id &&
2923 rxl->mirror_reason == rx_info->mirror_reason) {
2924 if (rxl_item->enabled)
2925 found = true;
2926 break;
2927 }
2928 }
2929 if (!found) {
2930 rcu_read_unlock();
2931 goto drop;
2932 }
2933
2934 rxl->func(skb, local_port, rxl_item->priv);
2935 rcu_read_unlock();
2936 return;
2937
2938 drop:
2939 dev_kfree_skb(skb);
2940 }
2941 EXPORT_SYMBOL(mlxsw_core_skb_receive);
2942
2943 static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
2944 u16 lag_id, u8 port_index)
2945 {
2946 return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
2947 port_index;
2948 }
2949
2950 void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
2951 u16 lag_id, u8 port_index, u16 local_port)
2952 {
2953 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
2954 lag_id, port_index);
2955
2956 mlxsw_core->lag.mapping[index] = local_port;
2957 }
2958 EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);
2959
2960 u16 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
2961 u16 lag_id, u8 port_index)
2962 {
2963 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
2964 lag_id, port_index);
2965
2966 return mlxsw_core->lag.mapping[index];
2967 }
2968 EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);
2969
2970 void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
2971 u16 lag_id, u16 local_port)
2972 {
2973 int i;
2974
2975 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
2976 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
2977 lag_id, i);
2978
2979 if (mlxsw_core->lag.mapping[index] == local_port)
2980 mlxsw_core->lag.mapping[index] = 0;
2981 }
2982 }
2983 EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
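/* Example (a hypothetical sketch): the LAG mapping is a flat array indexed as
 * MAX_LAG_MEMBERS * lag_id + port_index (see mlxsw_core_lag_mapping_index()
 * above). A typical set/get/clear round trip, with made-up values:
 *
 *	u16 lag_id = 3, local_port = 17;
 *	u8 port_index = 1;
 *
 *	mlxsw_core_lag_mapping_set(mlxsw_core, lag_id, port_index, local_port);
 *	local_port = mlxsw_core_lag_mapping_get(mlxsw_core, lag_id, port_index);
 *	mlxsw_core_lag_mapping_clear(mlxsw_core, lag_id, local_port);
 */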
2984
2985 bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
2986 enum mlxsw_res_id res_id)
2987 {
2988 return mlxsw_res_valid(&mlxsw_core->res, res_id);
2989 }
2990 EXPORT_SYMBOL(mlxsw_core_res_valid);
2991
2992 u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
2993 enum mlxsw_res_id res_id)
2994 {
2995 return mlxsw_res_get(&mlxsw_core->res, res_id);
2996 }
2997 EXPORT_SYMBOL(mlxsw_core_res_get);
2998
2999 static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
3000 enum devlink_port_flavour flavour,
3001 u8 slot_index, u32 port_number, bool split,
3002 u32 split_port_subnumber,
3003 bool splittable, u32 lanes,
3004 const unsigned char *switch_id,
3005 unsigned char switch_id_len)
3006 {
3007 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3008 struct mlxsw_core_port *mlxsw_core_port =
3009 &mlxsw_core->ports[local_port];
3010 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
3011 struct devlink_port_attrs attrs = {};
3012 int err;
3013
3014 attrs.split = split;
3015 attrs.lanes = lanes;
3016 attrs.splittable = splittable;
3017 attrs.flavour = flavour;
3018 attrs.phys.port_number = port_number;
3019 attrs.phys.split_subport_number = split_port_subnumber;
3020 memcpy(attrs.switch_id.id, switch_id, switch_id_len);
3021 attrs.switch_id.id_len = switch_id_len;
3022 mlxsw_core_port->local_port = local_port;
3023 devlink_port_attrs_set(devlink_port, &attrs);
3024 if (slot_index) {
3025 struct mlxsw_linecard *linecard;
3026
3027 linecard = mlxsw_linecard_get(mlxsw_core->linecards,
3028 slot_index);
3029 mlxsw_core_port->linecard = linecard;
3030 devlink_port_linecard_set(devlink_port,
3031 linecard->devlink_linecard);
3032 }
3033 err = devl_port_register(devlink, devlink_port, local_port);
3034 if (err)
3035 memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
3036 return err;
3037 }
3038
3039 static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port)
3040 {
3041 struct mlxsw_core_port *mlxsw_core_port =
3042 &mlxsw_core->ports[local_port];
3043 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
3044
3045 devl_port_unregister(devlink_port);
3046 memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
3047 }
3048
3049 int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
3050 u8 slot_index, u32 port_number, bool split,
3051 u32 split_port_subnumber,
3052 bool splittable, u32 lanes,
3053 const unsigned char *switch_id,
3054 unsigned char switch_id_len)
3055 {
3056 int err;
3057
3058 err = __mlxsw_core_port_init(mlxsw_core, local_port,
3059 DEVLINK_PORT_FLAVOUR_PHYSICAL, slot_index,
3060 port_number, split, split_port_subnumber,
3061 splittable, lanes,
3062 switch_id, switch_id_len);
3063 if (err)
3064 return err;
3065
3066 atomic_inc(&mlxsw_core->active_ports_count);
3067 return 0;
3068 }
3069 EXPORT_SYMBOL(mlxsw_core_port_init);
3070
3071 void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port)
3072 {
3073 atomic_dec(&mlxsw_core->active_ports_count);
3074
3075 __mlxsw_core_port_fini(mlxsw_core, local_port);
3076 }
3077 EXPORT_SYMBOL(mlxsw_core_port_fini);
3078
3079 int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
3080 void *port_driver_priv,
3081 const unsigned char *switch_id,
3082 unsigned char switch_id_len)
3083 {
3084 struct mlxsw_core_port *mlxsw_core_port =
3085 &mlxsw_core->ports[MLXSW_PORT_CPU_PORT];
3086 int err;
3087
3088 err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT,
3089 DEVLINK_PORT_FLAVOUR_CPU,
3090 0, 0, false, 0, false, 0,
3091 switch_id, switch_id_len);
3092 if (err)
3093 return err;
3094
3095 mlxsw_core_port->port_driver_priv = port_driver_priv;
3096 return 0;
3097 }
3098 EXPORT_SYMBOL(mlxsw_core_cpu_port_init);
3099
3100 void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core)
3101 {
3102 __mlxsw_core_port_fini(mlxsw_core, MLXSW_PORT_CPU_PORT);
3103 }
3104 EXPORT_SYMBOL(mlxsw_core_cpu_port_fini);
3105
3106 void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port,
3107 void *port_driver_priv, struct net_device *dev)
3108 {
3109 struct mlxsw_core_port *mlxsw_core_port =
3110 &mlxsw_core->ports[local_port];
3111 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
3112
3113 mlxsw_core_port->port_driver_priv = port_driver_priv;
3114 devlink_port_type_eth_set(devlink_port, dev);
3115 }
3116 EXPORT_SYMBOL(mlxsw_core_port_eth_set);
3117
3118 void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u16 local_port,
3119 void *port_driver_priv)
3120 {
3121 struct mlxsw_core_port *mlxsw_core_port =
3122 &mlxsw_core->ports[local_port];
3123 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
3124
3125 mlxsw_core_port->port_driver_priv = port_driver_priv;
3126 devlink_port_type_ib_set(devlink_port, NULL);
3127 }
3128 EXPORT_SYMBOL(mlxsw_core_port_ib_set);
3129
3130 void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port,
3131 void *port_driver_priv)
3132 {
3133 struct mlxsw_core_port *mlxsw_core_port =
3134 &mlxsw_core->ports[local_port];
3135 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
3136
3137 mlxsw_core_port->port_driver_priv = port_driver_priv;
3138 devlink_port_type_clear(devlink_port);
3139 }
3140 EXPORT_SYMBOL(mlxsw_core_port_clear);
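/* Example (a hypothetical sketch): the usual lifecycle of a front-panel port
 * as driven by a port driver. The slot index, port numbers, lane count,
 * switch id and my_port_priv are made-up values:
 *
 *	err = mlxsw_core_port_init(mlxsw_core, local_port, 0, port_number,
 *				   false, 0, true, 4, switch_id, switch_id_len);
 *	if (err)
 *		return err;
 *	...
 *	mlxsw_core_port_eth_set(mlxsw_core, local_port, my_port_priv, netdev);
 *	...
 *	mlxsw_core_port_clear(mlxsw_core, local_port, my_port_priv);
 *	mlxsw_core_port_fini(mlxsw_core, local_port);
 */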
3141
3142 enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
3143 u16 local_port)
3144 {
3145 struct mlxsw_core_port *mlxsw_core_port =
3146 &mlxsw_core->ports[local_port];
3147 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
3148
3149 return devlink_port->type;
3150 }
3151 EXPORT_SYMBOL(mlxsw_core_port_type_get);
3152
3153
3154 struct devlink_port *
3155 mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
3156 u16 local_port)
3157 {
3158 struct mlxsw_core_port *mlxsw_core_port =
3159 &mlxsw_core->ports[local_port];
3160 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
3161
3162 return devlink_port;
3163 }
3164 EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
3165
3166 struct mlxsw_linecard *
3167 mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core,
3168 u16 local_port)
3169 {
3170 struct mlxsw_core_port *mlxsw_core_port =
3171 &mlxsw_core->ports[local_port];
3172
3173 return mlxsw_core_port->linecard;
3174 }
3175
3176 void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core,
3177 bool (*selector)(void *priv, u16 local_port),
3178 void *priv)
3179 {
3180 if (WARN_ON_ONCE(!mlxsw_core->driver->ports_remove_selected))
3181 return;
3182 mlxsw_core->driver->ports_remove_selected(mlxsw_core, selector, priv);
3183 }
3184
3185 struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core)
3186 {
3187 return mlxsw_core->env;
3188 }
3189
3190 static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
3191 const char *buf, size_t size)
3192 {
3193 __be32 *m = (__be32 *) buf;
3194 int i;
3195 int count = size / sizeof(__be32);
3196
3197 for (i = count - 1; i >= 0; i--)
3198 if (m[i])
3199 break;
3200 i++;
3201 count = i ? i : 1;
3202 for (i = 0; i < count; i += 4)
3203 dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
3204 i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
3205 be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
3206 }
3207
3208 int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
3209 u32 in_mod, bool out_mbox_direct, bool reset_ok,
3210 char *in_mbox, size_t in_mbox_size,
3211 char *out_mbox, size_t out_mbox_size)
3212 {
3213 u8 status;
3214 int err;
3215
3216 BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
3217 if (!mlxsw_core->bus->cmd_exec)
3218 return -EOPNOTSUPP;
3219
3220 dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
3221 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
3222 if (in_mbox) {
3223 dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
3224 mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
3225 }
3226
3227 err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
3228 opcode_mod, in_mod, out_mbox_direct,
3229 in_mbox, in_mbox_size,
3230 out_mbox, out_mbox_size, &status);
3231
3232 if (!err && out_mbox) {
3233 dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
3234 mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
3235 }
3236
3237 if (reset_ok && err == -EIO &&
3238 status == MLXSW_CMD_STATUS_RUNNING_RESET) {
3239 err = 0;
3240 } else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
3241 dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
3242 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
3243 in_mod, status, mlxsw_cmd_status_str(status));
3244 } else if (err == -ETIMEDOUT) {
3245 dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
3246 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
3247 in_mod);
3248 }
3249
3250 return err;
3251 }
3252 EXPORT_SYMBOL(mlxsw_cmd_exec);
3253
3254 int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
3255 {
3256 return queue_delayed_work(mlxsw_wq, dwork, delay);
3257 }
3258 EXPORT_SYMBOL(mlxsw_core_schedule_dw);
3259
3260 bool mlxsw_core_schedule_work(struct work_struct *work)
3261 {
3262 return queue_work(mlxsw_owq, work);
3263 }
3264 EXPORT_SYMBOL(mlxsw_core_schedule_work);
3265
3266 void mlxsw_core_flush_owq(void)
3267 {
3268 flush_workqueue(mlxsw_owq);
3269 }
3270 EXPORT_SYMBOL(mlxsw_core_flush_owq);
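/* Example (a hypothetical sketch): deferring work to the mlxsw workqueues.
 * my_dwork and my_work_fn() are made-up names; the delayed work must be
 * cancelled (or the ordered workqueue flushed) before its owner goes away.
 *
 *	INIT_DELAYED_WORK(&my_dwork, my_work_fn);
 *	mlxsw_core_schedule_dw(&my_dwork, msecs_to_jiffies(100));
 *	...
 *	cancel_delayed_work_sync(&my_dwork);
 */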
3271
3272 int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
3273 const struct mlxsw_config_profile *profile,
3274 u64 *p_single_size, u64 *p_double_size,
3275 u64 *p_linear_size)
3276 {
3277 struct mlxsw_driver *driver = mlxsw_core->driver;
3278
3279 if (!driver->kvd_sizes_get)
3280 return -EINVAL;
3281
3282 return driver->kvd_sizes_get(mlxsw_core, profile,
3283 p_single_size, p_double_size,
3284 p_linear_size);
3285 }
3286 EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
3287
3288 int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox,
3289 struct mlxsw_res *res)
3290 {
3291 int index, i;
3292 u64 data;
3293 u16 id;
3294 int err;
3295
3296 mlxsw_cmd_mbox_zero(mbox);
3297
3298 for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES;
3299 index++) {
3300 err = mlxsw_cmd_query_resources(mlxsw_core, mbox, index);
3301 if (err)
3302 return err;
3303
3304 for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) {
3305 id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
3306 data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);
3307
3308 if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID)
3309 return 0;
3310
3311 mlxsw_res_parse(res, id, data);
3312 }
3313 }
3314
3315 /* The resource table end marker was not reached within the maximum
3316  * number of queries, so report an I/O error.
3317  */
3318 return -EIO;
3319 }
3320 EXPORT_SYMBOL(mlxsw_core_resources_query);
3321
3322 u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core)
3323 {
3324 return mlxsw_core->bus->read_frc_h(mlxsw_core->bus_priv);
3325 }
3326 EXPORT_SYMBOL(mlxsw_core_read_frc_h);
3327
3328 u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core)
3329 {
3330 return mlxsw_core->bus->read_frc_l(mlxsw_core->bus_priv);
3331 }
3332 EXPORT_SYMBOL(mlxsw_core_read_frc_l);
3333
3334 u32 mlxsw_core_read_utc_sec(struct mlxsw_core *mlxsw_core)
3335 {
3336 return mlxsw_core->bus->read_utc_sec(mlxsw_core->bus_priv);
3337 }
3338 EXPORT_SYMBOL(mlxsw_core_read_utc_sec);
3339
3340 u32 mlxsw_core_read_utc_nsec(struct mlxsw_core *mlxsw_core)
3341 {
3342 return mlxsw_core->bus->read_utc_nsec(mlxsw_core->bus_priv);
3343 }
3344 EXPORT_SYMBOL(mlxsw_core_read_utc_nsec);
3345
3346 bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core)
3347 {
3348 return mlxsw_core->driver->sdq_supports_cqe_v2;
3349 }
3350 EXPORT_SYMBOL(mlxsw_core_sdq_supports_cqe_v2);
3351
3352 void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core)
3353 {
3354 mlxsw_core->emad.enable_string_tlv = true;
3355 }
3356 EXPORT_SYMBOL(mlxsw_core_emad_string_tlv_enable);
3357
3358 static int __init mlxsw_core_module_init(void)
3359 {
3360 int err;
3361
3362 err = mlxsw_linecard_driver_register();
3363 if (err)
3364 return err;
3365
3366 mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
3367 if (!mlxsw_wq) {
3368 err = -ENOMEM;
3369 goto err_alloc_workqueue;
3370 }
3371 mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
3372 mlxsw_core_driver_name);
3373 if (!mlxsw_owq) {
3374 err = -ENOMEM;
3375 goto err_alloc_ordered_workqueue;
3376 }
3377 return 0;
3378
3379 err_alloc_ordered_workqueue:
3380 destroy_workqueue(mlxsw_wq);
3381 err_alloc_workqueue:
3382 mlxsw_linecard_driver_unregister();
3383 return err;
3384 }
3385
3386 static void __exit mlxsw_core_module_exit(void)
3387 {
3388 destroy_workqueue(mlxsw_owq);
3389 destroy_workqueue(mlxsw_wq);
3390 mlxsw_linecard_driver_unregister();
3391 }
3392
3393 module_init(mlxsw_core_module_init);
3394 module_exit(mlxsw_core_module_exit);
3395
3396 MODULE_LICENSE("Dual BSD/GPL");
3397 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
3398 MODULE_DESCRIPTION("Mellanox switch device core driver");