#include <linux/mlx5/driver.h>
#include "eswitch.h"
#include "priv.h"
#include "sf/dev/dev.h"
#include "mlx5_ifc_vhca_event.h"
#include "vhca_event.h"
#include "ecpf.h"
#define CREATE_TRACE_POINTS
#include "diag/sf_tracepoint.h"

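/* Per-port SF metadata: maps a devlink port index to the SF's software id,
 * hardware function id, owning controller and last known hardware (vhca)
 * state.
 */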
struct mlx5_sf {
	struct devlink_port dl_port;
	unsigned int port_index;
	u32 controller;
	u16 id;
	u16 hw_fn_id;
	u16 hw_state;
};

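/* Table of user-created SF ports, indexed by devlink port index. The refcount
 * is non-zero only while SF ports are enabled (eswitch in switchdev mode);
 * dropping the last reference completes disable_complete. sf_state_lock
 * serializes hw_state updates between devlink callbacks and the vhca event
 * handler.
 */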
struct mlx5_sf_table {
	struct mlx5_core_dev *dev;
	struct xarray port_indices;
	refcount_t refcount;
	struct completion disable_complete;
	struct mutex sf_state_lock;
	struct notifier_block esw_nb;
	struct notifier_block vhca_nb;
	u8 ecpu: 1;
};

static struct mlx5_sf *
mlx5_sf_lookup_by_index(struct mlx5_sf_table *table, unsigned int port_index)
{
	return xa_load(&table->port_indices, port_index);
}

static struct mlx5_sf *
mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id)
{
	unsigned long index;
	struct mlx5_sf *sf;

	xa_for_each(&table->port_indices, index, sf) {
		if (sf->hw_fn_id == fn_id)
			return sf;
	}
	return NULL;
}

static int mlx5_sf_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	return xa_insert(&table->port_indices, sf->port_index, sf, GFP_KERNEL);
}

static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	xa_erase(&table->port_indices, sf->port_index);
}

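/* Allocate an SF entry: validate the controller, reserve a hardware SF id for
 * the (controller, sfnum) pair, derive the hardware function id and devlink
 * port index from it, and publish the entry in the port_indices xarray.
 */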
static struct mlx5_sf *
mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw,
	      u32 controller, u32 sfnum, struct netlink_ext_ack *extack)
{
	unsigned int dl_port_index;
	struct mlx5_sf *sf;
	u16 hw_fn_id;
	int id_err;
	int err;

	if (!mlx5_esw_offloads_controller_valid(esw, controller)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid controller number");
		return ERR_PTR(-EINVAL);
	}

	id_err = mlx5_sf_hw_table_sf_alloc(table->dev, controller, sfnum);
	if (id_err < 0) {
		err = id_err;
		goto id_err;
	}

	sf = kzalloc(sizeof(*sf), GFP_KERNEL);
	if (!sf) {
		err = -ENOMEM;
		goto alloc_err;
	}
	sf->id = id_err;
	hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, controller, sf->id);
	dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id);
	sf->port_index = dl_port_index;
	sf->hw_fn_id = hw_fn_id;
	sf->hw_state = MLX5_VHCA_STATE_ALLOCATED;
	sf->controller = controller;

	err = mlx5_sf_id_insert(table, sf);
	if (err)
		goto insert_err;

	return sf;

insert_err:
	kfree(sf);
alloc_err:
	mlx5_sf_hw_table_sf_free(table->dev, controller, id_err);
id_err:
	if (err == -EEXIST)
		NL_SET_ERR_MSG_MOD(extack, "SF already exists. Choose a different sfnum");
	return ERR_PTR(err);
}

static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	mlx5_sf_id_erase(table, sf);
	mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id);
	trace_mlx5_sf_free(table->dev, sf->port_index, sf->controller, sf->hw_fn_id);
	kfree(sf);
}

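/* Users of the table (devlink callbacks and the vhca event handler) take a
 * temporary reference with mlx5_sf_table_try_get() and release it with
 * mlx5_sf_table_put(); the final put signals disable_complete so that table
 * disable can proceed.
 */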
static struct mlx5_sf_table *mlx5_sf_table_try_get(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_table *table = dev->priv.sf_table;

	if (!table)
		return NULL;

	return refcount_inc_not_zero(&table->refcount) ? table : NULL;
}

static void mlx5_sf_table_put(struct mlx5_sf_table *table)
{
	if (refcount_dec_and_test(&table->refcount))
		complete(&table->disable_complete);
}

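/* Map the firmware vhca state onto the devlink function state/opstate pair:
 * ACTIVE and IN_USE report as active, everything else as inactive; only
 * IN_USE and TEARDOWN_REQUEST report the function as attached to a driver.
 */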
static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
{
	switch (hw_state) {
	case MLX5_VHCA_STATE_ACTIVE:
	case MLX5_VHCA_STATE_IN_USE:
		return DEVLINK_PORT_FN_STATE_ACTIVE;
	case MLX5_VHCA_STATE_INVALID:
	case MLX5_VHCA_STATE_ALLOCATED:
	case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
	default:
		return DEVLINK_PORT_FN_STATE_INACTIVE;
	}
}

static enum devlink_port_fn_opstate mlx5_sf_to_devlink_opstate(u8 hw_state)
{
	switch (hw_state) {
	case MLX5_VHCA_STATE_IN_USE:
	case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
		return DEVLINK_PORT_FN_OPSTATE_ATTACHED;
	case MLX5_VHCA_STATE_INVALID:
	case MLX5_VHCA_STATE_ALLOCATED:
	case MLX5_VHCA_STATE_ACTIVE:
	default:
		return DEVLINK_PORT_FN_OPSTATE_DETACHED;
	}
}

static bool mlx5_sf_is_active(const struct mlx5_sf *sf)
{
	return sf->hw_state == MLX5_VHCA_STATE_ACTIVE || sf->hw_state == MLX5_VHCA_STATE_IN_USE;
}

int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port,
				      enum devlink_port_fn_state *state,
				      enum devlink_port_fn_opstate *opstate,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
	struct mlx5_sf_table *table;
	struct mlx5_sf *sf;
	int err = 0;

	table = mlx5_sf_table_try_get(dev);
	if (!table)
		return -EOPNOTSUPP;

	sf = mlx5_sf_lookup_by_index(table, dl_port->index);
	if (!sf) {
		err = -EOPNOTSUPP;
		goto sf_err;
	}
	mutex_lock(&table->sf_state_lock);
	*state = mlx5_sf_to_devlink_state(sf->hw_state);
	*opstate = mlx5_sf_to_devlink_opstate(sf->hw_state);
	mutex_unlock(&table->sf_state_lock);
sf_err:
	mlx5_sf_table_put(table);
	return err;
}

static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
			    struct netlink_ext_ack *extack)
{
	int err;

	if (mlx5_sf_is_active(sf))
		return 0;
	if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED) {
		NL_SET_ERR_MSG_MOD(extack, "SF is inactivated but it is still attached");
		return -EBUSY;
	}

	err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
	if (err)
		return err;

	sf->hw_state = MLX5_VHCA_STATE_ACTIVE;
	trace_mlx5_sf_activate(dev, sf->port_index, sf->controller, sf->hw_fn_id);
	return 0;
}

static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
{
	int err;

	if (!mlx5_sf_is_active(sf))
		return 0;

	err = mlx5_cmd_sf_disable_hca(dev, sf->hw_fn_id);
	if (err)
		return err;

	sf->hw_state = MLX5_VHCA_STATE_TEARDOWN_REQUEST;
	trace_mlx5_sf_deactivate(dev, sf->port_index, sf->controller, sf->hw_fn_id);
	return 0;
}

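/* Apply a requested devlink function state change under sf_state_lock so the
 * cached hw_state stays consistent with concurrent vhca events.
 */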
static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
			     struct mlx5_sf *sf,
			     enum devlink_port_fn_state state,
			     struct netlink_ext_ack *extack)
{
	int err = 0;

	mutex_lock(&table->sf_state_lock);
	if (state == mlx5_sf_to_devlink_state(sf->hw_state))
		goto out;
	if (state == DEVLINK_PORT_FN_STATE_ACTIVE)
		err = mlx5_sf_activate(dev, sf, extack);
	else if (state == DEVLINK_PORT_FN_STATE_INACTIVE)
		err = mlx5_sf_deactivate(dev, sf);
	else
		err = -EINVAL;
out:
	mutex_unlock(&table->sf_state_lock);
	return err;
}

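/* Devlink "port function set ... state" callback. Illustrative userspace
 * usage (the device and port index below are examples only):
 *   devlink port function set pci/0000:06:00.0/32768 state active
 */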
int mlx5_devlink_sf_port_fn_state_set(struct devlink_port *dl_port,
				      enum devlink_port_fn_state state,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
	struct mlx5_sf_table *table;
	struct mlx5_sf *sf;
	int err;

	table = mlx5_sf_table_try_get(dev);
	if (!table) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port state set is only supported in eswitch switchdev mode or SF ports are disabled.");
		return -EOPNOTSUPP;
	}
	sf = mlx5_sf_lookup_by_index(table, dl_port->index);
	if (!sf) {
		err = -ENODEV;
		goto out;
	}

	err = mlx5_sf_state_set(dev, table, sf, state, extack);
out:
	mlx5_sf_table_put(table);
	return err;
}

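/* Create an SF port: allocate the SF and its hardware id, then enable the
 * eswitch SF vport (representor) for it; on failure the SF is freed again so
 * no partial state is left behind.
 */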
static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
		       const struct devlink_port_new_attrs *new_attr,
		       struct netlink_ext_ack *extack,
		       unsigned int *new_port_index)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_sf *sf;
	int err;

	sf = mlx5_sf_alloc(table, esw, new_attr->controller, new_attr->sfnum, extack);
	if (IS_ERR(sf))
		return PTR_ERR(sf);

	err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, sf->hw_fn_id,
						new_attr->controller, new_attr->sfnum);
	if (err)
		goto esw_err;
	*new_port_index = sf->port_index;
	trace_mlx5_sf_add(dev, sf->port_index, sf->controller, sf->hw_fn_id, new_attr->sfnum);
	return 0;

esw_err:
	mlx5_sf_free(table, sf);
	return err;
}

static int
mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_attrs *new_attr,
		       struct netlink_ext_ack *extack)
{
	if (new_attr->flavour != DEVLINK_PORT_FLAVOUR_PCI_SF) {
		NL_SET_ERR_MSG_MOD(extack, "Driver supports only SF port addition");
		return -EOPNOTSUPP;
	}
	if (new_attr->port_index_valid) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Driver does not support user defined port index assignment");
		return -EOPNOTSUPP;
	}
	if (!new_attr->sfnum_valid) {
		NL_SET_ERR_MSG_MOD(extack,
				   "User must provide unique sfnum. Driver does not support auto assignment");
		return -EOPNOTSUPP;
	}
	if (new_attr->controller_valid && new_attr->controller &&
	    !mlx5_core_is_ecpf_esw_manager(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
		return -EOPNOTSUPP;
	}
	if (new_attr->pfnum != mlx5_get_dev_index(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied");
		return -EOPNOTSUPP;
	}
	return 0;
}

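/* Devlink SF port add callback. Illustrative userspace usage (the PCI
 * address, pfnum and sfnum below are examples only):
 *   devlink port add pci/0000:06:00.0 flavour pcisf pfnum 0 sfnum 88
 */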
int mlx5_devlink_sf_port_new(struct devlink *devlink,
			     const struct devlink_port_new_attrs *new_attr,
			     struct netlink_ext_ack *extack,
			     unsigned int *new_port_index)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_sf_table *table;
	int err;

	err = mlx5_sf_new_check_attr(dev, new_attr, extack);
	if (err)
		return err;

	table = mlx5_sf_table_try_get(dev);
	if (!table) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port add is only supported in eswitch switchdev mode or SF ports are disabled.");
		return -EOPNOTSUPP;
	}
	err = mlx5_sf_add(dev, table, new_attr, extack, new_port_index);
	mlx5_sf_table_put(table);
	return err;
}

static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	if (sf->hw_state == MLX5_VHCA_STATE_ALLOCATED) {
		mlx5_sf_free(table, sf);
	} else if (mlx5_sf_is_active(sf)) {
		/* The SF is still active or attached in hardware. Request
		 * teardown now and defer freeing of the hardware SF id
		 * instead of freeing it immediately, so that the id is not
		 * recycled while firmware may still be using the function.
		 */
		mlx5_cmd_sf_disable_hca(table->dev, sf->hw_fn_id);
		mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
		kfree(sf);
	} else {
		mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
		kfree(sf);
	}
}

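/* Devlink SF port delete callback. Illustrative userspace usage (the device
 * and port index below are examples only):
 *   devlink port del pci/0000:06:00.0/32768
 */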
int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index,
			     struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_sf_table *table;
	struct mlx5_sf *sf;
	int err = 0;

	table = mlx5_sf_table_try_get(dev);
	if (!table) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port del is only supported in eswitch switchdev mode or SF ports are disabled.");
		return -EOPNOTSUPP;
	}
	sf = mlx5_sf_lookup_by_index(table, port_index);
	if (!sf) {
		err = -ENODEV;
		goto sf_err;
	}

	mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id);
	mlx5_sf_id_erase(table, sf);

	mutex_lock(&table->sf_state_lock);
	mlx5_sf_dealloc(table, sf);
	mutex_unlock(&table->sf_state_lock);
sf_err:
	mlx5_sf_table_put(table);
	return err;
}

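/* Only hardware state transitions reported by firmware as part of SF driver
 * attach/detach (ACTIVE <-> IN_USE) or teardown completion
 * (TEARDOWN_REQUEST -> ALLOCATED) are accepted into the cached state.
 */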
static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state)
{
	if (sf->hw_state == MLX5_VHCA_STATE_ACTIVE && new_state == MLX5_VHCA_STATE_IN_USE)
		return true;

	if (sf->hw_state == MLX5_VHCA_STATE_IN_USE && new_state == MLX5_VHCA_STATE_ACTIVE)
		return true;

	if (sf->hw_state == MLX5_VHCA_STATE_TEARDOWN_REQUEST &&
	    new_state == MLX5_VHCA_STATE_ALLOCATED)
		return true;

	return false;
}

static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
{
	struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, vhca_nb);
	const struct mlx5_vhca_state_event *event = data;
	bool update = false;
	struct mlx5_sf *sf;

	table = mlx5_sf_table_try_get(table->dev);
	if (!table)
		return 0;

	mutex_lock(&table->sf_state_lock);
	sf = mlx5_sf_lookup_by_function_id(table, event->function_id);
	if (!sf)
		goto sf_err;

	/* Cache the new hardware state only for transitions that firmware
	 * reports as part of SF driver attach/detach or teardown completion;
	 * other events must not override the state driven by user commands.
	 */
	update = mlx5_sf_state_update_check(sf, event->new_vhca_state);
	if (update)
		sf->hw_state = event->new_vhca_state;
	trace_mlx5_sf_update_state(table->dev, sf->port_index, sf->controller,
				   sf->hw_fn_id, sf->hw_state);
sf_err:
	mutex_unlock(&table->sf_state_lock);
	mlx5_sf_table_put(table);
	return 0;
}

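/* SF port management follows the eswitch mode: entering switchdev mode arms
 * the table refcount, leaving it drops that reference, waits for all users
 * and tears down any remaining user-created SFs.
 */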
static void mlx5_sf_table_enable(struct mlx5_sf_table *table)
{
	init_completion(&table->disable_complete);
	refcount_set(&table->refcount, 1);
}

static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table)
{
	struct mlx5_eswitch *esw = table->dev->priv.eswitch;
	unsigned long index;
	struct mlx5_sf *sf;

	/* At this point the table refcount has dropped to zero, so no new
	 * user command can start and no vhca event handler is running. It is
	 * safe to destroy all remaining user-created SF ports.
	 */
	xa_for_each(&table->port_indices, index, sf) {
		mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id);
		mlx5_sf_id_erase(table, sf);
		mlx5_sf_dealloc(table, sf);
	}
}

static void mlx5_sf_table_disable(struct mlx5_sf_table *table)
{
	if (!refcount_read(&table->refcount))
		return;

	/* Balance the reference taken by mlx5_sf_table_enable() and wait for
	 * any in-flight devlink callbacks or vhca event handlers to drop
	 * theirs before tearing down the remaining SFs.
	 */
	mlx5_sf_table_put(table);
	wait_for_completion(&table->disable_complete);

	mlx5_sf_deactivate_all(table);
}

static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, esw_nb);
	const struct mlx5_esw_event_info *mode = data;

	switch (mode->new_mode) {
	case MLX5_ESWITCH_OFFLOADS:
		mlx5_sf_table_enable(table);
		break;
	case MLX5_ESWITCH_LEGACY:
		mlx5_sf_table_disable(table);
		break;
	default:
		break;
	}

	return 0;
}

static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
{
	return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) &&
	       mlx5_sf_hw_table_supported(dev);
}

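/* Set up the SF port table at init time when the SF hardware table and vhca
 * events are supported: register for eswitch mode changes (to enable or
 * disable the table) and for vhca state events (to track SF hardware state).
 */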
int mlx5_sf_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_table *table;
	int err;

	if (!mlx5_sf_table_supported(dev) || !mlx5_vhca_event_supported(dev))
		return 0;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	mutex_init(&table->sf_state_lock);
	table->dev = dev;
	xa_init(&table->port_indices);
	dev->priv.sf_table = table;
	refcount_set(&table->refcount, 0);
	table->esw_nb.notifier_call = mlx5_sf_esw_event;
	err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb);
	if (err)
		goto reg_err;

	table->vhca_nb.notifier_call = mlx5_sf_vhca_event;
	err = mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb);
	if (err)
		goto vhca_err;

	return 0;

vhca_err:
	mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
reg_err:
	mutex_destroy(&table->sf_state_lock);
	kfree(table);
	dev->priv.sf_table = NULL;
	return err;
}

void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_table *table = dev->priv.sf_table;

	if (!table)
		return;

	mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
	mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
	WARN_ON(refcount_read(&table->refcount));
	mutex_destroy(&table->sf_state_lock);
	WARN_ON(!xa_empty(&table->port_indices));
	kfree(table);
}