0001
0002
0003
0004 #include <linux/kernel.h>
0005 #include <linux/errno.h>
0006 #include <linux/netdevice.h>
0007 #include <net/pkt_cls.h>
0008 #include <net/red.h>
0009
0010 #include "spectrum.h"
0011 #include "spectrum_span.h"
0012 #include "reg.h"
0013
/* Map a PRIO/ETS band number to a hardware traffic class. Band 0 is the
 * highest-priority band, while higher-numbered TCLASSes are served first,
 * hence the inversion. Arguments are parenthesized so that expression
 * arguments (e.g. "i + 1") expand with the intended precedence.
 */
#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - (band) - 1)
/* Same mapping for a 1-based child class number (TC_H_MIN of a classid). */
#define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \
	MLXSW_SP_PRIO_BAND_TO_TCLASS((child) - 1)
0017
/* Kinds of qdiscs this driver knows how to offload to the ASIC. */
enum mlxsw_sp_qdisc_type {
	MLXSW_SP_QDISC_NO_QDISC,
	MLXSW_SP_QDISC_RED,
	MLXSW_SP_QDISC_PRIO,
	MLXSW_SP_QDISC_ETS,
	MLXSW_SP_QDISC_TBF,
	MLXSW_SP_QDISC_FIFO,
};
0026
0027 struct mlxsw_sp_qdisc;
0028
/* Per-qdisc-type operations. Callbacks that a given type does not need may
 * be left NULL; callers check before invoking.
 */
struct mlxsw_sp_qdisc_ops {
	enum mlxsw_sp_qdisc_type type;
	/* Validate offload parameters before any hardware is touched. */
	int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
			    void *params);
	/* Apply the (new or changed) configuration to the hardware. */
	int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
	int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			 struct tc_qopt_offload_stats *stats_ptr);
	int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			  void *xstats_ptr);
	/* Reset the baseline counters used to compute stat deltas. */
	void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	/* Called when the qdisc stops being offloaded without being
	 * destroyed, so software counters can be adjusted.
	 */
	void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
	/* Resolve a classid within this qdisc to the child qdisc slot. */
	struct mlxsw_sp_qdisc *(*find_class)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
					     u32 parent);
	/* Number of child slots to preallocate for this qdisc type. */
	unsigned int num_classes;

	/* Answer, for a given child, which priorities map to it and which
	 * hardware traffic class it occupies. When NULL, the query is
	 * forwarded further up the tree (see the mlxsw_sp_qdisc_get_*
	 * helpers below).
	 */
	u8 (*get_prio_bitmap)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct mlxsw_sp_qdisc *child);
	int (*get_tclass_num)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct mlxsw_sp_qdisc *child);
};
0059
/* Per-band state of an offloaded PRIO/ETS qdisc. */
struct mlxsw_sp_qdisc_ets_band {
	u8 prio_bitmap;		/* which 802.1p priorities feed this band */
	int tclass_num;		/* hardware traffic class backing the band */
};

struct mlxsw_sp_qdisc_ets_data {
	struct mlxsw_sp_qdisc_ets_band bands[IEEE_8021QAZ_MAX_TCS];
};
0068
/* One node in the offloaded qdisc tree. A node is "offloaded" iff ->ops is
 * non-NULL; otherwise it is an empty slot.
 */
struct mlxsw_sp_qdisc {
	u32 handle;		/* TC handle; TC_H_UNSPEC when not offloaded */
	/* Baseline for qdisc-specific (xstats) counters, so that deltas
	 * since offload can be reported.
	 */
	union {
		struct red_stats red;
	} xstats_base;
	/* Baseline for generic counters, same delta scheme. */
	struct mlxsw_sp_qdisc_stats {
		u64 tx_bytes;
		u64 tx_packets;
		u64 drops;
		u64 overlimits;
		u64 backlog;
	} stats_base;

	/* Qdisc-type-specific private data. */
	union {
		struct mlxsw_sp_qdisc_ets_data *ets_data;
	};

	struct mlxsw_sp_qdisc_ops *ops;
	struct mlxsw_sp_qdisc *parent;	/* NULL only for the root qdisc */
	struct mlxsw_sp_qdisc *qdiscs;	/* array of num_classes children */
	unsigned int num_classes;
};
0091
/* Per-port qdisc offload state. */
struct mlxsw_sp_qdisc_state {
	struct mlxsw_sp_qdisc root_qdisc;

	/* When a classful qdisc is set up, notifications for its implicit
	 * per-band FIFO children can arrive before the parent itself is
	 * offloaded, so their parent handle cannot be resolved yet.
	 * future_handle records the parent handle those notifications named,
	 * and future_fifos records which bands had a FIFO announced, so the
	 * FIFOs can be instantiated once the parent is offloaded.
	 * NOTE(review): inferred from mlxsw_sp_qdisc_future_* usage below —
	 * confirm against the FIFO replace path.
	 */
	u32 future_handle;
	bool future_fifos[IEEE_8021QAZ_MAX_TCS];
	struct mutex lock;	/* serializes all qdisc offload operations */
};
0110
0111 static bool
0112 mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle)
0113 {
0114 return mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->handle == handle;
0115 }
0116
0117 static struct mlxsw_sp_qdisc *
0118 mlxsw_sp_qdisc_walk(struct mlxsw_sp_qdisc *qdisc,
0119 struct mlxsw_sp_qdisc *(*pre)(struct mlxsw_sp_qdisc *,
0120 void *),
0121 void *data)
0122 {
0123 struct mlxsw_sp_qdisc *tmp;
0124 unsigned int i;
0125
0126 if (pre) {
0127 tmp = pre(qdisc, data);
0128 if (tmp)
0129 return tmp;
0130 }
0131
0132 if (qdisc->ops) {
0133 for (i = 0; i < qdisc->num_classes; i++) {
0134 tmp = &qdisc->qdiscs[i];
0135 if (qdisc->ops) {
0136 tmp = mlxsw_sp_qdisc_walk(tmp, pre, data);
0137 if (tmp)
0138 return tmp;
0139 }
0140 }
0141 }
0142
0143 return NULL;
0144 }
0145
0146 static struct mlxsw_sp_qdisc *
0147 mlxsw_sp_qdisc_walk_cb_find(struct mlxsw_sp_qdisc *qdisc, void *data)
0148 {
0149 u32 parent = *(u32 *)data;
0150
0151 if (qdisc->ops && TC_H_MAJ(qdisc->handle) == TC_H_MAJ(parent)) {
0152 if (qdisc->ops->find_class)
0153 return qdisc->ops->find_class(qdisc, parent);
0154 }
0155
0156 return NULL;
0157 }
0158
0159 static struct mlxsw_sp_qdisc *
0160 mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent)
0161 {
0162 struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
0163
0164 if (!qdisc_state)
0165 return NULL;
0166 if (parent == TC_H_ROOT)
0167 return &qdisc_state->root_qdisc;
0168 return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc,
0169 mlxsw_sp_qdisc_walk_cb_find, &parent);
0170 }
0171
0172 static struct mlxsw_sp_qdisc *
0173 mlxsw_sp_qdisc_walk_cb_find_by_handle(struct mlxsw_sp_qdisc *qdisc, void *data)
0174 {
0175 u32 handle = *(u32 *)data;
0176
0177 if (qdisc->ops && qdisc->handle == handle)
0178 return qdisc;
0179 return NULL;
0180 }
0181
0182 static struct mlxsw_sp_qdisc *
0183 mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
0184 {
0185 struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
0186
0187 if (!qdisc_state)
0188 return NULL;
0189 return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc,
0190 mlxsw_sp_qdisc_walk_cb_find_by_handle,
0191 &handle);
0192 }
0193
0194 static void
0195 mlxsw_sp_qdisc_reduce_parent_backlog(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
0196 {
0197 struct mlxsw_sp_qdisc *tmp;
0198
0199 for (tmp = mlxsw_sp_qdisc->parent; tmp; tmp = tmp->parent)
0200 tmp->stats_base.backlog -= mlxsw_sp_qdisc->stats_base.backlog;
0201 }
0202
0203 static u8 mlxsw_sp_qdisc_get_prio_bitmap(struct mlxsw_sp_port *mlxsw_sp_port,
0204 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
0205 {
0206 struct mlxsw_sp_qdisc *parent = mlxsw_sp_qdisc->parent;
0207
0208 if (!parent)
0209 return 0xff;
0210 if (!parent->ops->get_prio_bitmap)
0211 return mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port, parent);
0212 return parent->ops->get_prio_bitmap(parent, mlxsw_sp_qdisc);
0213 }
0214
0215 #define MLXSW_SP_PORT_DEFAULT_TCLASS 0
0216
0217 static int mlxsw_sp_qdisc_get_tclass_num(struct mlxsw_sp_port *mlxsw_sp_port,
0218 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
0219 {
0220 struct mlxsw_sp_qdisc *parent = mlxsw_sp_qdisc->parent;
0221
0222 if (!parent)
0223 return MLXSW_SP_PORT_DEFAULT_TCLASS;
0224 if (!parent->ops->get_tclass_num)
0225 return mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, parent);
0226 return parent->ops->get_tclass_num(parent, mlxsw_sp_qdisc);
0227 }
0228
/* Tear down an offloaded qdisc and, recursively, all of its children.
 * Destroying the root additionally reverts the port headroom to DCB mode.
 * Returns the first error encountered, preferring the headroom error.
 */
static int
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
	int err_hdroom = 0;
	int err = 0;
	int i;

	if (!mlxsw_sp_qdisc)
		return 0;

	if (root_qdisc == mlxsw_sp_qdisc) {
		/* Root is going away: hand buffer headroom back to DCB. */
		struct mlxsw_sp_hdroom hdroom = *mlxsw_sp_port->hdroom;

		hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB;
		mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
		err_hdroom = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	}

	/* Not offloaded (empty slot): nothing more to undo. */
	if (!mlxsw_sp_qdisc->ops)
		return 0;

	/* Children first, then this node. */
	for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++)
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
				       &mlxsw_sp_qdisc->qdiscs[i]);
	/* Keep ancestors' backlog baselines consistent after removal. */
	mlxsw_sp_qdisc_reduce_parent_backlog(mlxsw_sp_qdisc);
	if (mlxsw_sp_qdisc->ops->destroy)
		err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
						   mlxsw_sp_qdisc);
	if (mlxsw_sp_qdisc->ops->clean_stats)
		mlxsw_sp_qdisc->ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);

	/* Mark the slot empty and free the children array. */
	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
	mlxsw_sp_qdisc->ops = NULL;
	mlxsw_sp_qdisc->num_classes = 0;
	kfree(mlxsw_sp_qdisc->qdiscs);
	mlxsw_sp_qdisc->qdiscs = NULL;
	return err_hdroom ?: err;
}
0271
/* Constraints accumulated while validating a qdisc tree top-down. Each flag
 * forbids a qdisc type from appearing anywhere below the point where the
 * flag was set (the struct is passed by value, so flags only propagate
 * downwards).
 */
struct mlxsw_sp_qdisc_tree_validate {
	bool forbid_ets;	/* PRIO/ETS no longer allowed below here */
	bool forbid_root_tbf;	/* a port-level (root) TBF not allowed */
	bool forbid_tbf;	/* any further TBF not allowed */
	bool forbid_red;	/* RED not allowed below here */
};
0278
0279 static int
0280 __mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
0281 struct mlxsw_sp_qdisc_tree_validate validate);
0282
0283 static int
0284 mlxsw_sp_qdisc_tree_validate_children(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
0285 struct mlxsw_sp_qdisc_tree_validate validate)
0286 {
0287 unsigned int i;
0288 int err;
0289
0290 for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
0291 err = __mlxsw_sp_qdisc_tree_validate(&mlxsw_sp_qdisc->qdiscs[i],
0292 validate);
0293 if (err)
0294 return err;
0295 }
0296
0297 return 0;
0298 }
0299
/* Check that this node's type is permitted by the constraints inherited
 * from its ancestors, tighten the constraints accordingly, and recurse.
 * @validate is by value: constraints set here affect only the subtree.
 */
static int
__mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			       struct mlxsw_sp_qdisc_tree_validate validate)
{
	/* Empty slots impose and violate no constraints. */
	if (!mlxsw_sp_qdisc->ops)
		return 0;

	switch (mlxsw_sp_qdisc->ops->type) {
	case MLXSW_SP_QDISC_FIFO:
		break;
	case MLXSW_SP_QDISC_RED:
		if (validate.forbid_red)
			return -EINVAL;
		/* Below a RED, only a FIFO (or a non-root TBF) may follow. */
		validate.forbid_red = true;
		validate.forbid_root_tbf = true;
		validate.forbid_ets = true;
		break;
	case MLXSW_SP_QDISC_TBF:
		if (validate.forbid_root_tbf) {
			if (validate.forbid_tbf)
				return -EINVAL;
			/* This is a TBF deeper in the hierarchy. */
			validate.forbid_tbf = true;
			validate.forbid_ets = true;
		} else {
			/* This is a root (port-level) TBF; a second one at
			 * that level is not allowed further down.
			 */
			validate.forbid_root_tbf = true;
		}
		break;
	case MLXSW_SP_QDISC_PRIO:
	case MLXSW_SP_QDISC_ETS:
		if (validate.forbid_ets)
			return -EINVAL;
		validate.forbid_root_tbf = true;
		validate.forbid_ets = true;
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	return mlxsw_sp_qdisc_tree_validate_children(mlxsw_sp_qdisc, validate);
}
0343
0344 static int mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_port *mlxsw_sp_port)
0345 {
0346 struct mlxsw_sp_qdisc_tree_validate validate = {};
0347 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
0348
0349 mlxsw_sp_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
0350 return __mlxsw_sp_qdisc_tree_validate(mlxsw_sp_qdisc, validate);
0351 }
0352
/* Offload a new qdisc into an empty slot: validate parameters, allocate
 * child slots, switch root headroom to TC mode if needed, validate the
 * resulting tree shape, and program the hardware. On any failure the slot
 * and the headroom are restored to their previous state.
 */
static int mlxsw_sp_qdisc_create(struct mlxsw_sp_port *mlxsw_sp_port,
				 u32 handle,
				 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				 struct mlxsw_sp_qdisc_ops *ops, void *params)
{
	struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
	struct mlxsw_sp_hdroom orig_hdroom;
	unsigned int i;
	int err;

	err = ops->check_params(mlxsw_sp_port, params);
	if (err)
		return err;

	/* Preallocate one slot per class so classful qdiscs can graft
	 * children later without further allocation.
	 */
	if (ops->num_classes) {
		mlxsw_sp_qdisc->qdiscs = kcalloc(ops->num_classes,
						 sizeof(*mlxsw_sp_qdisc->qdiscs),
						 GFP_KERNEL);
		if (!mlxsw_sp_qdisc->qdiscs)
			return -ENOMEM;

		for (i = 0; i < ops->num_classes; i++)
			mlxsw_sp_qdisc->qdiscs[i].parent = mlxsw_sp_qdisc;
	}

	/* Keep a copy so the headroom can be rolled back on failure. */
	orig_hdroom = *mlxsw_sp_port->hdroom;
	if (root_qdisc == mlxsw_sp_qdisc) {
		struct mlxsw_sp_hdroom hdroom = orig_hdroom;

		hdroom.mode = MLXSW_SP_HDROOM_MODE_TC;
		mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

		err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
		if (err)
			goto err_hdroom_configure;
	}

	/* Mark the slot occupied before validating, so the validator sees
	 * the tree as it would be; undone on error below.
	 */
	mlxsw_sp_qdisc->num_classes = ops->num_classes;
	mlxsw_sp_qdisc->ops = ops;
	mlxsw_sp_qdisc->handle = handle;
	err = mlxsw_sp_qdisc_tree_validate(mlxsw_sp_port);
	if (err)
		goto err_replace;

	err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
	if (err)
		goto err_replace;

	return 0;

err_replace:
	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
	mlxsw_sp_qdisc->ops = NULL;
	mlxsw_sp_qdisc->num_classes = 0;
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
err_hdroom_configure:
	kfree(mlxsw_sp_qdisc->qdiscs);
	mlxsw_sp_qdisc->qdiscs = NULL;
	return err;
}
0415
/* Reconfigure an already-offloaded qdisc of the same type. On failure the
 * qdisc is unoffloaded and destroyed rather than left half-configured.
 */
static int
mlxsw_sp_qdisc_change(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
		      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params)
{
	struct mlxsw_sp_qdisc_ops *ops = mlxsw_sp_qdisc->ops;
	int err;

	err = ops->check_params(mlxsw_sp_port, params);
	if (err)
		goto unoffload;

	err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
	if (err)
		goto unoffload;

	/* Check if the qdisc changed. That includes a situation where an
	 * object was replaced by an object of the same type. In that case
	 * the statistics baselines must be reset so that deltas start from
	 * the new qdisc's creation.
	 */
	if (mlxsw_sp_qdisc->handle != handle) {
		if (ops->clean_stats)
			ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
	}

	mlxsw_sp_qdisc->handle = handle;
	return 0;

unoffload:
	/* Give the qdisc a chance to fix up software counters first. */
	if (ops->unoffload)
		ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);

	mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	return err;
}
0450
/* Offload a qdisc at the given slot, either creating it fresh or changing
 * the existing offload in place when the type matches.
 */
static int
mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
		       struct mlxsw_sp_qdisc_ops *ops, void *params)
{
	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
		/* A qdisc of a different type currently occupies this slot;
		 * destroy it first so a fresh one can be created below.
		 */
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);

	if (!mlxsw_sp_qdisc->ops)
		return mlxsw_sp_qdisc_create(mlxsw_sp_port, handle,
					     mlxsw_sp_qdisc, ops, params);
	else
		return mlxsw_sp_qdisc_change(mlxsw_sp_port, handle,
					     mlxsw_sp_qdisc, params);
}
0471
0472 static int
0473 mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
0474 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
0475 struct tc_qopt_offload_stats *stats_ptr)
0476 {
0477 if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
0478 mlxsw_sp_qdisc->ops->get_stats)
0479 return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
0480 mlxsw_sp_qdisc,
0481 stats_ptr);
0482
0483 return -EOPNOTSUPP;
0484 }
0485
0486 static int
0487 mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
0488 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
0489 void *xstats_ptr)
0490 {
0491 if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
0492 mlxsw_sp_qdisc->ops->get_xstats)
0493 return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
0494 mlxsw_sp_qdisc,
0495 xstats_ptr);
0496
0497 return -EOPNOTSUPP;
0498 }
0499
0500 static u64
0501 mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
0502 {
0503 return xstats->backlog[tclass_num] +
0504 xstats->backlog[tclass_num + 8];
0505 }
0506
0507 static u64
0508 mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
0509 {
0510 return xstats->tail_drop[tclass_num] +
0511 xstats->tail_drop[tclass_num + 8];
0512 }
0513
0514 static void
0515 mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
0516 u8 prio_bitmap, u64 *tx_packets,
0517 u64 *tx_bytes)
0518 {
0519 int i;
0520
0521 *tx_packets = 0;
0522 *tx_bytes = 0;
0523 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
0524 if (prio_bitmap & BIT(i)) {
0525 *tx_packets += xstats->tx_packets[i];
0526 *tx_bytes += xstats->tx_bytes[i];
0527 }
0528 }
0529 }
0530
/* Accumulate the raw hardware counters relevant to this qdisc (by its
 * priority bitmap and traffic class) into the caller-provided totals.
 * The outputs are added to, not overwritten.
 */
static void
mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				u64 *p_tx_bytes, u64 *p_tx_packets,
				u64 *p_drops, u64 *p_backlog)
{
	struct mlxsw_sp_port_xstats *xstats;
	u64 tx_bytes, tx_packets;
	u8 prio_bitmap;
	int tclass_num;

	prio_bitmap = mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port,
						     mlxsw_sp_qdisc);
	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
						   mlxsw_sp_qdisc);
	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	mlxsw_sp_qdisc_bstats_per_priority_get(xstats, prio_bitmap,
					       &tx_packets, &tx_bytes);

	*p_tx_packets += tx_packets;
	*p_tx_bytes += tx_bytes;
	/* Drops seen by this qdisc are WRED drops plus tail drops. */
	*p_drops += xstats->wred_drop[tclass_num] +
		    mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
	*p_backlog += mlxsw_sp_xstats_backlog(xstats, tclass_num);
}
0556
0557 static void
0558 mlxsw_sp_qdisc_update_stats(struct mlxsw_sp *mlxsw_sp,
0559 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
0560 u64 tx_bytes, u64 tx_packets,
0561 u64 drops, u64 backlog,
0562 struct tc_qopt_offload_stats *stats_ptr)
0563 {
0564 struct mlxsw_sp_qdisc_stats *stats_base = &mlxsw_sp_qdisc->stats_base;
0565
0566 tx_bytes -= stats_base->tx_bytes;
0567 tx_packets -= stats_base->tx_packets;
0568 drops -= stats_base->drops;
0569 backlog -= stats_base->backlog;
0570
0571 _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
0572 stats_ptr->qstats->drops += drops;
0573 stats_ptr->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp, backlog);
0574
0575 stats_base->backlog += backlog;
0576 stats_base->drops += drops;
0577 stats_base->tx_bytes += tx_bytes;
0578 stats_base->tx_packets += tx_packets;
0579 }
0580
0581 static void
0582 mlxsw_sp_qdisc_get_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
0583 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
0584 struct tc_qopt_offload_stats *stats_ptr)
0585 {
0586 u64 tx_packets = 0;
0587 u64 tx_bytes = 0;
0588 u64 backlog = 0;
0589 u64 drops = 0;
0590
0591 mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
0592 &tx_bytes, &tx_packets,
0593 &drops, &backlog);
0594 mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
0595 tx_bytes, tx_packets, drops, backlog,
0596 stats_ptr);
0597 }
0598
/* Program WRED/ECN for one traffic class: first write the congestion
 * profile thresholds (CWTP register), then bind the class to that profile
 * and enable WRED/ECN marking (CWTPM register).
 */
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
				  int tclass_num, u32 min, u32 max,
				  u32 probability, bool is_wred, bool is_ecn)
{
	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
	char cwtp_cmd[MLXSW_REG_CWTP_LEN];
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num);
	/* Thresholds must be multiples of the hardware minimum step. */
	mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE,
				    roundup(min, MLXSW_REG_CWTP_MIN_VALUE),
				    roundup(max, MLXSW_REG_CWTP_MIN_VALUE),
				    probability);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd);
	if (err)
		return err;

	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
			     MLXSW_REG_CWTP_DEFAULT_PROFILE, is_wred, is_ecn);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}
0624
/* Disable WRED/ECN for one traffic class by rebinding it to the reset
 * profile with marking turned off.
 */
static int
mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
				   int tclass_num)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];

	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
			     MLXSW_REG_CWTPM_RESET_PROFILE, false, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}
0636
/* Snapshot the current hardware counters as the RED qdisc's statistics
 * baselines, so subsequent stat reads report deltas from this moment.
 */
static void
mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
					struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct red_stats *red_base;
	u8 prio_bitmap;
	int tclass_num;

	prio_bitmap = mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port,
						     mlxsw_sp_qdisc);
	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
						   mlxsw_sp_qdisc);
	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats_base = &mlxsw_sp_qdisc->stats_base;
	red_base = &mlxsw_sp_qdisc->xstats_base.red;

	mlxsw_sp_qdisc_bstats_per_priority_get(xstats, prio_bitmap,
					       &stats_base->tx_packets,
					       &stats_base->tx_bytes);
	/* RED xstats map: ECN marks -> prob_mark, WRED drops -> prob_drop,
	 * tail drops -> pdrop.
	 */
	red_base->prob_mark = xstats->tc_ecn[tclass_num];
	red_base->prob_drop = xstats->wred_drop[tclass_num];
	red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);

	/* Overlimits are everything RED acted on; drops are actual losses. */
	stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
	stats_base->drops = red_base->prob_drop + red_base->pdrop;

	stats_base->backlog = 0;
}
0667
/* Tear down a RED offload: turn congestion marking off for its class. */
static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	int tclass_num;

	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
						   mlxsw_sp_qdisc);
	return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num);
}
0677
0678 static int
0679 mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
0680 void *params)
0681 {
0682 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
0683 struct tc_red_qopt_offload_params *p = params;
0684
0685 if (p->min > p->max) {
0686 dev_err(mlxsw_sp->bus_info->dev,
0687 "spectrum: RED: min %u is bigger then max %u\n", p->min,
0688 p->max);
0689 return -EINVAL;
0690 }
0691 if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core,
0692 GUARANTEED_SHARED_BUFFER)) {
0693 dev_err(mlxsw_sp->bus_info->dev,
0694 "spectrum: RED: max value %u is too big\n", p->max);
0695 return -EINVAL;
0696 }
0697 if (p->min == 0 || p->max == 0) {
0698 dev_err(mlxsw_sp->bus_info->dev,
0699 "spectrum: RED: 0 value is illegal for min and max\n");
0700 return -EINVAL;
0701 }
0702 return 0;
0703 }
0704
0705 static int
0706 mlxsw_sp_qdisc_future_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port,
0707 u32 handle, unsigned int band,
0708 struct mlxsw_sp_qdisc *child_qdisc);
0709 static void
0710 mlxsw_sp_qdisc_future_fifos_init(struct mlxsw_sp_port *mlxsw_sp_port,
0711 u32 handle);
0712
/* Program a RED offload: materialize any FIFO child announced before this
 * qdisc existed, then convert the TC parameters to hardware units and
 * enable congestion marking on the class.
 */
static int
mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct tc_red_qopt_offload_params *p = params;
	int tclass_num;
	u32 min, max;
	u64 prob;
	int err;

	err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle, 0,
						 &mlxsw_sp_qdisc->qdiscs[0]);
	if (err)
		return err;
	mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);

	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
						   mlxsw_sp_qdisc);

	/* Calculate the drop probability in percent. p->probability is a
	 * 32-bit fixed-point fraction, hence the two divisions by 2^16.
	 */
	prob = p->probability;
	prob *= 100;
	prob = DIV_ROUND_UP(prob, 1 << 16);
	prob = DIV_ROUND_UP(prob, 1 << 16);
	/* Thresholds are configured in hardware cells, not bytes. */
	min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
	max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
	return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num,
						 min, max, prob,
						 !p->is_nodrop, p->is_ecn);
}
0745
0746 static void
0747 mlxsw_sp_qdisc_leaf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
0748 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
0749 struct gnet_stats_queue *qstats)
0750 {
0751 u64 backlog;
0752
0753 backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
0754 mlxsw_sp_qdisc->stats_base.backlog);
0755 qstats->backlog -= backlog;
0756 mlxsw_sp_qdisc->stats_base.backlog = 0;
0757 }
0758
0759 static void
0760 mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
0761 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
0762 void *params)
0763 {
0764 struct tc_red_qopt_offload_params *p = params;
0765
0766 mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
0767 }
0768
/* Report RED-specific counters as deltas against the stored baselines,
 * then advance the baselines. The result is accumulated into @xstats_ptr.
 */
static int
mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      void *xstats_ptr)
{
	struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
	struct mlxsw_sp_port_xstats *xstats;
	struct red_stats *res = xstats_ptr;
	int early_drops, marks, pdrops;
	int tclass_num;

	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
						   mlxsw_sp_qdisc);
	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;

	/* Deltas since the last read (or since clean_stats). */
	early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
	marks = xstats->tc_ecn[tclass_num] - xstats_base->prob_mark;
	pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
		 xstats_base->pdrop;

	res->pdrop += pdrops;
	res->prob_drop += early_drops;
	res->prob_mark += marks;

	/* Advance baselines so the same delta is not reported twice. */
	xstats_base->pdrop += pdrops;
	xstats_base->prob_drop += early_drops;
	xstats_base->prob_mark += marks;
	return 0;
}
0798
/* Generic RED stats: common TC stats plus the overlimits counter, which
 * for RED is WRED drops + ECN marks (everything RED acted on).
 */
static int
mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     struct tc_qopt_offload_stats *stats_ptr)
{
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	u64 overlimits;
	int tclass_num;

	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
						   mlxsw_sp_qdisc);
	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats_base = &mlxsw_sp_qdisc->stats_base;

	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
	/* Delta against the baseline, then advance the baseline. */
	overlimits = xstats->wred_drop[tclass_num] +
		     xstats->tc_ecn[tclass_num] - stats_base->overlimits;

	stats_ptr->qstats->overlimits += overlimits;
	stats_base->overlimits += overlimits;

	return 0;
}
0823
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_leaf_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			       u32 parent)
{
	/* RED and TBF are formally classful, but every class reference
	 * resolves to the same single child slot, so ignore @parent.
	 */
	return &mlxsw_sp_qdisc->qdiscs[0];
}
0833
/* RED offload operations. num_classes is 1: RED keeps a single child slot
 * (typically an invisible FIFO).
 */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
	.type = MLXSW_SP_QDISC_RED,
	.check_params = mlxsw_sp_qdisc_red_check_params,
	.replace = mlxsw_sp_qdisc_red_replace,
	.unoffload = mlxsw_sp_qdisc_red_unoffload,
	.destroy = mlxsw_sp_qdisc_red_destroy,
	.get_stats = mlxsw_sp_qdisc_get_red_stats,
	.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
	.find_class = mlxsw_sp_qdisc_leaf_find_class,
	.num_classes = 1,
};
0846
0847 static int mlxsw_sp_qdisc_graft(struct mlxsw_sp_port *mlxsw_sp_port,
0848 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
0849 u8 band, u32 child_handle);
0850
0851 static int __mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
0852 struct tc_red_qopt_offload *p)
0853 {
0854 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
0855
0856 mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
0857 if (!mlxsw_sp_qdisc)
0858 return -EOPNOTSUPP;
0859
0860 if (p->command == TC_RED_REPLACE)
0861 return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
0862 mlxsw_sp_qdisc,
0863 &mlxsw_sp_qdisc_ops_red,
0864 &p->set);
0865
0866 if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
0867 return -EOPNOTSUPP;
0868
0869 switch (p->command) {
0870 case TC_RED_DESTROY:
0871 return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
0872 case TC_RED_XSTATS:
0873 return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
0874 p->xstats);
0875 case TC_RED_STATS:
0876 return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
0877 &p->stats);
0878 case TC_RED_GRAFT:
0879 return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, 0,
0880 p->child_handle);
0881 default:
0882 return -EOPNOTSUPP;
0883 }
0884 }
0885
0886 int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
0887 struct tc_red_qopt_offload *p)
0888 {
0889 int err;
0890
0891 mutex_lock(&mlxsw_sp_port->qdisc->lock);
0892 err = __mlxsw_sp_setup_tc_red(mlxsw_sp_port, p);
0893 mutex_unlock(&mlxsw_sp_port->qdisc->lock);
0894
0895 return err;
0896 }
0897
0898 static void
0899 mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
0900 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
0901 {
0902 u64 backlog_cells = 0;
0903 u64 tx_packets = 0;
0904 u64 tx_bytes = 0;
0905 u64 drops = 0;
0906
0907 mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
0908 &tx_bytes, &tx_packets,
0909 &drops, &backlog_cells);
0910
0911 mlxsw_sp_qdisc->stats_base.tx_packets = tx_packets;
0912 mlxsw_sp_qdisc->stats_base.tx_bytes = tx_bytes;
0913 mlxsw_sp_qdisc->stats_base.drops = drops;
0914 mlxsw_sp_qdisc->stats_base.backlog = 0;
0915 }
0916
/* Pick the scheduler hierarchy level at which a TBF shaper is configured:
 * the port level for a root TBF, otherwise the subgroup level.
 */
static enum mlxsw_reg_qeec_hr
mlxsw_sp_qdisc_tbf_hr(struct mlxsw_sp_port *mlxsw_sp_port,
		      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	if (mlxsw_sp_qdisc == &mlxsw_sp_port->qdisc->root_qdisc)
		return MLXSW_REG_QEEC_HR_PORT;

	/* Non-root TBF: shape at the subgroup level so that both unicast
	 * and multicast traffic of the class are subject to the shaper.
	 * NOTE(review): rationale inferred from the QEEC hierarchy levels —
	 * confirm against the QEEC register documentation.
	 */
	return MLXSW_REG_QEEC_HR_SUBGROUP;
}
0934
0935 static int
0936 mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
0937 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
0938 {
0939 enum mlxsw_reg_qeec_hr hr = mlxsw_sp_qdisc_tbf_hr(mlxsw_sp_port,
0940 mlxsw_sp_qdisc);
0941 int tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
0942 mlxsw_sp_qdisc);
0943
0944 return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, hr, tclass_num, 0,
0945 MLXSW_REG_QEEC_MAS_DIS, 0);
0946 }
0947
/* Convert a TBF burst size in bytes to the ASIC's encoded burst-size
 * exponent. The hardware burst size is (2^bs) * 512 bits, i.e. (2^bs) * 64
 * bytes, so the byte count must be a power-of-two multiple of 64 within
 * the supported exponent range.
 */
static int
mlxsw_sp_qdisc_tbf_bs(struct mlxsw_sp_port *mlxsw_sp_port,
		      u32 max_size, u8 *p_burst_size)
{
	/* Bytes -> 512-bit (64-byte) units. */
	u32 bs512 = max_size / 64;
	u8 bs = fls(bs512);

	if (!bs)
		return -EINVAL;
	--bs;

	/* Reject values that are not an exact power of two. */
	if ((1 << bs) != bs512)
		return -EINVAL;

	if (bs < mlxsw_sp_port->mlxsw_sp->lowest_shaper_bs ||
	    bs > MLXSW_REG_QEEC_HIGHEST_SHAPER_BS)
		return -EINVAL;

	*p_burst_size = bs;
	return 0;
}
0973
0974 static u32
0975 mlxsw_sp_qdisc_tbf_max_size(u8 bs)
0976 {
0977 return (1U << bs) * 64;
0978 }
0979
static u64
mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p)
{
	/* TBF interface is in bytes/s, whereas the ASIC is configured in
	 * Kbits/s. Dividing before multiplying avoids u64 overflow.
	 */
	return div_u64(p->rate.rate_bytes_ps, 1000) * 8;
}
0988
0989 static int
0990 mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
0991 void *params)
0992 {
0993 struct tc_tbf_qopt_offload_replace_params *p = params;
0994 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
0995 u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
0996 u8 burst_size;
0997 int err;
0998
0999 if (rate_kbps >= MLXSW_REG_QEEC_MAS_DIS) {
1000 dev_err(mlxsw_sp_port->mlxsw_sp->bus_info->dev,
1001 "spectrum: TBF: rate of %lluKbps must be below %u\n",
1002 rate_kbps, MLXSW_REG_QEEC_MAS_DIS);
1003 return -EINVAL;
1004 }
1005
1006 err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
1007 if (err) {
1008 u8 highest_shaper_bs = MLXSW_REG_QEEC_HIGHEST_SHAPER_BS;
1009
1010 dev_err(mlxsw_sp->bus_info->dev,
1011 "spectrum: TBF: invalid burst size of %u, must be a power of two between %u and %u",
1012 p->max_size,
1013 mlxsw_sp_qdisc_tbf_max_size(mlxsw_sp->lowest_shaper_bs),
1014 mlxsw_sp_qdisc_tbf_max_size(highest_shaper_bs));
1015 return -EINVAL;
1016 }
1017
1018 return 0;
1019 }
1020
/* Program a TBF offload: materialize any FIFO child announced before this
 * qdisc existed, then set the max-rate shaper on the class.
 */
static int
mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	enum mlxsw_reg_qeec_hr hr = mlxsw_sp_qdisc_tbf_hr(mlxsw_sp_port,
							  mlxsw_sp_qdisc);
	struct tc_tbf_qopt_offload_replace_params *p = params;
	u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
	int tclass_num;
	u8 burst_size;
	int err;

	err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle, 0,
						 &mlxsw_sp_qdisc->qdiscs[0]);
	if (err)
		return err;
	mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);

	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
						   mlxsw_sp_qdisc);

	err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
	if (WARN_ON_ONCE(err))
		/* check_params above was supposed to reject this value. */
		return -EINVAL;

	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, hr, tclass_num, 0,
					     rate_kbps, burst_size);
}
1051
1052 static void
1053 mlxsw_sp_qdisc_tbf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
1054 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1055 void *params)
1056 {
1057 struct tc_tbf_qopt_offload_replace_params *p = params;
1058
1059 mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
1060 }
1061
/* TBF has no qdisc-specific counters; the generic TC stats suffice. */
static int
mlxsw_sp_qdisc_get_tbf_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     struct tc_qopt_offload_stats *stats_ptr)
{
	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
	return 0;
}
1071
/* TBF offload operations. num_classes is 1: TBF keeps a single child slot
 * (typically an invisible FIFO). No get_xstats: TBF has no extra counters.
 */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = {
	.type = MLXSW_SP_QDISC_TBF,
	.check_params = mlxsw_sp_qdisc_tbf_check_params,
	.replace = mlxsw_sp_qdisc_tbf_replace,
	.unoffload = mlxsw_sp_qdisc_tbf_unoffload,
	.destroy = mlxsw_sp_qdisc_tbf_destroy,
	.get_stats = mlxsw_sp_qdisc_get_tbf_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
	.find_class = mlxsw_sp_qdisc_leaf_find_class,
	.num_classes = 1,
};
1083
/* Dispatch a TBF offload command. REPLACE may bind a new qdisc under the
 * parent; all other commands require the handle to match the one already
 * offloaded.
 */
static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct tc_tbf_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_TBF_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_tbf,
					      &p->replace_params);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_TBF_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_TBF_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	case TC_TBF_GRAFT:
		/* TBF has a single band, hence band 0. */
		return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, 0,
					    p->child_handle);
	default:
		return -EOPNOTSUPP;
	}
}
1115
1116 int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
1117 struct tc_tbf_qopt_offload *p)
1118 {
1119 int err;
1120
1121 mutex_lock(&mlxsw_sp_port->qdisc->lock);
1122 err = __mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, p);
1123 mutex_unlock(&mlxsw_sp_port->qdisc->lock);
1124
1125 return err;
1126 }
1127
/* Nothing to validate: FIFO offload programs no device parameters (see
 * mlxsw_sp_qdisc_fifo_replace), so any parameters are acceptable.
 */
static int
mlxsw_sp_qdisc_fifo_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				 void *params)
{
	return 0;
}
1134
/* Nothing to program in the device for a FIFO; offload succeeds trivially. */
static int
mlxsw_sp_qdisc_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    void *params)
{
	return 0;
}
1142
/* FIFO statistics come straight from the per-TC counters. */
static int
mlxsw_sp_qdisc_get_fifo_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct tc_qopt_offload_stats *stats_ptr)
{
	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
	return 0;
}
1152
/* Qdisc ops for FIFO. check_params and replace are no-ops; only
 * statistics are maintained.
 */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_fifo = {
	.type = MLXSW_SP_QDISC_FIFO,
	.check_params = mlxsw_sp_qdisc_fifo_check_params,
	.replace = mlxsw_sp_qdisc_fifo_replace,
	.get_stats = mlxsw_sp_qdisc_get_fifo_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
};
1160
/* If a FIFO was previously reported under this parent handle (before the
 * parent itself was offloaded), offload it now as an invisible
 * (TC_H_UNSPEC) child on the given band.
 */
static int
mlxsw_sp_qdisc_future_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port,
				   u32 handle, unsigned int band,
				   struct mlxsw_sp_qdisc *child_qdisc)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;

	if (handle == qdisc_state->future_handle &&
	    qdisc_state->future_fifos[band])
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
					      child_qdisc,
					      &mlxsw_sp_qdisc_ops_fifo,
					      NULL);
	return 0;
}
1176
1177 static void
1178 mlxsw_sp_qdisc_future_fifos_init(struct mlxsw_sp_port *mlxsw_sp_port,
1179 u32 handle)
1180 {
1181 struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
1182
1183 qdisc_state->future_handle = handle;
1184 memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos));
1185 }
1186
/* Dispatch a FIFO offload command. A FIFO whose parent is not offloaded
 * yet (and which has no handle of its own) is remembered per band so it
 * can be instantiated once the parent shows up.
 */
static int __mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct tc_fifo_qopt_offload *p)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
	unsigned int band;
	u32 parent_handle;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
	if (!mlxsw_sp_qdisc && p->handle == TC_H_UNSPEC) {
		parent_handle = TC_H_MAJ(p->parent);
		if (parent_handle != qdisc_state->future_handle) {
			/* This notification is for a different parent than
			 * the one currently tracked; restart tracking for
			 * the new parent handle.
			 */
			mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port,
							 parent_handle);
		}

		/* band is unsigned: a zero TC_H_MIN wraps around and fails
		 * the range check below.
		 */
		band = TC_H_MIN(p->parent) - 1;
		if (band < IEEE_8021QAZ_MAX_TCS) {
			if (p->command == TC_FIFO_REPLACE)
				qdisc_state->future_fifos[band] = true;
			else if (p->command == TC_FIFO_DESTROY)
				qdisc_state->future_fifos[band] = false;
		}
	}
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_FIFO_REPLACE) {
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_fifo, NULL);
	}

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_FIFO_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_FIFO_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	case TC_FIFO_REPLACE: /* Handled above. */
		break;
	}

	return -EOPNOTSUPP;
}
1238
1239 int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
1240 struct tc_fifo_qopt_offload *p)
1241 {
1242 int err;
1243
1244 mutex_lock(&mlxsw_sp_port->qdisc->lock);
1245 err = __mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, p);
1246 mutex_unlock(&mlxsw_sp_port->qdisc->lock);
1247
1248 return err;
1249 }
1250
/* Tear down an offloaded PRIO/ETS root: restore the default priority to
 * traffic-class mapping, disable per-TC subgroup ETS, and free the
 * per-band state.
 */
static int __mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
					struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	int i;

	for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
		mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
					  MLXSW_SP_PORT_DEFAULT_TCLASS);
		mlxsw_sp_port_ets_set(mlxsw_sp_port,
				      MLXSW_REG_QEEC_HR_SUBGROUP,
				      i, 0, false, 0);
	}

	kfree(mlxsw_sp_qdisc->ets_data);
	mlxsw_sp_qdisc->ets_data = NULL;
	return 0;
}
1268
/* PRIO destroy shares the common PRIO/ETS teardown path. */
static int
mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
}
1275
1276 static int
1277 __mlxsw_sp_qdisc_ets_check_params(unsigned int nbands)
1278 {
1279 if (nbands > IEEE_8021QAZ_MAX_TCS)
1280 return -EOPNOTSUPP;
1281
1282 return 0;
1283 }
1284
/* Validate PRIO parameters: only the band count needs checking. */
static int
mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				 void *params)
{
	struct tc_prio_qopt_offload_params *p = params;

	return __mlxsw_sp_qdisc_ets_check_params(p->bands);
}
1293
/* Walk callback: reset one qdisc's stats base, deliberately carrying the
 * backlog value over the reset. Always returns NULL so the walk visits
 * every node.
 */
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_walk_cb_clean_stats(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				   void *mlxsw_sp_port)
{
	u64 backlog;

	if (mlxsw_sp_qdisc->ops) {
		backlog = mlxsw_sp_qdisc->stats_base.backlog;
		if (mlxsw_sp_qdisc->ops->clean_stats)
			mlxsw_sp_qdisc->ops->clean_stats(mlxsw_sp_port,
							 mlxsw_sp_qdisc);
		/* Restore the saved backlog after clean_stats zeroed it. */
		mlxsw_sp_qdisc->stats_base.backlog = backlog;
	}

	return NULL;
}
1310
/* Clean stats across the qdisc tree rooted at mlxsw_sp_qdisc (traversal
 * is done by mlxsw_sp_qdisc_walk, defined elsewhere in this file).
 */
static void
mlxsw_sp_qdisc_tree_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	mlxsw_sp_qdisc_walk(mlxsw_sp_qdisc, mlxsw_sp_qdisc_walk_cb_clean_stats,
			    mlxsw_sp_port);
}
1318
/* Program a PRIO/ETS root qdisc: allocate per-band state on first use,
 * configure subgroup ETS parameters and priority-to-TC mapping for each
 * active band, then deactivate any bands beyond nbands.
 */
static int
__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     u32 handle, unsigned int nbands,
			     const unsigned int *quanta,
			     const unsigned int *weights,
			     const u8 *priomap)
{
	struct mlxsw_sp_qdisc_ets_data *ets_data = mlxsw_sp_qdisc->ets_data;
	struct mlxsw_sp_qdisc_ets_band *ets_band;
	struct mlxsw_sp_qdisc *child_qdisc;
	u8 old_priomap, new_priomap;
	int i, band;
	int err;

	if (!ets_data) {
		/* First replace on this qdisc: set up the fixed band ->
		 * tclass mapping.
		 */
		ets_data = kzalloc(sizeof(*ets_data), GFP_KERNEL);
		if (!ets_data)
			return -ENOMEM;
		mlxsw_sp_qdisc->ets_data = ets_data;

		for (band = 0; band < mlxsw_sp_qdisc->num_classes; band++) {
			int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);

			ets_band = &ets_data->bands[band];
			ets_band->tclass_num = tclass_num;
		}
	}

	for (band = 0; band < nbands; band++) {
		int tclass_num;

		child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
		ets_band = &ets_data->bands[band];

		tclass_num = ets_band->tclass_num;
		old_priomap = ets_band->prio_bitmap;
		new_priomap = 0;

		/* A zero quantum disables DWRR for this band. */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP,
					    tclass_num, 0, !!quanta[band],
					    weights[band]);
		if (err)
			return err;

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
			if (priomap[i] == band) {
				new_priomap |= BIT(i);
				/* Skip priorities already mapped here. */
				if (BIT(i) & old_priomap)
					continue;
				err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
								i, tclass_num);
				if (err)
					return err;
			}
		}

		ets_band->prio_bitmap = new_priomap;

		/* A changed priomap invalidates the counters the child's
		 * stats base was derived from.
		 */
		if (old_priomap != new_priomap)
			mlxsw_sp_qdisc_tree_clean_stats(mlxsw_sp_port,
							child_qdisc);

		err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle,
							 band, child_qdisc);
		if (err)
			return err;
	}
	/* Deactivate bands past the new band count. */
	for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
		ets_band = &ets_data->bands[band];
		ets_band->prio_bitmap = 0;

		child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);

		mlxsw_sp_port_ets_set(mlxsw_sp_port,
				      MLXSW_REG_QEEC_HR_SUBGROUP,
				      ets_band->tclass_num, 0, false, 0);
	}

	mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);
	return 0;
}
1403
/* PRIO is offloaded as ETS with all quanta zero: DWRR is disabled for
 * every band (see __mlxsw_sp_qdisc_ets_replace), leaving strict priority.
 */
static int
mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    void *params)
{
	struct tc_prio_qopt_offload_params *p = params;
	unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0};

	return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, mlxsw_sp_qdisc,
					    handle, p->bands, zeroes,
					    zeroes, p->priomap);
}
1416
1417 static void
1418 __mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
1419 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1420 struct gnet_stats_queue *qstats)
1421 {
1422 u64 backlog;
1423
1424 backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
1425 mlxsw_sp_qdisc->stats_base.backlog);
1426 qstats->backlog -= backlog;
1427 }
1428
1429 static void
1430 mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
1431 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1432 void *params)
1433 {
1434 struct tc_prio_qopt_offload_params *p = params;
1435
1436 __mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
1437 p->qstats);
1438 }
1439
/* Aggregate the counters of all child bands into the PRIO/ETS root's
 * reported statistics.
 */
static int
mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct tc_qopt_offload_stats *stats_ptr)
{
	struct mlxsw_sp_qdisc *tc_qdisc;
	u64 tx_packets = 0;
	u64 tx_bytes = 0;
	u64 backlog = 0;
	u64 drops = 0;
	int i;

	for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
		tc_qdisc = &mlxsw_sp_qdisc->qdiscs[i];
		mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, tc_qdisc,
						&tx_bytes, &tx_packets,
						&drops, &backlog);
	}

	mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
				    tx_bytes, tx_packets, drops, backlog,
				    stats_ptr);
	return 0;
}
1464
1465 static void
1466 mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
1467 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
1468 {
1469 struct mlxsw_sp_qdisc_stats *stats_base;
1470 struct mlxsw_sp_port_xstats *xstats;
1471 struct rtnl_link_stats64 *stats;
1472 int i;
1473
1474 xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
1475 stats = &mlxsw_sp_port->periodic_hw_stats.stats;
1476 stats_base = &mlxsw_sp_qdisc->stats_base;
1477
1478 stats_base->tx_packets = stats->tx_packets;
1479 stats_base->tx_bytes = stats->tx_bytes;
1480
1481 stats_base->drops = 0;
1482 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1483 stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i);
1484 stats_base->drops += xstats->wred_drop[i];
1485 }
1486
1487 mlxsw_sp_qdisc->stats_base.backlog = 0;
1488 }
1489
1490 static struct mlxsw_sp_qdisc *
1491 mlxsw_sp_qdisc_prio_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1492 u32 parent)
1493 {
1494 int child_index = TC_H_MIN(parent);
1495 int band = child_index - 1;
1496
1497 if (band < 0 || band >= mlxsw_sp_qdisc->num_classes)
1498 return NULL;
1499 return &mlxsw_sp_qdisc->qdiscs[band];
1500 }
1501
/* Map a child qdisc back to its ETS band record by its position in the
 * parent's qdiscs array. The WARN_ON guards against a child that is not
 * actually ours; fall back to band 0 to avoid out-of-bounds access.
 */
static struct mlxsw_sp_qdisc_ets_band *
mlxsw_sp_qdisc_ets_get_band(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    struct mlxsw_sp_qdisc *child)
{
	unsigned int band = child - mlxsw_sp_qdisc->qdiscs;

	if (WARN_ON(band >= IEEE_8021QAZ_MAX_TCS))
		band = 0;
	return &mlxsw_sp_qdisc->ets_data->bands[band];
}
1512
1513 static u8
1514 mlxsw_sp_qdisc_ets_get_prio_bitmap(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1515 struct mlxsw_sp_qdisc *child)
1516 {
1517 return mlxsw_sp_qdisc_ets_get_band(mlxsw_sp_qdisc, child)->prio_bitmap;
1518 }
1519
1520 static int
1521 mlxsw_sp_qdisc_ets_get_tclass_num(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1522 struct mlxsw_sp_qdisc *child)
1523 {
1524 return mlxsw_sp_qdisc_ets_get_band(mlxsw_sp_qdisc, child)->tclass_num;
1525 }
1526
/* Qdisc ops for PRIO, built on the shared PRIO/ETS helpers. */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
	.type = MLXSW_SP_QDISC_PRIO,
	.check_params = mlxsw_sp_qdisc_prio_check_params,
	.replace = mlxsw_sp_qdisc_prio_replace,
	.unoffload = mlxsw_sp_qdisc_prio_unoffload,
	.destroy = mlxsw_sp_qdisc_prio_destroy,
	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
	.find_class = mlxsw_sp_qdisc_prio_find_class,
	.num_classes = IEEE_8021QAZ_MAX_TCS,
	.get_prio_bitmap = mlxsw_sp_qdisc_ets_get_prio_bitmap,
	.get_tclass_num = mlxsw_sp_qdisc_ets_get_tclass_num,
};
1540
/* Validate ETS parameters: only the band count needs checking. */
static int
mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				void *params)
{
	struct tc_ets_qopt_offload_replace_params *p = params;

	return __mlxsw_sp_qdisc_ets_check_params(p->bands);
}
1549
/* ETS replace: forward the caller-supplied quanta and weights, unlike
 * PRIO which passes all-zero arrays.
 */
static int
mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	struct tc_ets_qopt_offload_replace_params *p = params;

	return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, mlxsw_sp_qdisc,
					    handle, p->bands, p->quanta,
					    p->weights, p->priomap);
}
1561
1562 static void
1563 mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
1564 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1565 void *params)
1566 {
1567 struct tc_ets_qopt_offload_replace_params *p = params;
1568
1569 __mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
1570 p->qstats);
1571 }
1572
/* ETS destroy shares the common PRIO/ETS teardown path. */
static int
mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
}
1579
/* Qdisc ops for ETS. ETS reuses the PRIO infrastructure; the two differ
 * only in the parameters programmed per band (quanta and weights).
 */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
	.type = MLXSW_SP_QDISC_ETS,
	.check_params = mlxsw_sp_qdisc_ets_check_params,
	.replace = mlxsw_sp_qdisc_ets_replace,
	.unoffload = mlxsw_sp_qdisc_ets_unoffload,
	.destroy = mlxsw_sp_qdisc_ets_destroy,
	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
	.find_class = mlxsw_sp_qdisc_prio_find_class,
	.num_classes = IEEE_8021QAZ_MAX_TCS,
	.get_prio_bitmap = mlxsw_sp_qdisc_ets_get_prio_bitmap,
	.get_tclass_num = mlxsw_sp_qdisc_ets_get_tclass_num,
};
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
/* Handle a GRAFT notification on one band/class of an offloaded qdisc.
 * Grafting anything other than the already-offloaded child is not
 * supported; in that case the existing child is unoffloaded and
 * -EOPNOTSUPP is returned.
 */
static int mlxsw_sp_qdisc_graft(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				u8 band, u32 child_handle)
{
	struct mlxsw_sp_qdisc *old_qdisc;
	u32 parent;

	if (band < mlxsw_sp_qdisc->num_classes &&
	    mlxsw_sp_qdisc->qdiscs[band].handle == child_handle)
		return 0;

	if (!child_handle) {
		/* This is an invisible FIFO replacing the original Qdisc.
		 * Ignore it--the original Qdisc's destroy will follow.
		 */
		return 0;
	}

	/* See if the grafted qdisc is already offloaded on any tclass. If
	 * so, unoffload it, so that a fresh offload can take its place.
	 */
	old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
						  child_handle);
	if (old_qdisc)
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);

	/* Also unoffload whatever currently occupies the target band. */
	parent = TC_H_MAKE(mlxsw_sp_qdisc->handle, band + 1);
	mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc,
							 parent);
	if (!WARN_ON(!mlxsw_sp_qdisc))
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);

	return -EOPNOTSUPP;
}
1653
/* Dispatch a PRIO offload command. REPLACE may bind a new qdisc; all
 * other commands require the handle to match the offloaded one.
 */
static int __mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct tc_prio_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_PRIO_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_prio,
					      &p->replace_params);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_PRIO_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_PRIO_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	case TC_PRIO_GRAFT:
		return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
					    p->graft_params.band,
					    p->graft_params.child_handle);
	default:
		return -EOPNOTSUPP;
	}
}
1686
1687 int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
1688 struct tc_prio_qopt_offload *p)
1689 {
1690 int err;
1691
1692 mutex_lock(&mlxsw_sp_port->qdisc->lock);
1693 err = __mlxsw_sp_setup_tc_prio(mlxsw_sp_port, p);
1694 mutex_unlock(&mlxsw_sp_port->qdisc->lock);
1695
1696 return err;
1697 }
1698
/* Dispatch an ETS offload command; mirrors __mlxsw_sp_setup_tc_prio. */
static int __mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct tc_ets_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_ETS_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_ets,
					      &p->replace_params);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_ETS_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_ETS_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	case TC_ETS_GRAFT:
		return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
					    p->graft_params.band,
					    p->graft_params.child_handle);
	default:
		return -EOPNOTSUPP;
	}
}
1731
1732 int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
1733 struct tc_ets_qopt_offload *p)
1734 {
1735 int err;
1736
1737 mutex_lock(&mlxsw_sp_port->qdisc->lock);
1738 err = __mlxsw_sp_setup_tc_ets(mlxsw_sp_port, p);
1739 mutex_unlock(&mlxsw_sp_port->qdisc->lock);
1740
1741 return err;
1742 }
1743
/* State for one qevent flow block: the matchall filters installed on it
 * and the (port, qdisc, trigger) bindings it is bound to.
 */
struct mlxsw_sp_qevent_block {
	struct list_head binding_list;
	struct list_head mall_entry_list;
	struct mlxsw_sp *mlxsw_sp;
};
1749
/* One binding of a qevent block to a port / qdisc handle / SPAN trigger. */
struct mlxsw_sp_qevent_binding {
	struct list_head list;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 handle;		/* Handle of the qdisc owning the qevent. */
	int tclass_num;
	enum mlxsw_sp_span_trigger span_trigger;
	unsigned int action_mask;	/* BIT(action type)s allowed on this qevent. */
};
1758
/* Driver-wide list of qevent flow block callbacks; presumably used by the
 * flow_block binding code further down this file — confirm against the
 * rest of the file.
 */
static LIST_HEAD(mlxsw_sp_qevent_block_cb_list);
1760
/* Set up SPAN for one qevent action: allocate an agent towards the
 * destination described by agent_parms, mark the port analyzed, bind the
 * agent to the binding's trigger and enable it on the binding's traffic
 * class. On success the agent ID is returned via p_span_id. On failure,
 * teardown happens in reverse order of setup.
 */
static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_mall_entry *mall_entry,
					  struct mlxsw_sp_qevent_binding *qevent_binding,
					  const struct mlxsw_sp_span_agent_parms *agent_parms,
					  int *p_span_id)
{
	enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger;
	struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
	struct mlxsw_sp_span_trigger_parms trigger_parms = {};
	bool ingress;
	int span_id;
	int err;

	err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, agent_parms);
	if (err)
		return err;

	ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger);
	err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
	if (err)
		goto err_analyzed_port_get;

	trigger_parms.span_id = span_id;
	trigger_parms.probability_rate = 1;
	err = mlxsw_sp_span_agent_bind(mlxsw_sp, span_trigger, mlxsw_sp_port,
				       &trigger_parms);
	if (err)
		goto err_agent_bind;

	err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, span_trigger,
					   qevent_binding->tclass_num);
	if (err)
		goto err_trigger_enable;

	*p_span_id = span_id;
	return 0;

err_trigger_enable:
	mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
				   &trigger_parms);
err_agent_bind:
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
err_analyzed_port_get:
	mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
	return err;
}
1807
/* Tear down a qevent SPAN setup in reverse order of
 * mlxsw_sp_qevent_span_configure().
 */
static void mlxsw_sp_qevent_span_deconfigure(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_qevent_binding *qevent_binding,
					     int span_id)
{
	enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger;
	struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
	struct mlxsw_sp_span_trigger_parms trigger_parms = {
		.span_id = span_id,
	};
	bool ingress;

	ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger);

	mlxsw_sp_span_trigger_disable(mlxsw_sp_port, span_trigger,
				      qevent_binding->tclass_num);
	mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
				   &trigger_parms);
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
	mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
}
1828
/* Mirror action: SPAN towards the netdevice given in the filter. */
static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_mall_entry *mall_entry,
					    struct mlxsw_sp_qevent_binding *qevent_binding)
{
	struct mlxsw_sp_span_agent_parms agent_parms = {
		.to_dev = mall_entry->mirror.to_dev,
	};

	return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
					      &agent_parms, &mall_entry->mirror.span_id);
}
1840
/* Undo mlxsw_sp_qevent_mirror_configure(). */
static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp,
					       struct mlxsw_sp_mall_entry *mall_entry,
					       struct mlxsw_sp_qevent_binding *qevent_binding)
{
	mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->mirror.span_id);
}
1847
/* Trap action: SPAN into the CPU using the buffer-drops trap group's
 * policer, if one is configured on that group.
 */
static int mlxsw_sp_qevent_trap_configure(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_mall_entry *mall_entry,
					  struct mlxsw_sp_qevent_binding *qevent_binding)
{
	struct mlxsw_sp_span_agent_parms agent_parms = {
		.session_id = MLXSW_SP_SPAN_SESSION_ID_BUFFER,
	};
	int err;

	err = mlxsw_sp_trap_group_policer_hw_id_get(mlxsw_sp,
						    DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS,
						    &agent_parms.policer_enable,
						    &agent_parms.policer_id);
	if (err)
		return err;

	return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
					      &agent_parms, &mall_entry->trap.span_id);
}
1867
/* Undo mlxsw_sp_qevent_trap_configure(). */
static void mlxsw_sp_qevent_trap_deconfigure(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_mall_entry *mall_entry,
					     struct mlxsw_sp_qevent_binding *qevent_binding)
{
	mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->trap.span_id);
}
1874
/* Configure one filter entry for a binding, after checking that the
 * action is allowed on this particular qevent (action_mask).
 */
static int
mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_mall_entry *mall_entry,
				struct mlxsw_sp_qevent_binding *qevent_binding,
				struct netlink_ext_ack *extack)
{
	if (!(BIT(mall_entry->type) & qevent_binding->action_mask)) {
		NL_SET_ERR_MSG(extack, "Action not supported at this qevent");
		return -EOPNOTSUPP;
	}

	switch (mall_entry->type) {
	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
		return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding);
	case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
		return mlxsw_sp_qevent_trap_configure(mlxsw_sp, mall_entry, qevent_binding);
	default:
		/* Unsupported types are rejected at filter-replace time. */
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
}
1897
/* Undo mlxsw_sp_qevent_entry_configure() for one filter entry. */
static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp,
					      struct mlxsw_sp_mall_entry *mall_entry,
					      struct mlxsw_sp_qevent_binding *qevent_binding)
{
	switch (mall_entry->type) {
	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
		return mlxsw_sp_qevent_mirror_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
	case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
		return mlxsw_sp_qevent_trap_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
	default:
		WARN_ON(1);
		return;
	}
}
1912
/* Configure all filter entries of the block for one binding; on failure,
 * roll back the entries configured so far, in reverse order.
 */
static int
mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block,
				  struct mlxsw_sp_qevent_binding *qevent_binding,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mall_entry *mall_entry;
	int err;

	list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) {
		err = mlxsw_sp_qevent_entry_configure(qevent_block->mlxsw_sp, mall_entry,
						      qevent_binding, extack);
		if (err)
			goto err_entry_configure;
	}

	return 0;

err_entry_configure:
	list_for_each_entry_continue_reverse(mall_entry, &qevent_block->mall_entry_list, list)
		mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
						  qevent_binding);
	return err;
}
1936
/* Undo mlxsw_sp_qevent_binding_configure() for every filter entry. */
static void mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block *qevent_block,
						struct mlxsw_sp_qevent_binding *qevent_binding)
{
	struct mlxsw_sp_mall_entry *mall_entry;

	list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list)
		mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
						  qevent_binding);
}
1946
/* Configure the block's filters on every binding; on failure, roll back
 * the bindings configured so far, in reverse order.
 */
static int
mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_qevent_binding *qevent_binding;
	int err;

	list_for_each_entry(qevent_binding, &qevent_block->binding_list, list) {
		err = mlxsw_sp_qevent_binding_configure(qevent_block,
							qevent_binding,
							extack);
		if (err)
			goto err_binding_configure;
	}

	return 0;

err_binding_configure:
	list_for_each_entry_continue_reverse(qevent_binding, &qevent_block->binding_list, list)
		mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
	return err;
}
1969
/* Undo mlxsw_sp_qevent_block_configure() on every binding. */
static void mlxsw_sp_qevent_block_deconfigure(struct mlxsw_sp_qevent_block *qevent_block)
{
	struct mlxsw_sp_qevent_binding *qevent_binding;

	list_for_each_entry(qevent_binding, &qevent_block->binding_list, list)
		mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
}
1977
1978 static struct mlxsw_sp_mall_entry *
1979 mlxsw_sp_qevent_mall_entry_find(struct mlxsw_sp_qevent_block *block, unsigned long cookie)
1980 {
1981 struct mlxsw_sp_mall_entry *mall_entry;
1982
1983 list_for_each_entry(mall_entry, &block->mall_entry_list, list)
1984 if (mall_entry->cookie == cookie)
1985 return mall_entry;
1986
1987 return NULL;
1988 }
1989
/* Install a matchall filter on a qevent block and configure it on all of
 * the block's bindings.
 */
static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_qevent_block *qevent_block,
					struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_mall_entry *mall_entry;
	struct flow_action_entry *act;
	int err;

	/* Only a single filter per qevent block is supported; a second
	 * REPLACE (including a replace of the existing filter) is rejected.
	 */
	if (!list_empty(&qevent_block->mall_entry_list)) {
		NL_SET_ERR_MSG(f->common.extack, "At most one filter supported");
		return -EOPNOTSUPP;
	}
	if (f->rule->action.num_entries != 1) {
		NL_SET_ERR_MSG(f->common.extack, "Only singular actions supported");
		return -EOPNOTSUPP;
	}
	if (f->common.chain_index) {
		NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
		return -EOPNOTSUPP;
	}
	if (f->common.protocol != htons(ETH_P_ALL)) {
		NL_SET_ERR_MSG(f->common.extack, "Protocol matching not supported");
		return -EOPNOTSUPP;
	}

	act = &f->rule->action.entries[0];
	if (!(act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED)) {
		NL_SET_ERR_MSG(f->common.extack, "HW counters not supported on qevents");
		return -EOPNOTSUPP;
	}

	mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
	if (!mall_entry)
		return -ENOMEM;
	mall_entry->cookie = f->cookie;

	if (act->id == FLOW_ACTION_MIRRED) {
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
		mall_entry->mirror.to_dev = act->dev;
	} else if (act->id == FLOW_ACTION_TRAP) {
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_TRAP;
	} else {
		NL_SET_ERR_MSG(f->common.extack, "Unsupported action");
		err = -EOPNOTSUPP;
		goto err_unsupported_action;
	}

	list_add_tail(&mall_entry->list, &qevent_block->mall_entry_list);

	err = mlxsw_sp_qevent_block_configure(qevent_block, f->common.extack);
	if (err)
		goto err_block_configure;

	return 0;

err_block_configure:
	list_del(&mall_entry->list);
err_unsupported_action:
	kfree(mall_entry);
	return err;
}
2054
/* Remove a matchall filter from a qevent block: deconfigure it from all
 * bindings, then drop and free the entry.
 */
static void mlxsw_sp_qevent_mall_destroy(struct mlxsw_sp_qevent_block *qevent_block,
					 struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_mall_entry *mall_entry;

	mall_entry = mlxsw_sp_qevent_mall_entry_find(qevent_block, f->cookie);
	if (!mall_entry)
		return;

	mlxsw_sp_qevent_block_deconfigure(qevent_block);

	list_del(&mall_entry->list);
	kfree(mall_entry);
}
2069
/* Dispatch a matchall command targeted at a qevent block. */
static int mlxsw_sp_qevent_block_mall_cb(struct mlxsw_sp_qevent_block *qevent_block,
					 struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = qevent_block->mlxsw_sp;

	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_qevent_mall_replace(mlxsw_sp, qevent_block, f);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_qevent_mall_destroy(qevent_block, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
2085
/* flow_block callback for qevent blocks; only matchall is offloadable. */
static int mlxsw_sp_qevent_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct mlxsw_sp_qevent_block *qevent_block = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return mlxsw_sp_qevent_block_mall_cb(qevent_block, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
2097
2098 static struct mlxsw_sp_qevent_block *mlxsw_sp_qevent_block_create(struct mlxsw_sp *mlxsw_sp,
2099 struct net *net)
2100 {
2101 struct mlxsw_sp_qevent_block *qevent_block;
2102
2103 qevent_block = kzalloc(sizeof(*qevent_block), GFP_KERNEL);
2104 if (!qevent_block)
2105 return NULL;
2106
2107 INIT_LIST_HEAD(&qevent_block->binding_list);
2108 INIT_LIST_HEAD(&qevent_block->mall_entry_list);
2109 qevent_block->mlxsw_sp = mlxsw_sp;
2110 return qevent_block;
2111 }
2112
/* Free a qevent block; it must have no bindings or filters left. */
static void
mlxsw_sp_qevent_block_destroy(struct mlxsw_sp_qevent_block *qevent_block)
{
	WARN_ON(!list_empty(&qevent_block->binding_list));
	WARN_ON(!list_empty(&qevent_block->mall_entry_list));
	kfree(qevent_block);
}
2120
/* flow_block_cb release callback: destroy the backing qevent block. */
static void mlxsw_sp_qevent_block_release(void *cb_priv)
{
	struct mlxsw_sp_qevent_block *qevent_block = cb_priv;

	mlxsw_sp_qevent_block_destroy(qevent_block);
}
2127
2128 static struct mlxsw_sp_qevent_binding *
2129 mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, int tclass_num,
2130 enum mlxsw_sp_span_trigger span_trigger,
2131 unsigned int action_mask)
2132 {
2133 struct mlxsw_sp_qevent_binding *binding;
2134
2135 binding = kzalloc(sizeof(*binding), GFP_KERNEL);
2136 if (!binding)
2137 return ERR_PTR(-ENOMEM);
2138
2139 binding->mlxsw_sp_port = mlxsw_sp_port;
2140 binding->handle = handle;
2141 binding->tclass_num = tclass_num;
2142 binding->span_trigger = span_trigger;
2143 binding->action_mask = action_mask;
2144 return binding;
2145 }
2146
/* Free a binding previously allocated by mlxsw_sp_qevent_binding_create().
 * The caller is responsible for unlinking it from the block's binding list
 * and deconfiguring it first.
 */
static void
mlxsw_sp_qevent_binding_destroy(struct mlxsw_sp_qevent_binding *binding)
{
	kfree(binding);
}
2152
2153 static struct mlxsw_sp_qevent_binding *
2154 mlxsw_sp_qevent_binding_lookup(struct mlxsw_sp_qevent_block *block,
2155 struct mlxsw_sp_port *mlxsw_sp_port,
2156 u32 handle,
2157 enum mlxsw_sp_span_trigger span_trigger)
2158 {
2159 struct mlxsw_sp_qevent_binding *qevent_binding;
2160
2161 list_for_each_entry(qevent_binding, &block->binding_list, list)
2162 if (qevent_binding->mlxsw_sp_port == mlxsw_sp_port &&
2163 qevent_binding->handle == handle &&
2164 qevent_binding->span_trigger == span_trigger)
2165 return qevent_binding;
2166 return NULL;
2167 }
2168
/* Bind a qevent flow block to @mlxsw_sp_port for @span_trigger. The block
 * callback and its private mlxsw_sp_qevent_block are shared by all bindings
 * on the same flow block: they are created on the first bind and only
 * reference-counted afterwards. @action_mask restricts which matchall
 * actions this qevent accepts.
 *
 * Returns 0 on success or a negative errno.
 */
static int
mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct flow_block_offload *f,
				    enum mlxsw_sp_span_trigger span_trigger,
				    unsigned int action_mask)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_qevent_binding *qevent_binding;
	struct mlxsw_sp_qevent_block *qevent_block;
	struct flow_block_cb *block_cb;
	struct mlxsw_sp_qdisc *qdisc;
	bool register_block = false;
	int tclass_num;
	int err;

	block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
	if (!block_cb) {
		/* First binding on this flow block: create the shared
		 * per-block state and the callback carrying it.
		 */
		qevent_block = mlxsw_sp_qevent_block_create(mlxsw_sp, f->net);
		if (!qevent_block)
			return -ENOMEM;
		block_cb = flow_block_cb_alloc(mlxsw_sp_qevent_block_cb, mlxsw_sp, qevent_block,
					       mlxsw_sp_qevent_block_release);
		if (IS_ERR(block_cb)) {
			mlxsw_sp_qevent_block_destroy(qevent_block);
			return PTR_ERR(block_cb);
		}
		/* Registration with the flow block is deferred until the
		 * binding is fully configured, so the error path below does
		 * not need to unregister.
		 */
		register_block = true;
	} else {
		qevent_block = flow_block_cb_priv(block_cb);
	}
	/* Take a reference for this binding; dropped on the error path and
	 * in the unbind path.
	 */
	flow_block_cb_incref(block_cb);

	qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port, f->sch->handle);
	if (!qdisc) {
		NL_SET_ERR_MSG(f->extack, "Qdisc not offloaded");
		err = -ENOENT;
		goto err_find_qdisc;
	}

	/* A duplicate binding for the same port/handle/trigger would be a
	 * driver bug, hence the WARN_ON.
	 */
	if (WARN_ON(mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
						   span_trigger))) {
		err = -EEXIST;
		goto err_binding_exists;
	}

	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, qdisc);
	qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port,
							f->sch->handle,
							tclass_num,
							span_trigger,
							action_mask);
	if (IS_ERR(qevent_binding)) {
		err = PTR_ERR(qevent_binding);
		goto err_binding_create;
	}

	err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding,
						f->extack);
	if (err)
		goto err_binding_configure;

	list_add(&qevent_binding->list, &qevent_block->binding_list);

	if (register_block) {
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_qevent_block_cb_list);
	}

	return 0;

err_binding_configure:
	mlxsw_sp_qevent_binding_destroy(qevent_binding);
err_binding_create:
err_binding_exists:
err_find_qdisc:
	/* Drop this binding's reference; freeing the callback also releases
	 * the qevent block via mlxsw_sp_qevent_block_release().
	 */
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
	return err;
}
2248
2249 static void mlxsw_sp_setup_tc_block_qevent_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
2250 struct flow_block_offload *f,
2251 enum mlxsw_sp_span_trigger span_trigger)
2252 {
2253 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2254 struct mlxsw_sp_qevent_binding *qevent_binding;
2255 struct mlxsw_sp_qevent_block *qevent_block;
2256 struct flow_block_cb *block_cb;
2257
2258 block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
2259 if (!block_cb)
2260 return;
2261 qevent_block = flow_block_cb_priv(block_cb);
2262
2263 qevent_binding = mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
2264 span_trigger);
2265 if (!qevent_binding)
2266 return;
2267
2268 list_del(&qevent_binding->list);
2269 mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
2270 mlxsw_sp_qevent_binding_destroy(qevent_binding);
2271
2272 if (!flow_block_cb_decref(block_cb)) {
2273 flow_block_cb_remove(block_cb, f);
2274 list_del(&block_cb->driver_list);
2275 }
2276 }
2277
2278 static int
2279 mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
2280 struct flow_block_offload *f,
2281 enum mlxsw_sp_span_trigger span_trigger,
2282 unsigned int action_mask)
2283 {
2284 f->driver_block_list = &mlxsw_sp_qevent_block_cb_list;
2285
2286 switch (f->command) {
2287 case FLOW_BLOCK_BIND:
2288 return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f,
2289 span_trigger,
2290 action_mask);
2291 case FLOW_BLOCK_UNBIND:
2292 mlxsw_sp_setup_tc_block_qevent_unbind(mlxsw_sp_port, f, span_trigger);
2293 return 0;
2294 default:
2295 return -EOPNOTSUPP;
2296 }
2297 }
2298
2299 int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
2300 struct flow_block_offload *f)
2301 {
2302 unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR) |
2303 BIT(MLXSW_SP_MALL_ACTION_TYPE_TRAP);
2304
2305 return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f,
2306 MLXSW_SP_SPAN_TRIGGER_EARLY_DROP,
2307 action_mask);
2308 }
2309
2310 int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port,
2311 struct flow_block_offload *f)
2312 {
2313 unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR);
2314
2315 return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f,
2316 MLXSW_SP_SPAN_TRIGGER_ECN,
2317 action_mask);
2318 }
2319
2320 int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
2321 {
2322 struct mlxsw_sp_qdisc_state *qdisc_state;
2323
2324 qdisc_state = kzalloc(sizeof(*qdisc_state), GFP_KERNEL);
2325 if (!qdisc_state)
2326 return -ENOMEM;
2327
2328 mutex_init(&qdisc_state->lock);
2329 mlxsw_sp_port->qdisc = qdisc_state;
2330 return 0;
2331 }
2332
2333 void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
2334 {
2335 mutex_destroy(&mlxsw_sp_port->qdisc->lock);
2336 kfree(mlxsw_sp_port->qdisc);
2337 }