#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/flow_offload.h>

#include "spectrum.h"
#include "spectrum_span.h"
#include "reg.h"

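/* Look up a matchall entry on the block by its TC cookie. */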
static struct mlxsw_sp_mall_entry *
mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie)
{
	struct mlxsw_sp_mall_entry *mall_entry;

	list_for_each_entry(mall_entry, &block->mall.list, list)
		if (mall_entry->cookie == cookie)
			return mall_entry;

	return NULL;
}

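/* Install a mirror entry on a port: resolve a SPAN agent for the destination
 * netdev, mark the port as analyzed and bind the agent to the port's ingress
 * or egress trigger.
 */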
static int
mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_mall_entry *mall_entry,
			      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_agent_parms agent_parms = {};
	struct mlxsw_sp_span_trigger_parms parms;
	enum mlxsw_sp_span_trigger trigger;
	int err;

	if (!mall_entry->mirror.to_dev) {
		NL_SET_ERR_MSG(extack, "Could not find requested device");
		return -EINVAL;
	}

	agent_parms.to_dev = mall_entry->mirror.to_dev;
	err = mlxsw_sp_span_agent_get(mlxsw_sp, &mall_entry->mirror.span_id,
				      &agent_parms);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to get SPAN agent");
		return err;
	}

	err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port,
					      mall_entry->ingress);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to get analyzed port");
		goto err_analyzed_port_get;
	}

	trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
					MLXSW_SP_SPAN_TRIGGER_EGRESS;
	parms.span_id = mall_entry->mirror.span_id;
	parms.probability_rate = 1;
	err = mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port,
				       &parms);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to bind SPAN agent");
		goto err_agent_bind;
	}

	return 0;

err_agent_bind:
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
err_analyzed_port_get:
	mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
	return err;
}

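/* Tear down port mirroring in the reverse order of
 * mlxsw_sp_mall_port_mirror_add().
 */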
static void
mlxsw_sp_mall_port_mirror_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_mall_entry *mall_entry)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_trigger_parms parms;
	enum mlxsw_sp_span_trigger trigger;

	trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
					MLXSW_SP_SPAN_TRIGGER_EGRESS;
	parms.span_id = mall_entry->mirror.span_id;
	mlxsw_sp_span_agent_unbind(mlxsw_sp, trigger, mlxsw_sp_port, &parms);
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
	mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
}

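/* Enable or disable per-port packet sampling via the MPSC register. */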
static int mlxsw_sp_mall_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

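/* Register the sampling trigger parameters for the port and let the
 * ASIC-specific mall_ops install the sampling rule.
 */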
static int
mlxsw_sp_mall_port_sample_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_mall_entry *mall_entry,
			      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_sample_trigger trigger;
	int err;

	if (mall_entry->ingress)
		trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS;
	else
		trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS;
	trigger.local_port = mlxsw_sp_port->local_port;
	err = mlxsw_sp_sample_trigger_params_set(mlxsw_sp, &trigger,
						 &mall_entry->sample.params,
						 extack);
	if (err)
		return err;

	err = mlxsw_sp->mall_ops->sample_add(mlxsw_sp, mlxsw_sp_port,
					     mall_entry, extack);
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger);
	return err;
}

static void
mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_mall_entry *mall_entry)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_sample_trigger trigger;

	if (mall_entry->ingress)
		trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS;
	else
		trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS;
	trigger.local_port = mlxsw_sp_port->local_port;

	mlxsw_sp->mall_ops->sample_del(mlxsw_sp, mlxsw_sp_port, mall_entry);
	mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger);
}

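/* Dispatch per-port installation and removal of a matchall entry by its
 * action type.
 */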
static int
mlxsw_sp_mall_port_rule_add(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_mall_entry *mall_entry,
			    struct netlink_ext_ack *extack)
{
	switch (mall_entry->type) {
	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
		return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry,
						     extack);
	case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
		return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry,
						     extack);
	default:
		WARN_ON(1);
		return -EINVAL;
	}
}

static void
mlxsw_sp_mall_port_rule_del(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_mall_entry *mall_entry)
{
	switch (mall_entry->type) {
	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
		mlxsw_sp_mall_port_mirror_del(mlxsw_sp_port, mall_entry);
		break;
	case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
		mlxsw_sp_mall_port_sample_del(mlxsw_sp_port, mall_entry);
		break;
	default:
		WARN_ON(1);
	}
}

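/* Recompute the minimum and maximum priority of the matchall entries
 * installed on the block.
 */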
static void mlxsw_sp_mall_prio_update(struct mlxsw_sp_flow_block *block)
{
	struct mlxsw_sp_mall_entry *mall_entry;

	if (list_empty(&block->mall.list))
		return;
	block->mall.min_prio = UINT_MAX;
	block->mall.max_prio = 0;
	list_for_each_entry(mall_entry, &block->mall.list, list) {
		if (mall_entry->priority < block->mall.min_prio)
			block->mall.min_prio = mall_entry->priority;
		if (mall_entry->priority > block->mall.max_prio)
			block->mall.max_prio = mall_entry->priority;
	}
}

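/* Offload a matchall classifier: validate the request, allocate an entry,
 * order it against existing flower rules on the block and install it on
 * every port the block is bound to.
 */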
int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_flow_block *block,
			  struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_mall_entry *mall_entry;
	__be16 protocol = f->common.protocol;
	struct flow_action_entry *act;
	unsigned int flower_min_prio;
	unsigned int flower_max_prio;
	bool flower_prio_valid;
	int err;

	if (!flow_offload_has_one_action(&f->rule->action)) {
		NL_SET_ERR_MSG(f->common.extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	if (f->common.chain_index) {
		NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
		return -EOPNOTSUPP;
	}

	if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
		NL_SET_ERR_MSG(f->common.extack, "Only not mixed bound blocks are supported");
		return -EOPNOTSUPP;
	}

	err = mlxsw_sp_flower_prio_get(mlxsw_sp, block, f->common.chain_index,
				       &flower_min_prio, &flower_max_prio);
	if (err) {
		if (err != -ENOENT) {
			NL_SET_ERR_MSG(f->common.extack, "Failed to get flower priorities");
			return err;
		}
		flower_prio_valid = false;
	} else {
		flower_prio_valid = true;
	}

	if (protocol != htons(ETH_P_ALL)) {
		NL_SET_ERR_MSG(f->common.extack, "matchall rules only supported with 'all' protocol");
		return -EOPNOTSUPP;
	}

	mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
	if (!mall_entry)
		return -ENOMEM;
	mall_entry->cookie = f->cookie;
	mall_entry->priority = f->common.prio;
	mall_entry->ingress = mlxsw_sp_flow_block_is_ingress_bound(block);

	if (flower_prio_valid && mall_entry->ingress &&
	    mall_entry->priority >= flower_min_prio) {
		NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
		err = -EOPNOTSUPP;
		goto errout;
	}
	if (flower_prio_valid && !mall_entry->ingress &&
	    mall_entry->priority <= flower_max_prio) {
		NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules");
		err = -EOPNOTSUPP;
		goto errout;
	}

	act = &f->rule->action.entries[0];

	switch (act->id) {
	case FLOW_ACTION_MIRRED:
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
		mall_entry->mirror.to_dev = act->dev;
		break;
	case FLOW_ACTION_SAMPLE:
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_SAMPLE;
		mall_entry->sample.params.psample_group = act->sample.psample_group;
		mall_entry->sample.params.truncate = act->sample.truncate;
		mall_entry->sample.params.trunc_size = act->sample.trunc_size;
		mall_entry->sample.params.rate = act->sample.rate;
		break;
	default:
		err = -EOPNOTSUPP;
		goto errout;
	}

	list_for_each_entry(binding, &block->binding_list, list) {
		err = mlxsw_sp_mall_port_rule_add(binding->mlxsw_sp_port,
						  mall_entry, f->common.extack);
		if (err)
			goto rollback;
	}

	block->rule_count++;
	if (mall_entry->ingress)
		block->egress_blocker_rule_count++;
	else
		block->ingress_blocker_rule_count++;
	list_add_tail(&mall_entry->list, &block->mall.list);
	mlxsw_sp_mall_prio_update(block);
	return 0;

rollback:
	list_for_each_entry_continue_reverse(binding, &block->binding_list,
					     list)
		mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
errout:
	kfree(mall_entry);
	return err;
}

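/* Remove an offloaded matchall classifier: unlink the entry, uninstall it
 * from every bound port and free it.
 */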
void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
			   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_mall_entry *mall_entry;

	mall_entry = mlxsw_sp_mall_entry_find(block, f->cookie);
	if (!mall_entry) {
		NL_SET_ERR_MSG(f->common.extack, "Entry not found");
		return;
	}

	list_del(&mall_entry->list);
	if (mall_entry->ingress)
		block->egress_blocker_rule_count--;
	else
		block->ingress_blocker_rule_count--;
	block->rule_count--;
	list_for_each_entry(binding, &block->binding_list, list)
		mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
	kfree_rcu(mall_entry, rcu);
	mlxsw_sp_mall_prio_update(block);
}

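/* Install all matchall entries of the block on a newly bound port,
 * rolling back on failure.
 */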
int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
			    struct mlxsw_sp_port *mlxsw_sp_port,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mall_entry *mall_entry;
	int err;

	list_for_each_entry(mall_entry, &block->mall.list, list) {
		err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry,
						  extack);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	list_for_each_entry_continue_reverse(mall_entry, &block->mall.list,
					     list)
		mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
	return err;
}

void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
			       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_mall_entry *mall_entry;

	list_for_each_entry(mall_entry, &block->mall.list, list)
		mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
}

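/* Report the priority range of the matchall entries installed on chain 0 of
 * the block. -ENOENT indicates there are no entries to order against.
 */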
int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index,
			   unsigned int *p_min_prio, unsigned int *p_max_prio)
{
	if (chain_index || list_empty(&block->mall.list))
		return -ENOENT;
	*p_min_prio = block->mall.min_prio;
	*p_max_prio = block->mall.max_prio;
	return 0;
}

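/* Spectrum-1: sampling is configured per port via the MPSC register and is
 * only supported on ingress.
 */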
static int mlxsw_sp1_mall_sample_add(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_mall_entry *mall_entry,
				     struct netlink_ext_ack *extack)
{
	u32 rate = mall_entry->sample.params.rate;

	if (!mall_entry->ingress) {
		NL_SET_ERR_MSG(extack, "Sampling is not supported on egress");
		return -EOPNOTSUPP;
	}

	if (rate > MLXSW_REG_MPSC_RATE_MAX) {
		NL_SET_ERR_MSG(extack, "Unsupported sampling rate");
		return -EOPNOTSUPP;
	}

	return mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true, rate);
}

static void mlxsw_sp1_mall_sample_del(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_mall_entry *mall_entry)
{
	mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1);
}

const struct mlxsw_sp_mall_ops mlxsw_sp1_mall_ops = {
	.sample_add = mlxsw_sp1_mall_sample_add,
	.sample_del = mlxsw_sp1_mall_sample_del,
};

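/* Spectrum-2 and newer: sampling is implemented as a SPAN agent with a NULL
 * destination netdev (mirroring towards the CPU) using the dedicated sampling
 * session, bound with the requested probability rate.
 */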
static int mlxsw_sp2_mall_sample_add(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_mall_entry *mall_entry,
				     struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_span_trigger_parms trigger_parms = {};
	struct mlxsw_sp_span_agent_parms agent_parms = {
		.to_dev = NULL,
		.session_id = MLXSW_SP_SPAN_SESSION_ID_SAMPLING,
	};
	u32 rate = mall_entry->sample.params.rate;
	enum mlxsw_sp_span_trigger span_trigger;
	int err;

	err = mlxsw_sp_span_agent_get(mlxsw_sp, &mall_entry->sample.span_id,
				      &agent_parms);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to get SPAN agent");
		return err;
	}

	err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port,
					      mall_entry->ingress);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to get analyzed port");
		goto err_analyzed_port_get;
	}

	span_trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
					     MLXSW_SP_SPAN_TRIGGER_EGRESS;
	trigger_parms.span_id = mall_entry->sample.span_id;
	trigger_parms.probability_rate = rate;
	err = mlxsw_sp_span_agent_bind(mlxsw_sp, span_trigger, mlxsw_sp_port,
				       &trigger_parms);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to bind SPAN agent");
		goto err_agent_bind;
	}

	return 0;

err_agent_bind:
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
err_analyzed_port_get:
	mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->sample.span_id);
	return err;
}

static void mlxsw_sp2_mall_sample_del(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_mall_entry *mall_entry)
{
	struct mlxsw_sp_span_trigger_parms trigger_parms = {};
	enum mlxsw_sp_span_trigger span_trigger;

	span_trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
					     MLXSW_SP_SPAN_TRIGGER_EGRESS;
	trigger_parms.span_id = mall_entry->sample.span_id;
	mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
				   &trigger_parms);
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
	mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->sample.span_id);
}

const struct mlxsw_sp_mall_ops mlxsw_sp2_mall_ops = {
	.sample_add = mlxsw_sp2_mall_sample_add,
	.sample_del = mlxsw_sp2_mall_sample_del,
};