// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */

#include "cxgb4.h"
#include "cxgb4_tc_matchall.h"
#include "sched.h"
#include "cxgb4_uld.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

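/* Validate a tc police action for hardware offload. The scheduler can
 * only enforce a byte-rate ceiling with a drop-on-exceed policy, so
 * reject peak/average rate, overhead and packet-per-second configs.
 */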
static int cxgb4_policer_validate(const struct flow_action *action,
				  const struct flow_action_entry *act,
				  struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	if (act->police.rate_pkt_ps) {
		NL_SET_ERR_MSG_MOD(extack,
				   "QoS offload does not support packets per second");
		return -EOPNOTSUPP;
	}

	return 0;
}

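/* Egress matchall rules map to a channel rate limiter, so exactly one
 * policing action is allowed, the block must not be shared, the rate
 * must fit within the link speed, and no TX queue may already be bound
 * to a different scheduling class.
 */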
static int cxgb4_matchall_egress_validate(struct net_device *dev,
					  struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct port_info *pi = netdev2pinfo(dev);
	struct flow_action_entry *entry;
	struct ch_sched_queue qe;
	struct sched_class *e;
	u64 max_link_rate;
	u32 i, speed;
	int ret;

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload needs at least 1 policing action");
		return -EINVAL;
	} else if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload only supports 1 policing action");
		return -EINVAL;
	} else if (pi->tc_block_shared) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload not supported with shared blocks");
		return -EINVAL;
	}

	ret = t4_get_link_params(pi, NULL, &speed, NULL);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to get max speed supported by the link");
		return -EINVAL;
	}

	/* Convert from Mbps to bps */
	max_link_rate = (u64)speed * 1000 * 1000;

	flow_action_for_each(i, entry, actions) {
		switch (entry->id) {
		case FLOW_ACTION_POLICE:
			ret = cxgb4_policer_validate(actions, entry, extack);
			if (ret)
				return ret;

			/* Convert bytes per second to bits per second */
			if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Specified policing max rate is larger than underlying link speed");
				return -ERANGE;
			}
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Only policing action supported with Egress MATCHALL offload");
			return -EOPNOTSUPP;
		}
	}

	for (i = 0; i < pi->nqsets; i++) {
		memset(&qe, 0, sizeof(qe));
		qe.queue = i;

		e = cxgb4_sched_queue_lookup(dev, &qe);
		if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Some queues are already bound to a different class");
			return -EBUSY;
		}
	}

	return 0;
}

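/* Bind all TX queue sets of the port to the given traffic class,
 * rolling back any partial bindings on failure.
 */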
static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct ch_sched_queue qe;
	int ret;
	u32 i;

	for (i = 0; i < pi->nqsets; i++) {
		qe.queue = i;
		qe.class = tc;
		ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	while (i--) {
		qe.queue = i;
		qe.class = SCHED_CLS_NONE;
		cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
	}

	return ret;
}

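/* Detach all TX queue sets of the port from their traffic class. */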
static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct ch_sched_queue qe;
	u32 i;

	for (i = 0; i < pi->nqsets; i++) {
		qe.queue = i;
		qe.class = SCHED_CLS_NONE;
		cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
	}
}

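/* Allocate a channel rate-limiting traffic class for the egress
 * matchall policer and bind all TX queues of the port to it.
 */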
static int cxgb4_matchall_alloc_tc(struct net_device *dev,
				   struct tc_cls_matchall_offload *cls)
{
	struct ch_sched_params p = {
		.type = SCHED_CLASS_TYPE_PACKET,
		.u.params.level = SCHED_CLASS_LEVEL_CH_RL,
		.u.params.mode = SCHED_CLASS_MODE_CLASS,
		.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
		.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
		.u.params.class = SCHED_CLS_NONE,
		.u.params.minrate = 0,
		.u.params.weight = 0,
		.u.params.pktsize = dev->mtu,
	};
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct flow_action_entry *entry;
	struct sched_class *e;
	int ret;
	u32 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	flow_action_for_each(i, entry, &cls->rule->action)
		if (entry->id == FLOW_ACTION_POLICE)
			break;

	ret = cxgb4_policer_validate(&cls->rule->action, entry, extack);
	if (ret)
		return ret;

	/* Convert from bytes per second to Kbps */
	p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
	p.u.params.channel = pi->tx_chan;
	e = cxgb4_sched_class_alloc(dev, &p);
	if (!e) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free traffic class available for policing action");
		return -ENOMEM;
	}

	ret = cxgb4_matchall_tc_bind_queues(dev, e->idx);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Could not bind queues to traffic class");
		goto out_free;
	}

	tc_port_matchall->egress.hwtc = e->idx;
	tc_port_matchall->egress.cookie = cls->cookie;
	tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
	return 0;

out_free:
	cxgb4_sched_class_free(dev, e->idx);
	return ret;
}

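/* Tear down the egress matchall offload: unbind the TX queues and
 * release the traffic class allocated for the policer.
 */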
static void cxgb4_matchall_free_tc(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	cxgb4_matchall_tc_unbind_queues(dev);
	cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);

	tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
	tc_port_matchall->egress.cookie = 0;
	tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED;
}

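/* If the rule contains a mirred action, allocate a mirror VI for the
 * port so ingress traffic can be replicated to it.
 */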
static int cxgb4_matchall_mirror_alloc(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct flow_action_entry *act;
	int ret;
	u32 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	flow_action_for_each(i, act, &cls->rule->action) {
		if (act->id == FLOW_ACTION_MIRRED) {
			ret = cxgb4_port_mirror_alloc(dev);
			if (ret) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Couldn't allocate mirror");
				return ret;
			}

			tc_port_matchall->ingress.viid_mirror = pi->viid_mirror;
			break;
		}
	}

	return 0;
}

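/* Release the mirror VI, if one was allocated for this rule. */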
static void cxgb4_matchall_mirror_free(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (!tc_port_matchall->ingress.viid_mirror)
		return;

	cxgb4_port_mirror_free(dev);
	tc_port_matchall->ingress.viid_mirror = 0;
}

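/* Delete the HW filter for one filter type (IPv4 or IPv6). */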
static int cxgb4_matchall_del_filter(struct net_device *dev, u8 filter_type)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid[filter_type],
			       &tc_port_matchall->ingress.fs[filter_type]);
	if (ret)
		return ret;

	tc_port_matchall->ingress.tid[filter_type] = 0;
	return 0;
}

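/* Install a matchall ingress filter of the given type (IPv4 or IPv6),
 * matching all traffic destined to this function's VI.
 */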
static int cxgb4_matchall_add_filter(struct net_device *dev,
				     struct tc_cls_matchall_offload *cls,
				     u8 filter_type)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct ch_filter_specification *fs;
	int ret, fidx;

	/* Get a free filter entry TID, where we can insert this new
	 * rule. Only insert rule if its prio doesn't conflict with
	 * existing rules.
	 */
	fidx = cxgb4_get_free_ftid(dev, filter_type ? PF_INET6 : PF_INET,
				   false, cls->common.prio);
	if (fidx < 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free LETCAM index available");
		return -ENOMEM;
	}

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	fs = &tc_port_matchall->ingress.fs[filter_type];
	memset(fs, 0, sizeof(*fs));

	if (fidx < adap->tids.nhpftids)
		fs->prio = 1;
	fs->tc_prio = cls->common.prio;
	fs->tc_cookie = cls->cookie;
	fs->type = filter_type;
	fs->hitcnts = 1;

	fs->val.pfvf_vld = 1;
	fs->val.pf = adap->pf;
	fs->val.vf = pi->vin;

	cxgb4_process_flow_actions(dev, &cls->rule->action, fs);

	ret = cxgb4_set_filter(dev, fidx, fs);
	if (ret)
		return ret;

	tc_port_matchall->ingress.tid[filter_type] = fidx;
	return 0;
}

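/* Set up the ingress matchall offload: allocate a mirror VI if needed,
 * then install one filter per filter type, unwinding on failure.
 */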
static int cxgb4_matchall_alloc_filter(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret, i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	ret = cxgb4_matchall_mirror_alloc(dev, cls);
	if (ret)
		return ret;

	for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
		ret = cxgb4_matchall_add_filter(dev, cls, i);
		if (ret)
			goto out_free;
	}

	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
	return 0;

out_free:
	while (i-- > 0)
		cxgb4_matchall_del_filter(dev, i);

	cxgb4_matchall_mirror_free(dev);
	return ret;
}

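/* Remove all ingress matchall filters, free the mirror VI and reset
 * the cached stats.
 */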
static int cxgb4_matchall_free_filter(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;
	u8 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
		ret = cxgb4_matchall_del_filter(dev, i);
		if (ret)
			return ret;
	}

	cxgb4_matchall_mirror_free(dev);

	tc_port_matchall->ingress.packets = 0;
	tc_port_matchall->ingress.bytes = 0;
	tc_port_matchall->ingress.last_used = 0;
	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED;
	return 0;
}

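/* Offload a tc matchall rule. Only one ingress rule (filter + optional
 * mirror) and one egress rule (policer) can be active per port.
 */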
int cxgb4_tc_matchall_replace(struct net_device *dev,
			      struct tc_cls_matchall_offload *cls_matchall,
			      bool ingress)
{
	struct netlink_ext_ack *extack = cls_matchall->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (ingress) {
		if (tc_port_matchall->ingress.state ==
		    CXGB4_MATCHALL_STATE_ENABLED) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only 1 Ingress MATCHALL can be offloaded");
			return -ENOMEM;
		}

		ret = cxgb4_validate_flow_actions(dev,
						  &cls_matchall->rule->action,
						  extack, 1);
		if (ret)
			return ret;

		return cxgb4_matchall_alloc_filter(dev, cls_matchall);
	}

	if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only 1 Egress MATCHALL can be offloaded");
		return -ENOMEM;
	}

	ret = cxgb4_matchall_egress_validate(dev, cls_matchall);
	if (ret)
		return ret;

	return cxgb4_matchall_alloc_tc(dev, cls_matchall);
}

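/* Destroy an offloaded matchall rule, matching it by cookie. */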
int cxgb4_tc_matchall_destroy(struct net_device *dev,
			      struct tc_cls_matchall_offload *cls_matchall,
			      bool ingress)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (ingress) {
		/* All the filter types of this matchall rule save the
		 * same cookie. So, checking for the first one is
		 * enough.
		 */
		if (cls_matchall->cookie !=
		    tc_port_matchall->ingress.fs[0].tc_cookie)
			return -ENOENT;

		return cxgb4_matchall_free_filter(dev);
	}

	if (cls_matchall->cookie != tc_port_matchall->egress.cookie)
		return -ENOENT;

	cxgb4_matchall_free_tc(dev);
	return 0;
}

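/* Report ingress matchall hit counters to tc. The IPv4 and IPv6 filter
 * counters are summed before the delta is pushed to the stack.
 */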
int cxgb4_tc_matchall_stats(struct net_device *dev,
			    struct tc_cls_matchall_offload *cls_matchall)
{
	u64 tmp_packets, tmp_bytes, packets = 0, bytes = 0;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct cxgb4_matchall_ingress_entry *ingress;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;
	u8 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED)
		return -ENOENT;

	ingress = &tc_port_matchall->ingress;
	for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
		ret = cxgb4_get_filter_counters(dev, ingress->tid[i],
						&tmp_packets, &tmp_bytes,
						ingress->fs[i].hash);
		if (ret)
			return ret;

		packets += tmp_packets;
		bytes += tmp_bytes;
	}

	if (tc_port_matchall->ingress.packets != packets) {
		flow_stats_update(&cls_matchall->stats,
				  bytes - tc_port_matchall->ingress.bytes,
				  packets - tc_port_matchall->ingress.packets,
				  0, tc_port_matchall->ingress.last_used,
				  FLOW_ACTION_HW_STATS_IMMEDIATE);

		tc_port_matchall->ingress.packets = packets;
		tc_port_matchall->ingress.bytes = bytes;
		tc_port_matchall->ingress.last_used = jiffies;
	}

	return 0;
}

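/* Disable any active matchall offloads on the port; used when the
 * adapter is being torn down.
 */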
static void cxgb4_matchall_disable_offload(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED)
		cxgb4_matchall_free_tc(dev);

	if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED)
		cxgb4_matchall_free_filter(dev);
}

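/* Allocate the per-port matchall state for the adapter. */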
int cxgb4_init_tc_matchall(struct adapter *adap)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct cxgb4_tc_matchall *tc_matchall;
	int ret;

	tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL);
	if (!tc_matchall)
		return -ENOMEM;

	tc_port_matchall = kcalloc(adap->params.nports,
				   sizeof(*tc_port_matchall),
				   GFP_KERNEL);
	if (!tc_port_matchall) {
		ret = -ENOMEM;
		goto out_free_matchall;
	}

	tc_matchall->port_matchall = tc_port_matchall;
	adap->tc_matchall = tc_matchall;
	return 0;

out_free_matchall:
	kfree(tc_matchall);
	return ret;
}

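/* Disable all offloads and free the per-port matchall state. */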
void cxgb4_cleanup_tc_matchall(struct adapter *adap)
{
	u8 i;

	if (adap->tc_matchall) {
		if (adap->tc_matchall->port_matchall) {
			for (i = 0; i < adap->params.nports; i++) {
				struct net_device *dev = adap->port[i];

				if (dev)
					cxgb4_matchall_disable_offload(dev);
			}
			kfree(adap->tc_matchall->port_matchall);
		}
		kfree(adap->tc_matchall);
	}
}