Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * DPAA2 Ethernet Switch flower support
0004  *
0005  * Copyright 2021 NXP
0006  *
0007  */
0008 
0009 #include "dpaa2-switch.h"
0010 
/* Translate a tc flower classifier match into a DPSW ACL key.
 *
 * @cls:     flower offload descriptor carrying the flow rule to parse
 * @acl_key: output; ->match receives the header values to compare against
 *           and ->mask the corresponding per-field masks
 *
 * Returns 0 on success or -EOPNOTSUPP when the rule uses dissector keys or
 * fields that cannot be expressed in the ACL key (with an extack message).
 */
static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
					 struct dpsw_acl_key *acl_key)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpsw_acl_fields *acl_h, *acl_m;

	/* Reject rules matching on anything outside the supported key set */
	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	acl_h = &acl_key->match;	/* header values to match */
	acl_m = &acl_key->mask;		/* per-field match masks */

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		acl_h->l3_protocol = match.key->ip_proto;
		acl_h->l2_ether_type = be16_to_cpu(match.key->n_proto);
		acl_m->l3_protocol = match.mask->ip_proto;
		acl_m->l2_ether_type = be16_to_cpu(match.mask->n_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		ether_addr_copy(acl_h->l2_dest_mac, &match.key->dst[0]);
		ether_addr_copy(acl_h->l2_source_mac, &match.key->src[0]);
		ether_addr_copy(acl_m->l2_dest_mac, &match.mask->dst[0]);
		ether_addr_copy(acl_m->l2_source_mac, &match.mask->src[0]);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		acl_h->l2_vlan_id = match.key->vlan_id;
		acl_h->l2_tpid = be16_to_cpu(match.key->vlan_tpid);
		/* PCP and DEI are packed into one field: priority in bits
		 * 3..1, DEI in bit 0.
		 */
		acl_h->l2_pcp_dei = match.key->vlan_priority << 1 |
				    match.key->vlan_dei;

		acl_m->l2_vlan_id = match.mask->vlan_id;
		acl_m->l2_tpid = be16_to_cpu(match.mask->vlan_tpid);
		acl_m->l2_pcp_dei = match.mask->vlan_priority << 1 |
				    match.mask->vlan_dei;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		/* IPv4 addresses are stored in host byte order in the key */
		acl_h->l3_source_ip = be32_to_cpu(match.key->src);
		acl_h->l3_dest_ip = be32_to_cpu(match.key->dst);
		acl_m->l3_source_ip = be32_to_cpu(match.mask->src);
		acl_m->l3_dest_ip = be32_to_cpu(match.mask->dst);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		acl_h->l4_source_port = be16_to_cpu(match.key->src);
		acl_h->l4_dest_port = be16_to_cpu(match.key->dst);
		acl_m->l4_source_port = be16_to_cpu(match.mask->src);
		acl_m->l4_dest_port = be16_to_cpu(match.mask->dst);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		/* Only the DSCP part of the TOS byte can be matched on */
		if (match.mask->ttl != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL not supported");
			return -EOPNOTSUPP;
		}

		/* Bits 1:0 of TOS are the ECN field */
		if ((match.mask->tos & 0x3) != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on ECN not supported, only DSCP");
			return -EOPNOTSUPP;
		}

		acl_h->l3_dscp = match.key->tos >> 2;
		acl_m->l3_dscp = match.mask->tos >> 2;
	}

	return 0;
}
0113 
0114 int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
0115                    struct dpaa2_switch_acl_entry *entry)
0116 {
0117     struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
0118     struct ethsw_core *ethsw = filter_block->ethsw;
0119     struct dpsw_acl_key *acl_key = &entry->key;
0120     struct device *dev = ethsw->dev;
0121     u8 *cmd_buff;
0122     int err;
0123 
0124     cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
0125     if (!cmd_buff)
0126         return -ENOMEM;
0127 
0128     dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);
0129 
0130     acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
0131                          DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
0132                          DMA_TO_DEVICE);
0133     if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
0134         dev_err(dev, "DMA mapping failed\n");
0135         return -EFAULT;
0136     }
0137 
0138     err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
0139                  filter_block->acl_id, acl_entry_cfg);
0140 
0141     dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff),
0142              DMA_TO_DEVICE);
0143     if (err) {
0144         dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
0145         return err;
0146     }
0147 
0148     kfree(cmd_buff);
0149 
0150     return 0;
0151 }
0152 
0153 static int
0154 dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
0155                   struct dpaa2_switch_acl_entry *entry)
0156 {
0157     struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
0158     struct dpsw_acl_key *acl_key = &entry->key;
0159     struct ethsw_core *ethsw = block->ethsw;
0160     struct device *dev = ethsw->dev;
0161     u8 *cmd_buff;
0162     int err;
0163 
0164     cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
0165     if (!cmd_buff)
0166         return -ENOMEM;
0167 
0168     dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);
0169 
0170     acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
0171                          DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
0172                          DMA_TO_DEVICE);
0173     if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
0174         dev_err(dev, "DMA mapping failed\n");
0175         return -EFAULT;
0176     }
0177 
0178     err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
0179                     block->acl_id, acl_entry_cfg);
0180 
0181     dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff),
0182              DMA_TO_DEVICE);
0183     if (err) {
0184         dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
0185         return err;
0186     }
0187 
0188     kfree(cmd_buff);
0189 
0190     return 0;
0191 }
0192 
0193 static int
0194 dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_filter_block *block,
0195                    struct dpaa2_switch_acl_entry *entry)
0196 {
0197     struct dpaa2_switch_acl_entry *tmp;
0198     struct list_head *pos, *n;
0199     int index = 0;
0200 
0201     if (list_empty(&block->acl_entries)) {
0202         list_add(&entry->list, &block->acl_entries);
0203         return index;
0204     }
0205 
0206     list_for_each_safe(pos, n, &block->acl_entries) {
0207         tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list);
0208         if (entry->prio < tmp->prio)
0209             break;
0210         index++;
0211     }
0212     list_add(&entry->list, pos->prev);
0213     return index;
0214 }
0215 
0216 static struct dpaa2_switch_acl_entry*
0217 dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_filter_block *block,
0218                     int index)
0219 {
0220     struct dpaa2_switch_acl_entry *tmp;
0221     int i = 0;
0222 
0223     list_for_each_entry(tmp, &block->acl_entries, list) {
0224         if (i == index)
0225             return tmp;
0226         ++i;
0227     }
0228 
0229     return NULL;
0230 }
0231 
0232 static int
0233 dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_filter_block *block,
0234                       struct dpaa2_switch_acl_entry *entry,
0235                       int precedence)
0236 {
0237     int err;
0238 
0239     err = dpaa2_switch_acl_entry_remove(block, entry);
0240     if (err)
0241         return err;
0242 
0243     entry->cfg.precedence = precedence;
0244     return dpaa2_switch_acl_entry_add(block, entry);
0245 }
0246 
/* Insert a new ACL entry into both the software list and the hardware
 * table, keeping hardware precedence consistent with list order.
 *
 * Entries are packed towards the high end of the table (starting from
 * DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - 1 downwards as rules accumulate);
 * every entry that sorts before the new one is re-installed one slot
 * earlier to open a gap for it.
 */
static int
dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_filter_block *block,
			       struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	int index, i, precedence, err;

	/* Add the new ACL entry to the linked list and get its index */
	index = dpaa2_switch_acl_entry_add_to_list(block, entry);

	/* Move up in priority the ACL entries to make space
	 * for the new filter.
	 */
	precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - block->num_acl_rules - 1;
	for (i = 0; i < index; i++) {
		tmp = dpaa2_switch_acl_entry_get_by_index(block, i);

		err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
							    precedence);
		if (err)
			return err;

		precedence++;
	}

	/* Add the new entry to hardware */
	entry->cfg.precedence = precedence;
	err = dpaa2_switch_acl_entry_add(block, entry);
	/* NOTE(review): the counter is bumped even when the hardware add
	 * fails; this keeps it consistent with the list (the entry stays
	 * linked) — confirm this is the intended failure semantics.
	 */
	block->num_acl_rules++;

	return err;
}
0279 
0280 static struct dpaa2_switch_acl_entry *
0281 dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
0282                       unsigned long cookie)
0283 {
0284     struct dpaa2_switch_acl_entry *tmp, *n;
0285 
0286     list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
0287         if (tmp->cookie == cookie)
0288             return tmp;
0289     }
0290     return NULL;
0291 }
0292 
0293 static int
0294 dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_filter_block *block,
0295                  struct dpaa2_switch_acl_entry *entry)
0296 {
0297     struct dpaa2_switch_acl_entry *tmp, *n;
0298     int index = 0;
0299 
0300     list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
0301         if (tmp->cookie == entry->cookie)
0302             return index;
0303         index++;
0304     }
0305     return -ENOENT;
0306 }
0307 
0308 static struct dpaa2_switch_mirror_entry *
0309 dpaa2_switch_mirror_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
0310                      unsigned long cookie)
0311 {
0312     struct dpaa2_switch_mirror_entry *tmp, *n;
0313 
0314     list_for_each_entry_safe(tmp, n, &block->mirror_entries, list) {
0315         if (tmp->cookie == cookie)
0316             return tmp;
0317     }
0318     return NULL;
0319 }
0320 
/* Remove @entry from the hardware table and the software list, then shift
 * the entries that preceded it down one precedence slot so the table stays
 * packed. Frees @entry on success.
 */
static int
dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_filter_block *block,
				  struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	int index, i, precedence, err;

	/* If the cookie is somehow absent, index is -ENOENT and the
	 * compaction loop below is simply skipped.
	 */
	index = dpaa2_switch_acl_entry_get_index(block, entry);

	/* Remove from hardware the ACL entry */
	err = dpaa2_switch_acl_entry_remove(block, entry);
	if (err)
		return err;

	block->num_acl_rules--;

	/* Remove it from the list also */
	list_del(&entry->list);

	/* Move down in priority the entries over the deleted one */
	precedence = entry->cfg.precedence;
	for (i = index - 1; i >= 0; i--) {
		tmp = dpaa2_switch_acl_entry_get_by_index(block, i);
		err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
							    precedence);
		if (err)
			return err;

		precedence--;
	}

	kfree(entry);

	return 0;
}
0356 
0357 static int dpaa2_switch_tc_parse_action_acl(struct ethsw_core *ethsw,
0358                         struct flow_action_entry *cls_act,
0359                         struct dpsw_acl_result *dpsw_act,
0360                         struct netlink_ext_ack *extack)
0361 {
0362     int err = 0;
0363 
0364     switch (cls_act->id) {
0365     case FLOW_ACTION_TRAP:
0366         dpsw_act->action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
0367         break;
0368     case FLOW_ACTION_REDIRECT:
0369         if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
0370             NL_SET_ERR_MSG_MOD(extack,
0371                        "Destination not a DPAA2 switch port");
0372             return -EOPNOTSUPP;
0373         }
0374 
0375         dpsw_act->if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
0376         dpsw_act->action = DPSW_ACL_ACTION_REDIRECT;
0377         break;
0378     case FLOW_ACTION_DROP:
0379         dpsw_act->action = DPSW_ACL_ACTION_DROP;
0380         break;
0381     default:
0382         NL_SET_ERR_MSG_MOD(extack,
0383                    "Action not supported");
0384         err = -EOPNOTSUPP;
0385         goto out;
0386     }
0387 
0388 out:
0389     return err;
0390 }
0391 
/* Install one mirroring rule on every switch port that shares @block.
 *
 * @to is the index of the destination (mirror) port. The caller owns
 * @entry: it is linked into the block's mirror list only on success. On
 * failure, the reflections already installed are rolled back and, if the
 * mirror port was configured by this call, it is marked unset again
 * (ethsw->mirror_port == num_ifs is used as the "no mirror port" sentinel).
 */
static int
dpaa2_switch_block_add_mirror(struct dpaa2_switch_filter_block *block,
			      struct dpaa2_switch_mirror_entry *entry,
			      u16 to, struct netlink_ext_ack *extack)
{
	unsigned long block_ports = block->ports;
	struct ethsw_core *ethsw = block->ethsw;
	struct ethsw_port_priv *port_priv;
	unsigned long ports_added = 0;	/* bitmap for rollback on error */
	u16 vlan = entry->cfg.vlan_id;
	bool mirror_port_enabled;
	int err, port;

	/* Setup the mirroring port */
	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	if (!mirror_port_enabled) {
		err = dpsw_set_reflection_if(ethsw->mc_io, 0,
					     ethsw->dpsw_handle, to);
		if (err)
			return err;
		ethsw->mirror_port = to;
	}

	/* Setup the same egress mirroring configuration on all the switch
	 * ports that share the same filter block.
	 */
	for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs) {
		port_priv = ethsw->ports[port];

		/* We cannot add a per VLAN mirroring rule if the VLAN in
		 * question is not installed on the switch port.
		 */
		if (entry->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
		    !(port_priv->vlans[vlan] & ETHSW_VLAN_MEMBER)) {
			NL_SET_ERR_MSG(extack,
				       "VLAN must be installed on the switch port");
			err = -EINVAL;
			goto err_remove_filters;
		}

		err = dpsw_if_add_reflection(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     port, &entry->cfg);
		if (err)
			goto err_remove_filters;

		ports_added |= BIT(port);
	}

	list_add(&entry->list, &block->mirror_entries);

	return 0;

err_remove_filters:
	/* Roll back only the reflections that were actually installed */
	for_each_set_bit(port, &ports_added, ethsw->sw_attr.num_ifs) {
		dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  port, &entry->cfg);
	}

	/* Undo the mirror port setup if it was done by this call */
	if (!mirror_port_enabled)
		ethsw->mirror_port = ethsw->sw_attr.num_ifs;

	return err;
}
0456 
0457 static int
0458 dpaa2_switch_block_remove_mirror(struct dpaa2_switch_filter_block *block,
0459                  struct dpaa2_switch_mirror_entry *entry)
0460 {
0461     struct dpsw_reflection_cfg *cfg = &entry->cfg;
0462     unsigned long block_ports = block->ports;
0463     struct ethsw_core *ethsw = block->ethsw;
0464     int port;
0465 
0466     /* Remove this mirroring configuration from all the ports belonging to
0467      * the filter block.
0468      */
0469     for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs)
0470         dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
0471                       port, cfg);
0472 
0473     /* Also remove it from the list of mirror filters */
0474     list_del(&entry->list);
0475     kfree(entry);
0476 
0477     /* If this was the last mirror filter, then unset the mirror port */
0478     if (list_empty(&block->mirror_entries))
0479         ethsw->mirror_port =  ethsw->sw_attr.num_ifs;
0480 
0481     return 0;
0482 }
0483 
0484 static int
0485 dpaa2_switch_cls_flower_replace_acl(struct dpaa2_switch_filter_block *block,
0486                     struct flow_cls_offload *cls)
0487 {
0488     struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
0489     struct netlink_ext_ack *extack = cls->common.extack;
0490     struct dpaa2_switch_acl_entry *acl_entry;
0491     struct ethsw_core *ethsw = block->ethsw;
0492     struct flow_action_entry *act;
0493     int err;
0494 
0495     if (dpaa2_switch_acl_tbl_is_full(block)) {
0496         NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
0497         return -ENOMEM;
0498     }
0499 
0500     acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
0501     if (!acl_entry)
0502         return -ENOMEM;
0503 
0504     err = dpaa2_switch_flower_parse_key(cls, &acl_entry->key);
0505     if (err)
0506         goto free_acl_entry;
0507 
0508     act = &rule->action.entries[0];
0509     err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
0510                            &acl_entry->cfg.result, extack);
0511     if (err)
0512         goto free_acl_entry;
0513 
0514     acl_entry->prio = cls->common.prio;
0515     acl_entry->cookie = cls->cookie;
0516 
0517     err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
0518     if (err)
0519         goto free_acl_entry;
0520 
0521     return 0;
0522 
0523 free_acl_entry:
0524     kfree(acl_entry);
0525 
0526     return err;
0527 }
0528 
0529 static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
0530                         u16 *vlan)
0531 {
0532     struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
0533     struct flow_dissector *dissector = rule->match.dissector;
0534     struct netlink_ext_ack *extack = cls->common.extack;
0535     int ret = -EOPNOTSUPP;
0536 
0537     if (dissector->used_keys &
0538         ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
0539           BIT(FLOW_DISSECTOR_KEY_CONTROL) |
0540           BIT(FLOW_DISSECTOR_KEY_VLAN))) {
0541         NL_SET_ERR_MSG_MOD(extack,
0542                    "Mirroring is supported only per VLAN");
0543         return -EOPNOTSUPP;
0544     }
0545 
0546     if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
0547         struct flow_match_vlan match;
0548 
0549         flow_rule_match_vlan(rule, &match);
0550 
0551         if (match.mask->vlan_priority != 0 ||
0552             match.mask->vlan_dei != 0) {
0553             NL_SET_ERR_MSG_MOD(extack,
0554                        "Only matching on VLAN ID supported");
0555             return -EOPNOTSUPP;
0556         }
0557 
0558         if (match.mask->vlan_id != 0xFFF) {
0559             NL_SET_ERR_MSG_MOD(extack,
0560                        "Masked matching not supported");
0561             return -EOPNOTSUPP;
0562         }
0563 
0564         *vlan = (u16)match.key->vlan_id;
0565         ret = 0;
0566     }
0567 
0568     return ret;
0569 }
0570 
0571 static int
0572 dpaa2_switch_cls_flower_replace_mirror(struct dpaa2_switch_filter_block *block,
0573                        struct flow_cls_offload *cls)
0574 {
0575     struct netlink_ext_ack *extack = cls->common.extack;
0576     struct dpaa2_switch_mirror_entry *mirror_entry;
0577     struct ethsw_core *ethsw = block->ethsw;
0578     struct dpaa2_switch_mirror_entry *tmp;
0579     struct flow_action_entry *cls_act;
0580     struct list_head *pos, *n;
0581     bool mirror_port_enabled;
0582     u16 if_id, vlan;
0583     int err;
0584 
0585     mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
0586     cls_act = &cls->rule->action.entries[0];
0587 
0588     /* Offload rules only when the destination is a DPAA2 switch port */
0589     if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
0590         NL_SET_ERR_MSG_MOD(extack,
0591                    "Destination not a DPAA2 switch port");
0592         return -EOPNOTSUPP;
0593     }
0594     if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
0595 
0596     /* We have a single mirror port but can configure egress mirroring on
0597      * all the other switch ports. We need to allow mirroring rules only
0598      * when the destination port is the same.
0599      */
0600     if (mirror_port_enabled && ethsw->mirror_port != if_id) {
0601         NL_SET_ERR_MSG_MOD(extack,
0602                    "Multiple mirror ports not supported");
0603         return -EBUSY;
0604     }
0605 
0606     /* Parse the key */
0607     err = dpaa2_switch_flower_parse_mirror_key(cls, &vlan);
0608     if (err)
0609         return err;
0610 
0611     /* Make sure that we don't already have a mirror rule with the same
0612      * configuration.
0613      */
0614     list_for_each_safe(pos, n, &block->mirror_entries) {
0615         tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);
0616 
0617         if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
0618             tmp->cfg.vlan_id == vlan) {
0619             NL_SET_ERR_MSG_MOD(extack,
0620                        "VLAN mirror filter already installed");
0621             return -EBUSY;
0622         }
0623     }
0624 
0625     mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
0626     if (!mirror_entry)
0627         return -ENOMEM;
0628 
0629     mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_VLAN;
0630     mirror_entry->cfg.vlan_id = vlan;
0631     mirror_entry->cookie = cls->cookie;
0632 
0633     return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
0634                          extack);
0635 }
0636 
0637 int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block,
0638                     struct flow_cls_offload *cls)
0639 {
0640     struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
0641     struct netlink_ext_ack *extack = cls->common.extack;
0642     struct flow_action_entry *act;
0643 
0644     if (!flow_offload_has_one_action(&rule->action)) {
0645         NL_SET_ERR_MSG(extack, "Only singular actions are supported");
0646         return -EOPNOTSUPP;
0647     }
0648 
0649     act = &rule->action.entries[0];
0650     switch (act->id) {
0651     case FLOW_ACTION_REDIRECT:
0652     case FLOW_ACTION_TRAP:
0653     case FLOW_ACTION_DROP:
0654         return dpaa2_switch_cls_flower_replace_acl(block, cls);
0655     case FLOW_ACTION_MIRRED:
0656         return dpaa2_switch_cls_flower_replace_mirror(block, cls);
0657     default:
0658         NL_SET_ERR_MSG_MOD(extack, "Action not supported");
0659         return -EOPNOTSUPP;
0660     }
0661 }
0662 
0663 int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block,
0664                     struct flow_cls_offload *cls)
0665 {
0666     struct dpaa2_switch_mirror_entry *mirror_entry;
0667     struct dpaa2_switch_acl_entry *acl_entry;
0668 
0669     /* If this filter is a an ACL one, remove it */
0670     acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
0671                                   cls->cookie);
0672     if (acl_entry)
0673         return dpaa2_switch_acl_tbl_remove_entry(block, acl_entry);
0674 
0675     /* If not, then it has to be a mirror */
0676     mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
0677                                 cls->cookie);
0678     if (mirror_entry)
0679         return dpaa2_switch_block_remove_mirror(block,
0680                             mirror_entry);
0681 
0682     return 0;
0683 }
0684 
0685 static int
0686 dpaa2_switch_cls_matchall_replace_acl(struct dpaa2_switch_filter_block *block,
0687                       struct tc_cls_matchall_offload *cls)
0688 {
0689     struct netlink_ext_ack *extack = cls->common.extack;
0690     struct ethsw_core *ethsw = block->ethsw;
0691     struct dpaa2_switch_acl_entry *acl_entry;
0692     struct flow_action_entry *act;
0693     int err;
0694 
0695     if (dpaa2_switch_acl_tbl_is_full(block)) {
0696         NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
0697         return -ENOMEM;
0698     }
0699 
0700     acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
0701     if (!acl_entry)
0702         return -ENOMEM;
0703 
0704     act = &cls->rule->action.entries[0];
0705     err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
0706                            &acl_entry->cfg.result, extack);
0707     if (err)
0708         goto free_acl_entry;
0709 
0710     acl_entry->prio = cls->common.prio;
0711     acl_entry->cookie = cls->cookie;
0712 
0713     err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
0714     if (err)
0715         goto free_acl_entry;
0716 
0717     return 0;
0718 
0719 free_acl_entry:
0720     kfree(acl_entry);
0721 
0722     return err;
0723 }
0724 
0725 static int
0726 dpaa2_switch_cls_matchall_replace_mirror(struct dpaa2_switch_filter_block *block,
0727                      struct tc_cls_matchall_offload *cls)
0728 {
0729     struct netlink_ext_ack *extack = cls->common.extack;
0730     struct dpaa2_switch_mirror_entry *mirror_entry;
0731     struct ethsw_core *ethsw = block->ethsw;
0732     struct dpaa2_switch_mirror_entry *tmp;
0733     struct flow_action_entry *cls_act;
0734     struct list_head *pos, *n;
0735     bool mirror_port_enabled;
0736     u16 if_id;
0737 
0738     mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
0739     cls_act = &cls->rule->action.entries[0];
0740 
0741     /* Offload rules only when the destination is a DPAA2 switch port */
0742     if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
0743         NL_SET_ERR_MSG_MOD(extack,
0744                    "Destination not a DPAA2 switch port");
0745         return -EOPNOTSUPP;
0746     }
0747     if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
0748 
0749     /* We have a single mirror port but can configure egress mirroring on
0750      * all the other switch ports. We need to allow mirroring rules only
0751      * when the destination port is the same.
0752      */
0753     if (mirror_port_enabled && ethsw->mirror_port != if_id) {
0754         NL_SET_ERR_MSG_MOD(extack,
0755                    "Multiple mirror ports not supported");
0756         return -EBUSY;
0757     }
0758 
0759     /* Make sure that we don't already have a mirror rule with the same
0760      * configuration. One matchall rule per block is the maximum.
0761      */
0762     list_for_each_safe(pos, n, &block->mirror_entries) {
0763         tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);
0764 
0765         if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_ALL) {
0766             NL_SET_ERR_MSG_MOD(extack,
0767                        "Matchall mirror filter already installed");
0768             return -EBUSY;
0769         }
0770     }
0771 
0772     mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
0773     if (!mirror_entry)
0774         return -ENOMEM;
0775 
0776     mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_ALL;
0777     mirror_entry->cookie = cls->cookie;
0778 
0779     return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
0780                          extack);
0781 }
0782 
0783 int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block,
0784                       struct tc_cls_matchall_offload *cls)
0785 {
0786     struct netlink_ext_ack *extack = cls->common.extack;
0787     struct flow_action_entry *act;
0788 
0789     if (!flow_offload_has_one_action(&cls->rule->action)) {
0790         NL_SET_ERR_MSG(extack, "Only singular actions are supported");
0791         return -EOPNOTSUPP;
0792     }
0793 
0794     act = &cls->rule->action.entries[0];
0795     switch (act->id) {
0796     case FLOW_ACTION_REDIRECT:
0797     case FLOW_ACTION_TRAP:
0798     case FLOW_ACTION_DROP:
0799         return dpaa2_switch_cls_matchall_replace_acl(block, cls);
0800     case FLOW_ACTION_MIRRED:
0801         return dpaa2_switch_cls_matchall_replace_mirror(block, cls);
0802     default:
0803         NL_SET_ERR_MSG_MOD(extack, "Action not supported");
0804         return -EOPNOTSUPP;
0805     }
0806 }
0807 
/* Replay all of @block's mirror rules on @port_priv, used when a port
 * joins the filter block. Returns 0 or the first firmware error.
 */
int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block,
				      struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_mirror_entry *tmp;
	int err;

	list_for_each_entry(tmp, &block->mirror_entries, list) {
		err = dpsw_if_add_reflection(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     port_priv->idx, &tmp->cfg);
		if (err)
			goto unwind_add;
	}

	return 0;

unwind_add:
	/* NOTE(review): the unwind walks the whole list, so it also issues
	 * remove calls for entries whose add failed or was never attempted,
	 * and ignores their return codes — presumably the firmware tolerates
	 * removing a reflection that was not installed; confirm.
	 */
	list_for_each_entry(tmp, &block->mirror_entries, list)
		dpsw_if_remove_reflection(ethsw->mc_io, 0,
					  ethsw->dpsw_handle,
					  port_priv->idx, &tmp->cfg);

	return err;
}
0833 
/* Remove all of @block's mirror rules from @port_priv, used when a port
 * leaves the filter block. Returns 0 or the first firmware error.
 */
int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block,
					struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_mirror_entry *tmp;
	int err;

	list_for_each_entry(tmp, &block->mirror_entries, list) {
		err = dpsw_if_remove_reflection(ethsw->mc_io, 0,
						ethsw->dpsw_handle,
						port_priv->idx, &tmp->cfg);
		if (err)
			goto unwind_remove;
	}

	return 0;

unwind_remove:
	/* NOTE(review): the unwind re-adds every entry in the list, including
	 * ones that were never removed, and ignores the return codes —
	 * presumably duplicate adds are tolerated by the firmware; confirm.
	 */
	list_for_each_entry(tmp, &block->mirror_entries, list)
		dpsw_if_add_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
				       port_priv->idx, &tmp->cfg);

	return err;
}
0858 
0859 int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block,
0860                       struct tc_cls_matchall_offload *cls)
0861 {
0862     struct dpaa2_switch_mirror_entry *mirror_entry;
0863     struct dpaa2_switch_acl_entry *acl_entry;
0864 
0865     /* If this filter is a an ACL one, remove it */
0866     acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
0867                                   cls->cookie);
0868     if (acl_entry)
0869         return dpaa2_switch_acl_tbl_remove_entry(block,
0870                              acl_entry);
0871 
0872     /* If not, then it has to be a mirror */
0873     mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
0874                                 cls->cookie);
0875     if (mirror_entry)
0876         return dpaa2_switch_block_remove_mirror(block,
0877                             mirror_entry);
0878 
0879     return 0;
0880 }