0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/kernel.h>
0010 #include <linux/types.h>
0011 #include <linux/spinlock.h>
0012 #include <linux/hashtable.h>
0013 #include <linux/crc32.h>
0014 #include <linux/netdevice.h>
0015 #include <linux/inetdevice.h>
0016 #include <linux/if_vlan.h>
0017 #include <linux/if_bridge.h>
0018 #include <net/neighbour.h>
0019 #include <net/switchdev.h>
0020 #include <net/ip_fib.h>
0021 #include <net/nexthop.h>
0022 #include <net/arp.h>
0023
0024 #include "rocker.h"
0025 #include "rocker_tlv.h"
0026
/* Lookup key for a software flow-table entry.  tbl_id selects which
 * union member is meaningful.  Entries are matched by crc32 + memcmp
 * over this struct (see ofdpa_flow_tbl_find()), so entries must be
 * zero-allocated to keep unused union bytes deterministic.
 */
struct ofdpa_flow_tbl_key {
	u32 priority;				/* flow priority within its table */
	enum rocker_of_dpa_table_id tbl_id;	/* selects the union member below */
	union {
		/* ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		/* ROCKER_OF_DPA_TABLE_ID_VLAN */
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;		/* untagged ingress: assign new_vlan_id */
			__be16 new_vlan_id;
		} vlan;
		/* ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		/* ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING (IPv4) */
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;		/* excluded from key via key_len */
		} ucast_routing;
		/* ROCKER_OF_DPA_TABLE_ID_BRIDGING */
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;	/* flags: optional TLVs present */
			int has_eth_dst_mask;
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		/* ROCKER_OF_DPA_TABLE_ID_ACL_POLICY */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;		/* DSCP in low 6 bits, ECN in top 2 */
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};
0091
/* A flow-table entry tracked in software, mirroring one programmed into
 * the device.  Hashed into ofdpa->flow_tbl by key_crc32.
 */
struct ofdpa_flow_tbl_entry {
	struct hlist_node entry;	/* link in ofdpa->flow_tbl */
	u32 cmd;			/* ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_{ADD,MOD,DEL} */
	u64 cookie;			/* device-side identifier for this flow */
	struct ofdpa_flow_tbl_key key;
	size_t key_len;			/* bytes of key to hash/compare; 0 = whole key */
	u32 key_crc32;			/* crc32 over first key_len bytes of key */
	struct fib_info *fi;		/* associated FIB info for routing flows */
};
0101
/* A group-table entry tracked in software, hashed by group_id.  For
 * L2 flood/mcast groups, group_ids is a separately-allocated array of
 * group_count member group IDs, owned by the entry (freed in
 * ofdpa_group_tbl_entry_free()).
 */
struct ofdpa_group_tbl_entry {
	struct hlist_node entry;	/* link in ofdpa->group_tbl */
	u32 cmd;			/* ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_{ADD,MOD,DEL} */
	u32 group_id;			/* encodes group type (see ROCKER_GROUP_TYPE_GET) */
	u16 group_count;		/* number of entries in group_ids[] */
	u32 *group_ids;			/* members for L2 flood/mcast groups */
	union {
		struct {
			u8 pop_vlan;	/* strip VLAN tag on egress */
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;	/* next (lower) group in the chain */
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;	/* decrement/check TTL on routed pkts */
			u32 group_id;	/* next (lower) group in the chain */
		} l3_unicast;
	};
};
0127
/* Software FDB (forwarding database) entry, hashed by key_crc32. */
struct ofdpa_fdb_tbl_entry {
	struct hlist_node entry;	/* link in ofdpa->fdb_tbl */
	u32 key_crc32;			/* crc32 over key */
	bool learned;			/* learned from traffic vs. user-added */
	unsigned long touched;		/* jiffies of last activity, for ageing */
	struct ofdpa_fdb_tbl_key {
		struct ofdpa_port *ofdpa_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};
0139
/* Maps a netdev ifindex to an internally-assigned VLAN ID (used for
 * untagged traffic); refcounted across users of the same ifindex.
 */
struct ofdpa_internal_vlan_tbl_entry {
	struct hlist_node entry;	/* link in ofdpa->internal_vlan_tbl */
	int ifindex;			/* hash key */
	u32 ref_count;
	__be16 vlan_id;			/* assigned internal VLAN */
};
0146
/* IPv4 neighbour entry used to build L3 unicast groups; refcounted. */
struct ofdpa_neigh_tbl_entry {
	struct hlist_node entry;	/* link in ofdpa->neigh_tbl */
	__be32 ip_addr;			/* hash key: neighbour IPv4 address */
	struct net_device *dev;
	u32 ref_count;
	u32 index;			/* from ofdpa->neigh_tbl_next_index */
	u8 eth_dst[ETH_ALEN];		/* resolved neighbour MAC */
	bool ttl_check;
};
0156
/* Indices into ofdpa_port->ctrls[]: classes of control traffic that get
 * dedicated ACL entries.
 */
enum {
	OFDPA_CTRL_LINK_LOCAL_MCAST,	/* 01:80:c2:00:00:xx link-local mcast */
	OFDPA_CTRL_LOCAL_ARP,		/* ARP destined to local addresses */
	OFDPA_CTRL_IPV4_MCAST,		/* 01:00:5e:xx IPv4 multicast */
	OFDPA_CTRL_IPV6_MCAST,		/* 33:33:xx IPv6 multicast */
	OFDPA_CTRL_DFLT_BRIDGING,	/* default bridging catch-all */
	OFDPA_CTRL_DFLT_OVS,		/* default Open vSwitch catch-all */
	OFDPA_CTRL_MAX,
};
0166
0167 #define OFDPA_INTERNAL_VLAN_ID_BASE 0x0f00
0168 #define OFDPA_N_INTERNAL_VLANS 255
0169 #define OFDPA_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID)
0170 #define OFDPA_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS)
0171 #define OFDPA_UNTAGGED_VID 0
0172
/* Per-switch OF-DPA world state: software shadows of the device's flow,
 * group, FDB, internal-VLAN and neighbour tables, each with its own
 * spinlock.
 */
struct ofdpa {
	struct rocker *rocker;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* protects flow_tbl */
	u64 flow_tbl_next_cookie;		/* next device cookie to hand out */
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* protects group_tbl */
	struct timer_list fdb_cleanup_timer;	/* periodic FDB ageing */
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* protects fdb_tbl */
	unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* protects internal_vlan_tbl */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* protects neigh_tbl */
	u32 neigh_tbl_next_index;		/* next neighbour index to assign */
	unsigned long ageing_time;		/* FDB ageing period */
	bool fib_aborted;			/* FIB offload gave up (abort) */
};
0192
/* Per-port OF-DPA state. */
struct ofdpa_port {
	struct ofdpa *ofdpa;			/* owning switch */
	struct rocker_port *rocker_port;
	struct net_device *dev;			/* this port's netdev */
	u32 pport;				/* physical port number */
	struct net_device *bridge_dev;		/* master (bridge/ovs) if enslaved */
	__be16 internal_vlan_id;		/* VLAN used for untagged traffic */
	int stp_state;
	u32 brport_flags;
	unsigned long ageing_time;
	bool ctrls[OFDPA_CTRL_MAX];		/* which ctrl-traffic ACLs are set */
	unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN];	/* VLANs on port */
};
0206
/* Well-known MAC addresses and masks used to build control-traffic
 * (ACL) and bridging entries.
 */
static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; /* exact match */
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; /* 802 link-local */
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; /* mcast bit only */
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
0216
0217
0218
0219
0220
/* Flow priorities per table.  Higher value wins; values are only
 * meaningful relative to other entries in the same table.
 */
enum {
	OFDPA_PRIORITY_UNKNOWN = 0,
	OFDPA_PRIORITY_IG_PORT = 1,
	OFDPA_PRIORITY_VLAN = 1,
	OFDPA_PRIORITY_TERM_MAC_UCAST = 0,
	OFDPA_PRIORITY_TERM_MAC_MCAST = 1,
	/* bridging: exact-MAC beats wildcard default beats exact default */
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_VLAN = 3,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_TENANT = 3,
	/* ACL: control traffic beats normal beats default */
	OFDPA_PRIORITY_ACL_CTRL = 3,
	OFDPA_PRIORITY_ACL_NORMAL = 2,
	OFDPA_PRIORITY_ACL_DFLT = 1,
};
0237
0238 static bool ofdpa_vlan_id_is_internal(__be16 vlan_id)
0239 {
0240 u16 start = OFDPA_INTERNAL_VLAN_ID_BASE;
0241 u16 end = 0xffe;
0242 u16 _vlan_id = ntohs(vlan_id);
0243
0244 return (_vlan_id >= start && _vlan_id <= end);
0245 }
0246
0247 static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port,
0248 u16 vid, bool *pop_vlan)
0249 {
0250 __be16 vlan_id;
0251
0252 if (pop_vlan)
0253 *pop_vlan = false;
0254 vlan_id = htons(vid);
0255 if (!vlan_id) {
0256 vlan_id = ofdpa_port->internal_vlan_id;
0257 if (pop_vlan)
0258 *pop_vlan = true;
0259 }
0260
0261 return vlan_id;
0262 }
0263
0264 static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port,
0265 __be16 vlan_id)
0266 {
0267 if (ofdpa_vlan_id_is_internal(vlan_id))
0268 return 0;
0269
0270 return ntohs(vlan_id);
0271 }
0272
0273 static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port,
0274 const char *kind)
0275 {
0276 return ofdpa_port->bridge_dev &&
0277 !strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind);
0278 }
0279
0280 static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port)
0281 {
0282 return ofdpa_port_is_slave(ofdpa_port, "bridge");
0283 }
0284
0285 static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port)
0286 {
0287 return ofdpa_port_is_slave(ofdpa_port, "openvswitch");
0288 }
0289
0290 #define OFDPA_OP_FLAG_REMOVE BIT(0)
0291 #define OFDPA_OP_FLAG_NOWAIT BIT(1)
0292 #define OFDPA_OP_FLAG_LEARNED BIT(2)
0293 #define OFDPA_OP_FLAG_REFRESH BIT(3)
0294
0295 static bool ofdpa_flags_nowait(int flags)
0296 {
0297 return flags & OFDPA_OP_FLAG_NOWAIT;
0298 }
0299
0300
0301
0302
0303
/* Emit the ingress-port table match/action TLVs into @desc_info.
 * Returns 0 or -EMSGSIZE if the descriptor buffer is full.
 */
static int
ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
			       const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.ig_port.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.ig_port.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ig_port.goto_tbl))
		return -EMSGSIZE;

	return 0;
}
0320
/* Emit the VLAN table match/action TLVs into @desc_info.  The NEW_VLAN_ID
 * TLV is only present for untagged-ingress entries (tag assignment).
 * Returns 0 or -EMSGSIZE if the descriptor buffer is full.
 */
static int
ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
			    const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.vlan.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.vlan.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.vlan.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.vlan.goto_tbl))
		return -EMSGSIZE;
	/* only assign a new VLAN ID when ingress was untagged */
	if (entry->key.vlan.untagged &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
				entry->key.vlan.new_vlan_id))
		return -EMSGSIZE;

	return 0;
}
0344
/* Emit the termination-MAC table match/action TLVs into @desc_info.
 * The copy-to-CPU action TLV is emitted only when requested.
 * Returns 0 or -EMSGSIZE if the descriptor buffer is full.
 */
static int
ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
				const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.term_mac.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.term_mac.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.term_mac.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.term_mac.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.term_mac.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.term_mac.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.term_mac.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.term_mac.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
0380
/* Emit the IPv4 unicast-routing table match/action TLVs into @desc_info.
 * Returns 0 or -EMSGSIZE if the descriptor buffer is full.
 */
static int
ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
				     const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.ucast_routing.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
				entry->key.ucast_routing.dst4))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
				entry->key.ucast_routing.dst4_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ucast_routing.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.ucast_routing.group_id))
		return -EMSGSIZE;

	return 0;
}
0403
/* Emit the bridging table match/action TLVs into @desc_info.  Optional
 * match TLVs (dst MAC, mask, VLAN, tunnel) are emitted only when set.
 * Returns 0 or -EMSGSIZE if the descriptor buffer is full.
 */
static int
ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			      const struct ofdpa_flow_tbl_entry *entry)
{
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
		return -EMSGSIZE;
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
		return -EMSGSIZE;
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
		return -EMSGSIZE;
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
		return -EMSGSIZE;
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
0437
/* Emit the ACL policy table match/action TLVs into @desc_info.  For
 * IPv4/IPv6 ethertypes the ip_tos byte is split into DSCP (low 6 bits)
 * and ECN (top 2 bits) TLVs.  The group-ID action is emitted only for
 * forwarding entries (group != ROCKER_GROUP_NONE); without it the entry
 * drops matching packets.  Returns 0 or -EMSGSIZE.
 */
static int
ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			   const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;

	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		/* L3 matches: protocol and the two halves of the TOS byte */
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}
0504
/* rocker_cmd_exec() prepare callback: build a flow add/mod command into
 * @desc_info from the ofdpa_flow_tbl_entry passed via @priv.  Emits the
 * common header TLVs (table, priority, hardtime, cookie) inside a
 * CMD_INFO nest, then the table-specific TLVs.  Returns 0, -EMSGSIZE if
 * the descriptor is too small, or -ENOTSUPP for an unknown table.
 * NOTE(review): on error the CMD_INFO nest is left unterminated; this
 * presumably relies on the caller discarding the descriptor — confirm.
 */
static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
			       entry->key.tbl_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
			       entry->key.priority))
		return -EMSGSIZE;
	/* hardtime 0: entry never ages out on its own */
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;

	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
		err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
0561
/* rocker_cmd_exec() prepare callback: build a flow delete command.
 * Deletion only needs the device cookie identifying the flow.
 * Returns 0 or -EMSGSIZE.
 */
static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
0581
0582 static int
0583 ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
0584 struct ofdpa_group_tbl_entry *entry)
0585 {
0586 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
0587 ROCKER_GROUP_PORT_GET(entry->group_id)))
0588 return -EMSGSIZE;
0589 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
0590 entry->l2_interface.pop_vlan))
0591 return -EMSGSIZE;
0592
0593 return 0;
0594 }
0595
/* Emit the L2-rewrite group TLVs into @desc_info: the chained lower
 * group plus optional src/dst MAC and VLAN rewrites (emitted only when
 * non-zero).  Returns 0 or -EMSGSIZE.
 */
static int
ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l2_rewrite.group_id))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_dst))
		return -EMSGSIZE;
	if (entry->l2_rewrite.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l2_rewrite.vlan_id))
		return -EMSGSIZE;

	return 0;
}
0618
/* Emit the member-group list for L2 flood/mcast groups: a count TLV
 * followed by a GROUP_IDS nest whose member TLV types are 1-based
 * indices.  Returns 0 or -EMSGSIZE (nest left unterminated on error;
 * the descriptor is presumably discarded by the caller).
 */
static int
ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
				  const struct ofdpa_group_tbl_entry *entry)
{
	int i;
	struct rocker_tlv *group_ids;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
			       entry->group_count))
		return -EMSGSIZE;

	group_ids = rocker_tlv_nest_start(desc_info,
					  ROCKER_TLV_OF_DPA_GROUP_IDS);
	if (!group_ids)
		return -EMSGSIZE;

	/* TLV type i + 1: member TLVs are numbered from 1 */
	for (i = 0; i < entry->group_count; i++)

		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
			return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, group_ids);

	return 0;
}
0644
/* Emit the L3-unicast group TLVs into @desc_info: optional src/dst MAC
 * and VLAN rewrites (when non-zero), the TTL-check flag, and the
 * chained lower group.  Returns 0 or -EMSGSIZE.
 */
static int
ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
		return -EMSGSIZE;
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
		return -EMSGSIZE;

	return 0;
}
0670
/* rocker_cmd_exec() prepare callback: build a group add/mod command
 * from the ofdpa_group_tbl_entry in @priv.  Emits the command type and
 * group ID, then dispatches on the group type encoded in the ID for the
 * type-specific TLVs.  Returns 0, -EMSGSIZE, or -ENOTSUPP for an
 * unknown group type.
 */
static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;

	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
0715
/* rocker_cmd_exec() prepare callback: build a group delete command.
 * Deletion only needs the group ID.  Returns 0 or -EMSGSIZE.
 */
static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	const struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
0735
0736
0737
0738
0739
/* Look up a flow entry whose key matches @match (compared over
 * match->key_len bytes, or the whole key if key_len is 0) in the bucket
 * selected by match->key_crc32.  Caller must hold flow_tbl_lock and
 * must have precomputed match->key_crc32.  Returns the entry or NULL.
 */
static struct ofdpa_flow_tbl_entry *
ofdpa_flow_tbl_find(const struct ofdpa *ofdpa,
		    const struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);

	hash_for_each_possible(ofdpa->flow_tbl, found,
			       entry, match->key_crc32) {
		if (memcmp(&found->key, &match->key, key_len) == 0)
			return found;
	}

	return NULL;
}
0755
/* Add or modify a flow.  Takes ownership of @match: if an entry with
 * the same key exists it is replaced by @match (inheriting its cookie)
 * and sent to the device as a MOD; otherwise @match is inserted with a
 * fresh cookie and sent as an ADD.  Returns the rocker_cmd_exec()
 * result.
 */
static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
			      int flags, struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		/* replace the old entry, keeping its device cookie */
		match->cookie = found->cookie;
		hash_del(&found->entry);
		kfree(found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	} else {
		found = match;
		found->cookie = ofdpa->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	}

	hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	return rocker_cmd_exec(ofdpa_port->rocker_port,
			       ofdpa_flags_nowait(flags),
			       ofdpa_cmd_flow_tbl_add,
			       found, NULL, NULL);
}
0790
/* Delete the flow matching @match's key.  Takes ownership of @match,
 * which is only used as a lookup key and is always freed.  If a matching
 * entry is found it is unhashed, a DEL command is sent to the device,
 * and the entry is freed.  Returns 0 (also when nothing matched) or the
 * rocker_cmd_exec() error.
 */
static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
			      int flags, struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	int err = 0;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	/* match was only a lookup key; found (if any) is a distinct entry */
	kfree(match);

	if (found) {
		err = rocker_cmd_exec(ofdpa_port->rocker_port,
				      ofdpa_flags_nowait(flags),
				      ofdpa_cmd_flow_tbl_del,
				      found, NULL, NULL);
		kfree(found);
	}

	return err;
}
0825
0826 static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
0827 struct ofdpa_flow_tbl_entry *entry)
0828 {
0829 if (flags & OFDPA_OP_FLAG_REMOVE)
0830 return ofdpa_flow_tbl_del(ofdpa_port, flags, entry);
0831 else
0832 return ofdpa_flow_tbl_add(ofdpa_port, flags, entry);
0833 }
0834
/* Build and apply an ingress-port table flow (add or remove, per
 * @flags).  The allocated entry's ownership passes to
 * ofdpa_flow_tbl_do().  Returns 0 or a negative errno.
 */
static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port, int flags,
				  u32 in_pport, u32 in_pport_mask,
				  enum rocker_of_dpa_table_id goto_tbl)
{
	struct ofdpa_flow_tbl_entry *entry;

	/* kzalloc: unused key-union bytes must be zero for crc/memcmp match */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->key.priority = OFDPA_PRIORITY_IG_PORT;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
	entry->key.ig_port.in_pport = in_pport;
	entry->key.ig_port.in_pport_mask = in_pport_mask;
	entry->key.ig_port.goto_tbl = goto_tbl;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
0853
/* Build and apply a VLAN table flow (add or remove, per @flags).
 * @untagged selects whether new_vlan_id is assigned on ingress.
 * Entry ownership passes to ofdpa_flow_tbl_do().
 */
static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
			       int flags,
			       u32 in_pport, __be16 vlan_id,
			       __be16 vlan_id_mask,
			       enum rocker_of_dpa_table_id goto_tbl,
			       bool untagged, __be16 new_vlan_id)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->key.priority = OFDPA_PRIORITY_VLAN;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
	entry->key.vlan.in_pport = in_pport;
	entry->key.vlan.vlan_id = vlan_id;
	entry->key.vlan.vlan_id_mask = vlan_id_mask;
	entry->key.vlan.goto_tbl = goto_tbl;

	entry->key.vlan.untagged = untagged;
	entry->key.vlan.new_vlan_id = new_vlan_id;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
0879
/* Build and apply a termination-MAC flow.  Multicast destinations go to
 * the multicast-routing table at mcast priority; unicast ones go to the
 * unicast-routing table.  @eth_dst and @eth_dst_mask must be valid
 * ETH_ALEN buffers.  Entry ownership passes to ofdpa_flow_tbl_do().
 */
static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
				   u32 in_pport, u32 in_pport_mask,
				   __be16 eth_type, const u8 *eth_dst,
				   const u8 *eth_dst_mask, __be16 vlan_id,
				   __be16 vlan_id_mask, bool copy_to_cpu,
				   int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	if (is_multicast_ether_addr(eth_dst)) {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
	} else {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	}

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	entry->key.term_mac.in_pport = in_pport;
	entry->key.term_mac.in_pport_mask = in_pport_mask;
	entry->key.term_mac.eth_type = eth_type;
	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
	entry->key.term_mac.vlan_id = vlan_id;
	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
	entry->key.term_mac.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
0915
/* Build and apply a bridging-table flow.  Priority is derived from the
 * entry's character: VLAN vs. tenant (tunnel) bridging, default
 * (wildcard-ish) vs. exact MAC, wild vs. exact mask.  @eth_dst and
 * @eth_dst_mask may be NULL (omitted from the match).  GFP_ATOMIC
 * because this can be called from FDB-learning context.  Entry
 * ownership passes to ofdpa_flow_tbl_do().
 */
static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
				 int flags, const u8 *eth_dst,
				 const u8 *eth_dst_mask, __be16 vlan_id,
				 u32 tunnel_id,
				 enum rocker_of_dpa_table_id goto_tbl,
				 u32 group_id, bool copy_to_cpu)
{
	struct ofdpa_flow_tbl_entry *entry;
	u32 priority;
	bool vlan_bridging = !!vlan_id;
	/* NOTE(review): any non-NULL mask makes this a "default" entry,
	 * even with eth_dst set — confirm this matches the priority
	 * table's intent.
	 */
	bool dflt = !eth_dst || eth_dst_mask;
	bool wild = false;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;

	if (eth_dst) {
		entry->key.bridge.has_eth_dst = 1;
		ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
	}
	if (eth_dst_mask) {
		entry->key.bridge.has_eth_dst_mask = 1;
		ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
		/* anything short of an all-ones mask is a wildcard match */
		if (!ether_addr_equal(eth_dst_mask, ff_mac))
			wild = true;
	}

	priority = OFDPA_PRIORITY_UNKNOWN;
	if (vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
	else if (vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
	else if (vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN;
	else if (!vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
	else if (!vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
	else if (!vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT;

	entry->key.priority = priority;
	entry->key.bridge.vlan_id = vlan_id;
	entry->key.bridge.tunnel_id = tunnel_id;
	entry->key.bridge.goto_tbl = goto_tbl;
	entry->key.bridge.group_id = group_id;
	entry->key.bridge.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
0969
/* Build and apply an IPv4 unicast-routing flow.  key_len is truncated
 * at group_id so the nexthop group can change without altering the
 * flow's identity (same cookie on update).  Entry ownership passes to
 * ofdpa_flow_tbl_do(); @fi is stored, not referenced further here.
 */
static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
					 __be16 eth_type, __be32 dst,
					 __be32 dst_mask, u32 priority,
					 enum rocker_of_dpa_table_id goto_tbl,
					 u32 group_id, struct fib_info *fi,
					 int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	entry->key.priority = priority;
	entry->key.ucast_routing.eth_type = eth_type;
	entry->key.ucast_routing.dst4 = dst;
	entry->key.ucast_routing.dst4_mask = dst_mask;
	entry->key.ucast_routing.goto_tbl = goto_tbl;
	entry->key.ucast_routing.group_id = group_id;
	/* exclude group_id from the match key */
	entry->key_len = offsetof(struct ofdpa_flow_tbl_key,
				  ucast_routing.group_id);
	entry->fi = fi;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
0996
/* Build and apply an ACL policy flow.  Priority defaults to NORMAL;
 * multicast-bit-only dst masks get DFLT (catch-alls) and link-local
 * destinations get CTRL (highest).  All MAC pointers may be NULL, in
 * which case the corresponding key field stays zeroed.  Entry ownership
 * passes to ofdpa_flow_tbl_do().
 */
static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port, int flags,
			      u32 in_pport, u32 in_pport_mask,
			      const u8 *eth_src, const u8 *eth_src_mask,
			      const u8 *eth_dst, const u8 *eth_dst_mask,
			      __be16 eth_type, __be16 vlan_id,
			      __be16 vlan_id_mask, u8 ip_proto,
			      u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
			      u32 group_id)
{
	u32 priority;
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	priority = OFDPA_PRIORITY_ACL_NORMAL;
	if (eth_dst && eth_dst_mask) {
		if (ether_addr_equal(eth_dst_mask, mcast_mac))
			priority = OFDPA_PRIORITY_ACL_DFLT;
		else if (is_link_local_ether_addr(eth_dst))
			priority = OFDPA_PRIORITY_ACL_CTRL;
	}

	entry->key.priority = priority;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	entry->key.acl.in_pport = in_pport;
	entry->key.acl.in_pport_mask = in_pport_mask;

	if (eth_src)
		ether_addr_copy(entry->key.acl.eth_src, eth_src);
	if (eth_src_mask)
		ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
	if (eth_dst)
		ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
	if (eth_dst_mask)
		ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);

	entry->key.acl.eth_type = eth_type;
	entry->key.acl.vlan_id = vlan_id;
	entry->key.acl.vlan_id_mask = vlan_id_mask;
	entry->key.acl.ip_proto = ip_proto;
	entry->key.acl.ip_proto_mask = ip_proto_mask;
	entry->key.acl.ip_tos = ip_tos;
	entry->key.acl.ip_tos_mask = ip_tos_mask;
	entry->key.acl.group_id = group_id;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
1046
1047 static struct ofdpa_group_tbl_entry *
1048 ofdpa_group_tbl_find(const struct ofdpa *ofdpa,
1049 const struct ofdpa_group_tbl_entry *match)
1050 {
1051 struct ofdpa_group_tbl_entry *found;
1052
1053 hash_for_each_possible(ofdpa->group_tbl, found,
1054 entry, match->group_id) {
1055 if (found->group_id == match->group_id)
1056 return found;
1057 }
1058
1059 return NULL;
1060 }
1061
1062 static void ofdpa_group_tbl_entry_free(struct ofdpa_group_tbl_entry *entry)
1063 {
1064 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
1065 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
1066 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
1067 kfree(entry->group_ids);
1068 break;
1069 default:
1070 break;
1071 }
1072 kfree(entry);
1073 }
1074
/* Add or modify a group table entry.
 *
 * Under group_tbl_lock: if an entry with the same group_id already exists
 * it is unlinked and freed, @match takes its place and the hardware op
 * becomes a MOD; otherwise @match is inserted with an ADD op.  Ownership
 * of @match transfers to the table.  The command is sent to the device
 * after the lock is dropped.
 */
static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		/* replace stale entry with the caller's new one */
		hash_del(&found->entry);
		ofdpa_group_tbl_entry_free(found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	hash_add(ofdpa->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	return rocker_cmd_exec(ofdpa_port->rocker_port,
			       ofdpa_flags_nowait(flags),
			       ofdpa_cmd_group_tbl_add,
			       found, NULL, NULL);
}
1105
/* Remove a group table entry.
 *
 * The entry is unlinked under group_tbl_lock; the hardware DEL command
 * is issued outside the lock for the entry actually found.  @match is
 * only a lookup key and is always freed here; the found entry is freed
 * after the command.  Deleting a non-existent group is a no-op (returns 0).
 */
static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	/* the lookup key is no longer needed */
	ofdpa_group_tbl_entry_free(match);

	if (found) {
		err = rocker_cmd_exec(ofdpa_port->rocker_port,
				      ofdpa_flags_nowait(flags),
				      ofdpa_cmd_group_tbl_del,
				      found, NULL, NULL);
		ofdpa_group_tbl_entry_free(found);
	}

	return err;
}
1137
1138 static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
1139 struct ofdpa_group_tbl_entry *entry)
1140 {
1141 if (flags & OFDPA_OP_FLAG_REMOVE)
1142 return ofdpa_group_tbl_del(ofdpa_port, flags, entry);
1143 else
1144 return ofdpa_group_tbl_add(ofdpa_port, flags, entry);
1145 }
1146
1147 static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
1148 int flags, __be16 vlan_id,
1149 u32 out_pport, int pop_vlan)
1150 {
1151 struct ofdpa_group_tbl_entry *entry;
1152
1153 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1154 if (!entry)
1155 return -ENOMEM;
1156
1157 entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
1158 entry->l2_interface.pop_vlan = pop_vlan;
1159
1160 return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
1161 }
1162
1163 static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
1164 int flags, u8 group_count,
1165 const u32 *group_ids, u32 group_id)
1166 {
1167 struct ofdpa_group_tbl_entry *entry;
1168
1169 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1170 if (!entry)
1171 return -ENOMEM;
1172
1173 entry->group_id = group_id;
1174 entry->group_count = group_count;
1175
1176 entry->group_ids = kcalloc(group_count, sizeof(u32), GFP_KERNEL);
1177 if (!entry->group_ids) {
1178 kfree(entry);
1179 return -ENOMEM;
1180 }
1181 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
1182
1183 return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
1184 }
1185
1186 static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
1187 int flags, __be16 vlan_id,
1188 u8 group_count, const u32 *group_ids,
1189 u32 group_id)
1190 {
1191 return ofdpa_group_l2_fan_out(ofdpa_port, flags,
1192 group_count, group_ids,
1193 group_id);
1194 }
1195
1196 static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port, int flags,
1197 u32 index, const u8 *src_mac, const u8 *dst_mac,
1198 __be16 vlan_id, bool ttl_check, u32 pport)
1199 {
1200 struct ofdpa_group_tbl_entry *entry;
1201
1202 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1203 if (!entry)
1204 return -ENOMEM;
1205
1206 entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
1207 if (src_mac)
1208 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
1209 if (dst_mac)
1210 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
1211 entry->l3_unicast.vlan_id = vlan_id;
1212 entry->l3_unicast.ttl_check = ttl_check;
1213 entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
1214
1215 return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
1216 }
1217
1218 static struct ofdpa_neigh_tbl_entry *
1219 ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr)
1220 {
1221 struct ofdpa_neigh_tbl_entry *found;
1222
1223 hash_for_each_possible(ofdpa->neigh_tbl, found,
1224 entry, be32_to_cpu(ip_addr))
1225 if (found->ip_addr == ip_addr)
1226 return found;
1227
1228 return NULL;
1229 }
1230
/* Insert a new neighbour entry, assigning the next free hardware index
 * and taking the initial reference.  Hashed by host-order IP address.
 * Caller holds neigh_tbl_lock.
 */
static void ofdpa_neigh_add(struct ofdpa *ofdpa,
			    struct ofdpa_neigh_tbl_entry *entry)
{
	entry->index = ofdpa->neigh_tbl_next_index++;
	entry->ref_count++;
	hash_add(ofdpa->neigh_tbl, &entry->entry,
		 be32_to_cpu(entry->ip_addr));
}
1239
1240 static void ofdpa_neigh_del(struct ofdpa_neigh_tbl_entry *entry)
1241 {
1242 if (--entry->ref_count == 0) {
1243 hash_del(&entry->entry);
1244 kfree(entry);
1245 }
1246 }
1247
1248 static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry,
1249 const u8 *eth_dst, bool ttl_check)
1250 {
1251 if (eth_dst) {
1252 ether_addr_copy(entry->eth_dst, eth_dst);
1253 entry->ttl_check = ttl_check;
1254 } else {
1255 entry->ref_count++;
1256 }
1257 }
1258
/* Add, refresh, or remove the hardware state for an IPv4 neighbour:
 * an L3 unicast group plus (on add/remove) a /32 unicast route that
 * points at it.
 *
 * The REMOVE flag combined with table lookup selects one of four cases:
 * adding (new entry), updating (refresh MAC on existing), removing, or
 * -ENOENT (remove of unknown neighbour).  A scratch entry is allocated
 * up front with GFP_ATOMIC; NOTE(review): this suggests atomic caller
 * context — confirm against callers.
 */
static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
				 int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		ofdpa_neigh_add(ofdpa, entry);
	} else if (removing) {
		/* snapshot into the scratch entry so the hardware ops can
		 * run after the table entry is released
		 */
		memcpy(entry, found, sizeof(*entry));
		ofdpa_neigh_del(found);
	} else if (updating) {
		ofdpa_neigh_update(found, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbour we keep an L3 unicast group; on
	 * add/remove we also keep a /32 route to the neighbour that uses
	 * that group.
	 */
	err = ofdpa_group_l3_unicast(ofdpa_port, flags,
				     entry->index,
				     ofdpa_port->dev->dev_addr,
				     entry->eth_dst,
				     ofdpa_port->internal_vlan_id,
				     entry->ttl_check,
				     ofdpa_port->pport);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port,
						    eth_type, ip_addr,
						    inet_make_mask(32),
						    priority, goto_tbl,
						    group_id, NULL, flags);

		if (err)
			netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	/* in the adding case the entry now lives in the table; otherwise
	 * it was only a scratch copy
	 */
	if (!adding)
		kfree(entry);

	return err;
}
1347
1348 static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
1349 __be32 ip_addr)
1350 {
1351 struct net_device *dev = ofdpa_port->dev;
1352 struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
1353 int err = 0;
1354
1355 if (!n) {
1356 n = neigh_create(&arp_tbl, &ip_addr, dev);
1357 if (IS_ERR(n))
1358 return PTR_ERR(n);
1359 }
1360
1361
1362
1363
1364
1365
1366 if (n->nud_state & NUD_VALID)
1367 err = ofdpa_port_ipv4_neigh(ofdpa_port, 0,
1368 ip_addr, n->ha);
1369 else
1370 neigh_event_send(n, NULL);
1371
1372 neigh_release(n);
1373 return err;
1374 }
1375
/* Take or release a nexthop reference for @ip_addr, returning the L3
 * unicast group index via @index.
 *
 * Adding an unknown neighbour inserts a placeholder (no MAC yet) and
 * kicks off ARP resolution; adding a known one just bumps its refcount.
 * Removing drops a reference via ofdpa_neigh_del().
 */
static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
			      int flags, __be32 ip_addr, u32 *index)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ofdpa_neigh_add(ofdpa, entry);
		*index = entry->index;
		resolved = false;
	} else if (removing) {
		*index = found->index;
		ofdpa_neigh_del(found);
	} else if (updating) {
		/* NULL MAC: just take another reference */
		ofdpa_neigh_update(found, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
		*index = found->index;
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	/* entry was only inserted in the adding case */
	if (!adding)
		kfree(entry);

	if (err)
		return err;

	/* Resolved means the neighbour's IP has been resolved to a MAC;
	 * otherwise start ARP to fill it in.
	 */
	if (!resolved)
		err = ofdpa_port_ipv4_resolve(ofdpa_port, ip_addr);

	return err;
}
1433
1434 static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa,
1435 int port_index)
1436 {
1437 struct rocker_port *rocker_port;
1438
1439 rocker_port = ofdpa->rocker->ports[port_index];
1440 return rocker_port ? rocker_port->wpriv : NULL;
1441 }
1442
1443 static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
1444 int flags, __be16 vlan_id)
1445 {
1446 struct ofdpa_port *p;
1447 const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
1448 unsigned int port_count = ofdpa->rocker->port_count;
1449 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
1450 u32 *group_ids;
1451 u8 group_count = 0;
1452 int err = 0;
1453 int i;
1454
1455 group_ids = kcalloc(port_count, sizeof(u32), GFP_KERNEL);
1456 if (!group_ids)
1457 return -ENOMEM;
1458
1459
1460
1461
1462
1463
1464 for (i = 0; i < port_count; i++) {
1465 p = ofdpa_port_get(ofdpa, i);
1466 if (!p)
1467 continue;
1468 if (!ofdpa_port_is_bridged(p))
1469 continue;
1470 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
1471 group_ids[group_count++] =
1472 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
1473 }
1474 }
1475
1476
1477 if (group_count == 0)
1478 goto no_ports_in_vlan;
1479
1480 err = ofdpa_group_l2_flood(ofdpa_port, flags, vlan_id,
1481 group_count, group_ids, group_id);
1482 if (err)
1483 netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
1484
1485 no_ports_in_vlan:
1486 kfree(group_ids);
1487 return err;
1488 }
1489
/* Maintain the per-port and CPU-facing L2 interface groups for a VLAN.
 *
 * The port's own group exists only while the port is in the LEARNING or
 * FORWARDING STP state.  The CPU group (out_pport 0) is shared across
 * ports: it is created when the first port joins the VLAN and removed
 * when the last one leaves.
 */
static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port, int flags,
				     __be16 vlan_id, bool pop_vlan)
{
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	struct ofdpa_port *p;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	u32 out_pport;
	int ref = 0;
	int err;
	int i;

	/* this port's own L2 interface group, only in fwd-capable states */
	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = ofdpa_port->pport;
		err = ofdpa_group_l2_interface(ofdpa_port, flags,
					       vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	/* count current VLAN membership across all ports to detect the
	 * first-join / last-leave transitions for the CPU group
	 */
	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	/* proceed only if adding the first member or removing the last */
	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	out_pport = 0;
	err = ofdpa_group_l2_interface(ofdpa_port, flags,
				       vlan_id, out_pport, pop_vlan);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}
1542
/* Classes of "ctrl" traffic that can be trapped or flooded per port.
 * Each entry describes a match (dst MAC/mask and/or ethertype) and which
 * mechanism implements it: an ACL entry, a bridging (flood) entry, or a
 * termination-MAC entry.
 */
static struct ofdpa_ctrl {
	const u8 *eth_dst;		/* dst MAC to match, NULL = none */
	const u8 *eth_dst_mask;		/* mask for eth_dst */
	__be16 eth_type;		/* ethertype to match, 0 = any */
	bool acl;			/* implement via ACL table */
	bool bridge;			/* implement via bridging table */
	bool term;			/* implement via termination-MAC table */
	bool copy_to_cpu;		/* also deliver matching pkts to CPU */
} ofdpa_ctrls[] = {
	[OFDPA_CTRL_LINK_LOCAL_MCAST] = {
		/* trap link-local multicast (ll_mac/ll_mask) via ACL */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[OFDPA_CTRL_LOCAL_ARP] = {
		/* trap ARP frames via ACL */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[OFDPA_CTRL_IPV4_MCAST] = {
		/* IPv4 multicast: terminate and copy to CPU */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_IPV6_MCAST] = {
		/* IPv6 multicast: terminate and copy to CPU */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_BRIDGING] = {
		/* default bridging: flood on the VLAN, copy to CPU */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_OVS] = {
		/* OVS mode default: wildcard dst MAC via ACL */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};
1593
1594 static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port, int flags,
1595 const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1596 {
1597 u32 in_pport = ofdpa_port->pport;
1598 u32 in_pport_mask = 0xffffffff;
1599 u32 out_pport = 0;
1600 const u8 *eth_src = NULL;
1601 const u8 *eth_src_mask = NULL;
1602 __be16 vlan_id_mask = htons(0xffff);
1603 u8 ip_proto = 0;
1604 u8 ip_proto_mask = 0;
1605 u8 ip_tos = 0;
1606 u8 ip_tos_mask = 0;
1607 u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
1608 int err;
1609
1610 err = ofdpa_flow_tbl_acl(ofdpa_port, flags,
1611 in_pport, in_pport_mask,
1612 eth_src, eth_src_mask,
1613 ctrl->eth_dst, ctrl->eth_dst_mask,
1614 ctrl->eth_type,
1615 vlan_id, vlan_id_mask,
1616 ip_proto, ip_proto_mask,
1617 ip_tos, ip_tos_mask,
1618 group_id);
1619
1620 if (err)
1621 netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err);
1622
1623 return err;
1624 }
1625
1626 static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
1627 int flags, const struct ofdpa_ctrl *ctrl,
1628 __be16 vlan_id)
1629 {
1630 enum rocker_of_dpa_table_id goto_tbl =
1631 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
1632 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
1633 u32 tunnel_id = 0;
1634 int err;
1635
1636 if (!ofdpa_port_is_bridged(ofdpa_port))
1637 return 0;
1638
1639 err = ofdpa_flow_tbl_bridge(ofdpa_port, flags,
1640 ctrl->eth_dst, ctrl->eth_dst_mask,
1641 vlan_id, tunnel_id,
1642 goto_tbl, group_id, ctrl->copy_to_cpu);
1643
1644 if (err)
1645 netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err);
1646
1647 return err;
1648 }
1649
1650 static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port, int flags,
1651 const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1652 {
1653 u32 in_pport_mask = 0xffffffff;
1654 __be16 vlan_id_mask = htons(0xffff);
1655 int err;
1656
1657 if (ntohs(vlan_id) == 0)
1658 vlan_id = ofdpa_port->internal_vlan_id;
1659
1660 err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport, in_pport_mask,
1661 ctrl->eth_type, ctrl->eth_dst,
1662 ctrl->eth_dst_mask, vlan_id,
1663 vlan_id_mask, ctrl->copy_to_cpu,
1664 flags);
1665
1666 if (err)
1667 netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err);
1668
1669 return err;
1670 }
1671
1672 static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port, int flags,
1673 const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1674 {
1675 if (ctrl->acl)
1676 return ofdpa_port_ctrl_vlan_acl(ofdpa_port, flags,
1677 ctrl, vlan_id);
1678 if (ctrl->bridge)
1679 return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, flags,
1680 ctrl, vlan_id);
1681
1682 if (ctrl->term)
1683 return ofdpa_port_ctrl_vlan_term(ofdpa_port, flags,
1684 ctrl, vlan_id);
1685
1686 return -EOPNOTSUPP;
1687 }
1688
1689 static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port, int flags,
1690 __be16 vlan_id)
1691 {
1692 int err = 0;
1693 int i;
1694
1695 for (i = 0; i < OFDPA_CTRL_MAX; i++) {
1696 if (ofdpa_port->ctrls[i]) {
1697 err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
1698 &ofdpa_ctrls[i], vlan_id);
1699 if (err)
1700 return err;
1701 }
1702 }
1703
1704 return err;
1705 }
1706
1707 static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port, int flags,
1708 const struct ofdpa_ctrl *ctrl)
1709 {
1710 u16 vid;
1711 int err = 0;
1712
1713 for (vid = 1; vid < VLAN_N_VID; vid++) {
1714 if (!test_bit(vid, ofdpa_port->vlan_bitmap))
1715 continue;
1716 err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
1717 ctrl, htons(vid));
1718 if (err)
1719 break;
1720 }
1721
1722 return err;
1723 }
1724
/* Add or remove this port's membership in VLAN @vid (0 maps to the
 * port's internal VLAN for untagged traffic).
 *
 * Programs, in order: ctrl entries (add only), the per-port L2 groups,
 * the VLAN flood group, and finally the VLAN table entry.  The
 * vlan_bitmap records membership and makes repeated calls idempotent;
 * it is reverted on error.
 */
static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port, int flags,
			   u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = ofdpa_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	int err;

	internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged);

	/* nothing to do if membership already matches the request */
	if (adding &&
	    test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0;
	else if (!adding &&
		 !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0;

	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

	if (adding) {
		err = ofdpa_port_ctrl_vlan_add(ofdpa_port, flags,
					       internal_vlan_id);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err);
			goto err_vlan_add;
		}
	}

	err = ofdpa_port_vlan_l2_groups(ofdpa_port, flags,
					internal_vlan_id, untagged);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err);
		goto err_vlan_l2_groups;
	}

	err = ofdpa_port_vlan_flood_group(ofdpa_port, flags,
					  internal_vlan_id);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
		goto err_flood_group;
	}

	err = ofdpa_flow_tbl_vlan(ofdpa_port, flags,
				  in_pport, vlan_id, vlan_id_mask,
				  goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err);

	/* NOTE(review): a failure from ofdpa_flow_tbl_vlan() is only
	 * logged; the function still returns 0 and keeps the bitmap bit —
	 * confirm this is intentional.
	 */
	return 0;

err_vlan_add:
err_vlan_l2_groups:
err_flood_group:
	/* roll back the membership bit flipped above */
	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
	return err;
}
1786
1787 static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port, int flags)
1788 {
1789 enum rocker_of_dpa_table_id goto_tbl;
1790 u32 in_pport;
1791 u32 in_pport_mask;
1792 int err;
1793
1794
1795
1796
1797
1798 in_pport = 0;
1799 in_pport_mask = 0xffff0000;
1800 goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
1801
1802 err = ofdpa_flow_tbl_ig_port(ofdpa_port, flags,
1803 in_pport, in_pport_mask,
1804 goto_tbl);
1805 if (err)
1806 netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err);
1807
1808 return err;
1809 }
1810
/* Deferred FDB learn/forget notification.  Queued to a workqueue so the
 * switchdev notifiers can be called under rtnl in process context.
 */
struct ofdpa_fdb_learn_work {
	struct work_struct work;
	struct ofdpa_port *ofdpa_port;	/* port the address was seen on */
	int flags;			/* OFDPA_OP_FLAG_* of the original op */
	u8 addr[ETH_ALEN];		/* the learned/forgotten MAC */
	u16 vid;			/* VLAN id reported to the bridge */
};
1818
/* Work handler: notify the bridge layer of a learned (ADD) or aged-out
 * (DEL) FDB entry.  Only LEARNED entries generate a notification.
 * Frees the work item when done.
 */
static void ofdpa_port_fdb_learn_work(struct work_struct *work)
{
	const struct ofdpa_fdb_learn_work *lw =
		container_of(work, struct ofdpa_fdb_learn_work, work);
	bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
	bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
	struct switchdev_notifier_fdb_info info = {};

	info.addr = lw->addr;
	info.vid = lw->vid;

	rtnl_lock();
	if (learned && removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
					 lw->ofdpa_port->dev, &info.info, NULL);
	else if (learned && !removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
					 lw->ofdpa_port->dev, &info.info, NULL);
	rtnl_unlock();

	kfree(work);
}
1841
/* Program the bridging table for a learned MAC and schedule the
 * deferred bridge notification.
 *
 * With OFDPA_OP_FLAG_REFRESH the hardware entry is left alone (only the
 * notification is scheduled).  Unbridged ports program the table but
 * send no notification.  The work item is GFP_ATOMIC since this runs
 * under the fdb_tbl spinlock in some callers.
 */
static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
				int flags, const u8 *addr, __be16 vlan_id)
{
	struct ofdpa_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = ofdpa_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool copy_to_cpu = false;
	int err;

	if (ofdpa_port_is_bridged(ofdpa_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	if (!(flags & OFDPA_OP_FLAG_REFRESH)) {
		err = ofdpa_flow_tbl_bridge(ofdpa_port, flags, addr,
					    NULL, vlan_id, tunnel_id, goto_tbl,
					    group_id, copy_to_cpu);
		if (err)
			return err;
	}

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return 0;

	lw = kzalloc(sizeof(*lw), GFP_ATOMIC);
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);

	lw->ofdpa_port = ofdpa_port;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id);

	schedule_work(&lw->work);
	return 0;
}
1882
1883 static struct ofdpa_fdb_tbl_entry *
1884 ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa,
1885 const struct ofdpa_fdb_tbl_entry *match)
1886 {
1887 struct ofdpa_fdb_tbl_entry *found;
1888
1889 hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32)
1890 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
1891 return found;
1892
1893 return NULL;
1894 }
1895
/* Add, refresh, or remove an FDB entry for (@addr, @vlan_id) on this
 * port, then program the hardware via ofdpa_port_fdb_learn().
 */
static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
			  const unsigned char *addr,
			  __be16 vlan_id, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *fdb;
	struct ofdpa_fdb_tbl_entry *found;
	bool removing = (flags & OFDPA_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	fdb = kzalloc(sizeof(*fdb), GFP_KERNEL);
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED);
	fdb->touched = jiffies;
	fdb->key.ofdpa_port = ofdpa_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	/* hash bucket index is the CRC32 of the whole key */
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	found = ofdpa_fdb_tbl_find(ofdpa, fdb);

	if (found) {
		found->touched = jiffies;
		if (removing) {
			kfree(fdb);
			hash_del(&found->entry);
		}
	} else if (!removing) {
		hash_add(ofdpa->fdb_tbl, &fdb->entry,
			 fdb->key_crc32);
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		kfree(fdb);
		if (!found && removing)
			return 0;
		/* already present: only refresh the aging timer in hw */
		flags |= OFDPA_OP_FLAG_REFRESH;
	}

	return ofdpa_port_fdb_learn(ofdpa_port, flags, addr, vlan_id);
}
1945
/* Flush all learned FDB entries for this port from both the local table
 * and the hardware.  Skipped while the port is in a forwarding-capable
 * STP state (LEARNING/FORWARDING); non-learned (static) entries are
 * left alone.
 */
static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.ofdpa_port != ofdpa_port)
			continue;
		if (!found->learned)
			continue;
		err = ofdpa_port_fdb_learn(ofdpa_port, flags,
					   found->key.addr,
					   found->key.vlan_id);
		if (err)
			goto err_out;
		hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	return err;
}
1981
/* Timer callback: age out expired learned FDB entries and re-arm the
 * timer for the next soonest expiry (default: one full ageing period
 * from now).
 */
static void ofdpa_fdb_cleanup(struct timer_list *t)
{
	struct ofdpa *ofdpa = from_timer(ofdpa, t, fdb_cleanup_timer);
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_fdb_tbl_entry *entry;
	struct hlist_node *tmp;
	unsigned long next_timer = jiffies + ofdpa->ageing_time;
	unsigned long expires;
	unsigned long lock_flags;
	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE |
		    OFDPA_OP_FLAG_LEARNED;
	int bkt;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) {
		if (!entry->learned)
			continue;
		ofdpa_port = entry->key.ofdpa_port;
		expires = entry->touched + ofdpa_port->ageing_time;
		if (time_before_eq(expires, jiffies)) {
			/* expired: remove from hardware and from the table */
			ofdpa_port_fdb_learn(ofdpa_port, flags,
					     entry->key.addr,
					     entry->key.vlan_id);
			hash_del(&entry->entry);
		} else if (time_before(expires, next_timer)) {
			next_timer = expires;
		}
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer));
}
2016
2017 static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
2018 int flags, __be16 vlan_id)
2019 {
2020 u32 in_pport_mask = 0xffffffff;
2021 __be16 eth_type;
2022 const u8 *dst_mac_mask = ff_mac;
2023 __be16 vlan_id_mask = htons(0xffff);
2024 bool copy_to_cpu = false;
2025 int err;
2026
2027 if (ntohs(vlan_id) == 0)
2028 vlan_id = ofdpa_port->internal_vlan_id;
2029
2030 eth_type = htons(ETH_P_IP);
2031 err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
2032 in_pport_mask, eth_type,
2033 ofdpa_port->dev->dev_addr,
2034 dst_mac_mask, vlan_id, vlan_id_mask,
2035 copy_to_cpu, flags);
2036 if (err)
2037 return err;
2038
2039 eth_type = htons(ETH_P_IPV6);
2040 err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
2041 in_pport_mask, eth_type,
2042 ofdpa_port->dev->dev_addr,
2043 dst_mac_mask, vlan_id, vlan_id_mask,
2044 copy_to_cpu, flags);
2045
2046 return err;
2047 }
2048
/* Enable or disable forwarding on the port by (re)programming its L2
 * interface group on every VLAN the port is a member of.  When the STP
 * state is neither LEARNING nor FORWARDING the groups are removed,
 * which stops egress on the port.
 */
static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port, int flags)
{
	bool pop_vlan;
	u32 out_pport;
	__be16 vlan_id;
	u16 vid;
	int err;

	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
		flags |= OFDPA_OP_FLAG_REMOVE;

	out_pport = ofdpa_port->pport;
	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
			continue;
		vlan_id = htons(vid);
		/* internal VLANs are popped (untagged) on egress */
		pop_vlan = ofdpa_vlan_id_is_internal(vlan_id);
		err = ofdpa_group_l2_interface(ofdpa_port, flags,
					       vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	return 0;
}
2085
/* Transition the port to a new STP state.
 *
 * Computes the desired set of ctrl entries for the new state, applies
 * the delta against the currently installed set, then flushes learned
 * FDB entries and updates forwarding groups.  On any failure the ctrl
 * set and STP state are rolled back to their previous values.
 */
static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
				 int flags, u8 state)
{
	bool want[OFDPA_CTRL_MAX] = { 0, };
	bool prev_ctrls[OFDPA_CTRL_MAX];
	u8 prev_state;
	int err;
	int i;

	memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
	prev_state = ofdpa_port->stp_state;

	if (ofdpa_port->stp_state == state)
		return 0;

	ofdpa_port->stp_state = state;

	switch (state) {
	case BR_STATE_DISABLED:
		/* port disabled: no ctrl traffic wanted */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		want[OFDPA_CTRL_IPV4_MCAST] = true;
		want[OFDPA_CTRL_IPV6_MCAST] = true;
		if (ofdpa_port_is_bridged(ofdpa_port))
			want[OFDPA_CTRL_DFLT_BRIDGING] = true;
		else if (ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_DFLT_OVS] = true;
		else
			want[OFDPA_CTRL_LOCAL_ARP] = true;
		break;
	}

	/* install newly wanted ctrls, remove no-longer-wanted ones */
	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
		if (want[i] != ofdpa_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ? 0 : OFDPA_OP_FLAG_REMOVE);
			err = ofdpa_port_ctrl(ofdpa_port, ctrl_flags,
					      &ofdpa_ctrls[i]);
			if (err)
				goto err_port_ctrl;
			ofdpa_port->ctrls[i] = want[i];
		}
	}

	err = ofdpa_port_fdb_flush(ofdpa_port, flags);
	if (err)
		goto err_fdb_flush;

	err = ofdpa_port_fwding(ofdpa_port, flags);
	if (err)
		goto err_port_fwding;

	return 0;

err_port_ctrl:
err_fdb_flush:
err_port_fwding:
	/* restore previous ctrl set and STP state on failure */
	memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
	ofdpa_port->stp_state = prev_state;
	return err;
}
2155
2156 static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags)
2157 {
2158 if (ofdpa_port_is_bridged(ofdpa_port))
2159
2160 return 0;
2161
2162
2163 return ofdpa_port_stp_update(ofdpa_port, flags,
2164 BR_STATE_FORWARDING);
2165 }
2166
2167 static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags)
2168 {
2169 if (ofdpa_port_is_bridged(ofdpa_port))
2170
2171 return 0;
2172
2173
2174 return ofdpa_port_stp_update(ofdpa_port, flags,
2175 BR_STATE_DISABLED);
2176 }
2177
/* Add VLAN @vid to the port: program VLAN membership first, then the
 * router (termination) MAC entries; on failure of the latter, roll the
 * VLAN membership back.  @flags (bridge VLAN flags) is currently unused.
 */
static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port,
			       u16 vid, u16 flags)
{
	int err;

	err = ofdpa_port_vlan(ofdpa_port, 0, vid);
	if (err)
		return err;

	err = ofdpa_port_router_mac(ofdpa_port, 0, htons(vid));
	if (err)
		/* undo the VLAN membership added above */
		ofdpa_port_vlan(ofdpa_port,
				OFDPA_OP_FLAG_REMOVE, vid);

	return err;
}
2196
2197 static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port,
2198 u16 vid, u16 flags)
2199 {
2200 int err;
2201
2202 err = ofdpa_port_router_mac(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
2203 htons(vid));
2204 if (err)
2205 return err;
2206
2207 return ofdpa_port_vlan(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
2208 vid);
2209 }
2210
2211 static struct ofdpa_internal_vlan_tbl_entry *
2212 ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex)
2213 {
2214 struct ofdpa_internal_vlan_tbl_entry *found;
2215
2216 hash_for_each_possible(ofdpa->internal_vlan_tbl, found,
2217 entry, ifindex) {
2218 if (found->ifindex == ifindex)
2219 return found;
2220 }
2221
2222 return NULL;
2223 }
2224
/* Get (or create) the internal VLAN ID used to represent untagged
 * traffic for the netdev with the given ifindex, taking a reference on
 * the table entry.  Pair with ofdpa_port_internal_vlan_id_put().
 * Returns 0 (not a valid internal VLAN ID) if allocation fails.
 */
static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port,
					      int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *entry;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	/* Allocate speculatively so no allocation happens under the lock. */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (found) {
		/* Someone already has an entry for this ifindex; just take
		 * a reference on it and drop our speculative allocation.
		 */
		kfree(entry);
		goto found;
	}

	found = entry;
	hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex);

	/* Claim the first free ID from the internal VLAN bitmap. */
	for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n");
	/* NOTE(review): on exhaustion the entry stays hashed with vlan_id 0
	 * (from kzalloc) and still gets a reference; presumably callers
	 * treat a zero VLAN ID as invalid — confirm.
	 */

found:
	found->ref_count++;
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}
2266
/* Install (or remove, depending on flags) an IPv4 route in the OF-DPA
 * unicast routing table.  Only the first nexthop of the fib_info is
 * considered.
 */
static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port, __be32 dst,
			       int dst_len, struct fib_info *fi, u32 tb_id,
			       int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = ofdpa_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* Only nexthop 0 is used; multipath routes are not offloaded here. */
	nh = fib_info_nh(fi, 0);
	nh_on_port = (nh->fib_nh_dev == ofdpa_port->dev);
	has_gw = !!nh->fib_nh_gw4;

	if (has_gw && nh_on_port) {
		/* Gatewayed route out of this port: resolve (or create) the
		 * neighbour entry for the gateway and route via the matching
		 * L3 unicast group.
		 */
		err = ofdpa_port_ipv4_nh(ofdpa_port, flags,
					 nh->fib_nh_gw4, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* No usable gateway on this port: use the L2 interface group
		 * for the port's internal VLAN (pport 0) instead — presumably
		 * so these packets reach the CPU; confirm against the group
		 * table semantics.
		 */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, eth_type, dst,
					    dst_mask, priority, goto_tbl,
					    group_id, fi, flags);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}
2311
/* Drop a reference on the internal VLAN table entry for ifindex.  On
 * the last put, return the VLAN ID to the bitmap, unhash and free the
 * entry.
 */
static void
ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port,
				int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	unsigned long bit;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (!found) {
		/* Unbalanced put — nothing to release. */
		netdev_err(ofdpa_port->dev,
			   "ifindex (%d) not found in internal VLAN tbl\n",
			   ifindex);
		goto not_found;
	}

	if (--found->ref_count <= 0) {
		/* vlan_id was allocated relative to the base in
		 * ofdpa_port_internal_vlan_id_get(); free that bitmap bit.
		 */
		bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE;
		clear_bit(bit, ofdpa->internal_vlan_bitmap);
		hash_del(&found->entry);
		kfree(found);
	}

not_found:
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
}
2341
2342
2343
2344
2345
2346 static int ofdpa_init(struct rocker *rocker)
2347 {
2348 struct ofdpa *ofdpa = rocker->wpriv;
2349
2350 ofdpa->rocker = rocker;
2351
2352 hash_init(ofdpa->flow_tbl);
2353 spin_lock_init(&ofdpa->flow_tbl_lock);
2354
2355 hash_init(ofdpa->group_tbl);
2356 spin_lock_init(&ofdpa->group_tbl_lock);
2357
2358 hash_init(ofdpa->fdb_tbl);
2359 spin_lock_init(&ofdpa->fdb_tbl_lock);
2360
2361 hash_init(ofdpa->internal_vlan_tbl);
2362 spin_lock_init(&ofdpa->internal_vlan_tbl_lock);
2363
2364 hash_init(ofdpa->neigh_tbl);
2365 spin_lock_init(&ofdpa->neigh_tbl_lock);
2366
2367 timer_setup(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup, 0);
2368 mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);
2369
2370 ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;
2371
2372 return 0;
2373 }
2374
/* World teardown: stop the FDB cleanup timer, drain any outstanding
 * rocker work, then empty every table under its own lock.  Entries are
 * only unhashed here, not freed — memory is released elsewhere
 * (presumably with the world-private area; confirm in rocker core).
 */
static void ofdpa_fini(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	unsigned long flags;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct ofdpa_group_tbl_entry *group_entry;
	struct ofdpa_fdb_tbl_entry *fdb_entry;
	struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry;
	struct ofdpa_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	int bkt;

	/* Stop ageing and wait for in-flight work before tearing down. */
	del_timer_sync(&ofdpa->fdb_cleanup_timer);
	flush_workqueue(rocker->rocker_owq);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->group_tbl_lock, flags);
	hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags);
	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags);
	hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags);
}
2417
2418 static int ofdpa_port_pre_init(struct rocker_port *rocker_port)
2419 {
2420 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2421
2422 ofdpa_port->ofdpa = rocker_port->rocker->wpriv;
2423 ofdpa_port->rocker_port = rocker_port;
2424 ofdpa_port->dev = rocker_port->dev;
2425 ofdpa_port->pport = rocker_port->pport;
2426 ofdpa_port->brport_flags = BR_LEARNING;
2427 ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME;
2428 return 0;
2429 }
2430
/* Per-port hardware init: push the learning mode, install the ingress
 * port table entry, take an internal VLAN ID keyed on the port's own
 * ifindex, and install the untagged VID.
 */
static int ofdpa_port_init(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int err;

	rocker_port_set_learning(rocker_port,
				 !!(ofdpa_port->brport_flags & BR_LEARNING));

	err = ofdpa_port_ig_tbl(ofdpa_port, 0);
	if (err) {
		netdev_err(ofdpa_port->dev, "install ig port table failed\n");
		return err;
	}

	/* NOTE(review): a 0 return here (allocation failure or ID
	 * exhaustion) is not checked — confirm that is acceptable.
	 */
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err) {
		netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n");
		goto err_untagged_vlan;
	}
	return 0;

err_untagged_vlan:
	/* Unwind the ingress table entry installed above. */
	ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
	return err;
}
2460
2461 static void ofdpa_port_fini(struct rocker_port *rocker_port)
2462 {
2463 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2464
2465 ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
2466 }
2467
2468 static int ofdpa_port_open(struct rocker_port *rocker_port)
2469 {
2470 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2471
2472 return ofdpa_port_fwd_enable(ofdpa_port, 0);
2473 }
2474
2475 static void ofdpa_port_stop(struct rocker_port *rocker_port)
2476 {
2477 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2478
2479 ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT);
2480 }
2481
2482 static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
2483 u8 state)
2484 {
2485 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2486
2487 return ofdpa_port_stp_update(ofdpa_port, 0, state);
2488 }
2489
2490 static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
2491 unsigned long brport_flags)
2492 {
2493 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2494 unsigned long orig_flags;
2495 int err = 0;
2496
2497 orig_flags = ofdpa_port->brport_flags;
2498 ofdpa_port->brport_flags = brport_flags;
2499
2500 if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING)
2501 err = rocker_port_set_learning(ofdpa_port->rocker_port,
2502 !!(ofdpa_port->brport_flags & BR_LEARNING));
2503
2504 return err;
2505 }
2506
2507 static int
2508 ofdpa_port_attr_bridge_flags_support_get(const struct rocker_port *
2509 rocker_port,
2510 unsigned long *
2511 p_brport_flags_support)
2512 {
2513 *p_brport_flags_support = BR_LEARNING;
2514 return 0;
2515 }
2516
2517 static int
2518 ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
2519 u32 ageing_time)
2520 {
2521 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2522 struct ofdpa *ofdpa = ofdpa_port->ofdpa;
2523
2524 ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
2525 if (ofdpa_port->ageing_time < ofdpa->ageing_time)
2526 ofdpa->ageing_time = ofdpa_port->ageing_time;
2527 mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
2528
2529 return 0;
2530 }
2531
2532 static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
2533 const struct switchdev_obj_port_vlan *vlan)
2534 {
2535 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2536
2537 return ofdpa_port_vlan_add(ofdpa_port, vlan->vid, vlan->flags);
2538 }
2539
2540 static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
2541 const struct switchdev_obj_port_vlan *vlan)
2542 {
2543 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2544
2545 return ofdpa_port_vlan_del(ofdpa_port, vlan->vid, vlan->flags);
2546 }
2547
2548 static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
2549 u16 vid, const unsigned char *addr)
2550 {
2551 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2552 __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);
2553
2554 if (!ofdpa_port_is_bridged(ofdpa_port))
2555 return -EINVAL;
2556
2557 return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, 0);
2558 }
2559
2560 static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
2561 u16 vid, const unsigned char *addr)
2562 {
2563 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2564 __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);
2565 int flags = OFDPA_OP_FLAG_REMOVE;
2566
2567 if (!ofdpa_port_is_bridged(ofdpa_port))
2568 return -EINVAL;
2569
2570 return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
2571 }
2572
/* Join the port to a bridge.  The internal VLAN backing untagged
 * traffic must switch from one keyed on the port's own ifindex to one
 * keyed on the bridge's ifindex, so tear down the untagged VID, swap
 * the internal VLAN ID, and re-install the untagged VID before telling
 * switchdev the bridge port is offloaded.
 */
static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
				  struct net_device *bridge,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = ofdpa_port->dev;
	int err;

	/* Remove untagged-VID entries tied to the old internal VLAN. */
	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	/* Swap the internal VLAN ID: drop the port-keyed reference and
	 * take one keyed on the bridge's ifindex.
	 */
	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex);

	ofdpa_port->bridge_dev = bridge;

	/* Re-install the untagged VID under the new internal VLAN. */
	err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	return switchdev_bridge_port_offload(dev, dev, NULL, NULL, NULL,
					     false, extack);
}
2604
/* Detach the port from its bridge: reverse of ofdpa_port_bridge_join().
 * Swap the internal VLAN ID back from bridge-keyed to port-keyed,
 * re-install the untagged VID, and resume forwarding if the netdev is
 * administratively up.
 */
static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
{
	struct net_device *dev = ofdpa_port->dev;
	int err;

	switchdev_bridge_port_unoffload(dev, NULL, NULL, NULL);

	/* Remove untagged-VID entries tied to the bridge's internal VLAN. */
	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	/* Drop the bridge-keyed internal VLAN reference and go back to a
	 * VLAN keyed on the port's own ifindex.
	 */
	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->bridge_dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	ofdpa_port->bridge_dev = NULL;

	err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	/* Standalone now: if the netdev is up, re-enable forwarding. */
	if (ofdpa_port->dev->flags & IFF_UP)
		err = ofdpa_port_fwd_enable(ofdpa_port, 0);

	return err;
}
2633
2634 static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port,
2635 struct net_device *master)
2636 {
2637 int err;
2638
2639 ofdpa_port->bridge_dev = master;
2640
2641 err = ofdpa_port_fwd_disable(ofdpa_port, 0);
2642 if (err)
2643 return err;
2644 err = ofdpa_port_fwd_enable(ofdpa_port, 0);
2645
2646 return err;
2647 }
2648
2649 static int ofdpa_port_master_linked(struct rocker_port *rocker_port,
2650 struct net_device *master,
2651 struct netlink_ext_ack *extack)
2652 {
2653 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2654 int err = 0;
2655
2656 if (netif_is_bridge_master(master))
2657 err = ofdpa_port_bridge_join(ofdpa_port, master, extack);
2658 else if (netif_is_ovs_master(master))
2659 err = ofdpa_port_ovs_changed(ofdpa_port, master);
2660 return err;
2661 }
2662
2663 static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port,
2664 struct net_device *master)
2665 {
2666 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2667 int err = 0;
2668
2669 if (ofdpa_port_is_bridged(ofdpa_port))
2670 err = ofdpa_port_bridge_leave(ofdpa_port);
2671 else if (ofdpa_port_is_ovsed(ofdpa_port))
2672 err = ofdpa_port_ovs_changed(ofdpa_port, NULL);
2673 return err;
2674 }
2675
2676 static int ofdpa_port_neigh_update(struct rocker_port *rocker_port,
2677 struct neighbour *n)
2678 {
2679 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2680 int flags = (n->nud_state & NUD_VALID ? 0 : OFDPA_OP_FLAG_REMOVE) |
2681 OFDPA_OP_FLAG_NOWAIT;
2682 __be32 ip_addr = *(__be32 *) n->primary_key;
2683
2684 return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
2685 }
2686
2687 static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
2688 struct neighbour *n)
2689 {
2690 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2691 int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT;
2692 __be32 ip_addr = *(__be32 *) n->primary_key;
2693
2694 return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
2695 }
2696
2697 static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
2698 const unsigned char *addr,
2699 __be16 vlan_id)
2700 {
2701 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2702 int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED;
2703
2704 if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
2705 ofdpa_port->stp_state != BR_STATE_FORWARDING)
2706 return 0;
2707
2708 return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
2709 }
2710
2711 static struct ofdpa_port *ofdpa_port_dev_lower_find(struct net_device *dev,
2712 struct rocker *rocker)
2713 {
2714 struct rocker_port *rocker_port;
2715
2716 rocker_port = rocker_port_dev_lower_find(dev, rocker);
2717 return rocker_port ? rocker_port->wpriv : NULL;
2718 }
2719
2720 static int ofdpa_fib4_add(struct rocker *rocker,
2721 const struct fib_entry_notifier_info *fen_info)
2722 {
2723 struct ofdpa *ofdpa = rocker->wpriv;
2724 struct ofdpa_port *ofdpa_port;
2725 struct fib_nh *nh;
2726 int err;
2727
2728 if (ofdpa->fib_aborted)
2729 return 0;
2730 nh = fib_info_nh(fen_info->fi, 0);
2731 ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
2732 if (!ofdpa_port)
2733 return 0;
2734 err = ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
2735 fen_info->dst_len, fen_info->fi,
2736 fen_info->tb_id, 0);
2737 if (err)
2738 return err;
2739 nh->fib_nh_flags |= RTNH_F_OFFLOAD;
2740 return 0;
2741 }
2742
2743 static int ofdpa_fib4_del(struct rocker *rocker,
2744 const struct fib_entry_notifier_info *fen_info)
2745 {
2746 struct ofdpa *ofdpa = rocker->wpriv;
2747 struct ofdpa_port *ofdpa_port;
2748 struct fib_nh *nh;
2749
2750 if (ofdpa->fib_aborted)
2751 return 0;
2752 nh = fib_info_nh(fen_info->fi, 0);
2753 ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
2754 if (!ofdpa_port)
2755 return 0;
2756 nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
2757 return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
2758 fen_info->dst_len, fen_info->fi,
2759 fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
2760 }
2761
/* Abort FIB offload: delete every unicast-routing flow entry from
 * hardware, clear the nexthops' offload flag, and set fib_aborted so
 * ofdpa_fib4_add()/ofdpa_fib4_del() become no-ops (the flag is never
 * cleared in this file).
 */
static void ofdpa_fib4_abort(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct hlist_node *tmp;
	unsigned long flags;
	int bkt;

	/* Already aborted once — nothing left to flush. */
	if (ofdpa->fib_aborted)
		return;

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) {
		struct fib_nh *nh;

		/* Only unicast-routing entries carry FIB state. */
		if (flow_entry->key.tbl_id !=
		    ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING)
			continue;
		nh = fib_info_nh(flow_entry->fi, 0);
		ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
		if (!ofdpa_port)
			continue;
		nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
		ofdpa_flow_tbl_del(ofdpa_port,
				   OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT,
				   flow_entry);
	}
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
	ofdpa->fib_aborted = true;
}
2793
/* OF-DPA world operations: the glue between the generic rocker driver
 * core and the OF-DPA pipeline implemented in this file.
 */
struct rocker_world_ops rocker_ofdpa_ops = {
	.kind = "ofdpa",
	.priv_size = sizeof(struct ofdpa),
	.port_priv_size = sizeof(struct ofdpa_port),
	.mode = ROCKER_PORT_MODE_OF_DPA,
	.init = ofdpa_init,
	.fini = ofdpa_fini,
	.port_pre_init = ofdpa_port_pre_init,
	.port_init = ofdpa_port_init,
	.port_fini = ofdpa_port_fini,
	.port_open = ofdpa_port_open,
	.port_stop = ofdpa_port_stop,
	.port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
	.port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
	.port_attr_bridge_flags_support_get = ofdpa_port_attr_bridge_flags_support_get,
	.port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
	.port_obj_vlan_add = ofdpa_port_obj_vlan_add,
	.port_obj_vlan_del = ofdpa_port_obj_vlan_del,
	.port_obj_fdb_add = ofdpa_port_obj_fdb_add,
	.port_obj_fdb_del = ofdpa_port_obj_fdb_del,
	.port_master_linked = ofdpa_port_master_linked,
	.port_master_unlinked = ofdpa_port_master_unlinked,
	.port_neigh_update = ofdpa_port_neigh_update,
	.port_neigh_destroy = ofdpa_port_neigh_destroy,
	.port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen,
	.fib4_add = ofdpa_fib4_add,
	.fib4_del = ofdpa_fib4_del,
	.fib4_abort = ofdpa_fib4_abort,
};