// SPDX-License-Identifier: GPL-2.0+
/*
 * Regular and Ethertype DSA tagging
 *
 * Regular DSA
 * -----------
 * The 4-byte tag is inserted between the source address and the
 * ethertype of the original frame. If the frame already carries an
 * 802.1Q tag, that tag is converted to a DSA tag in place, so the
 * frame does not grow:
 *
 *	.----.----.---------.------.---------
 *	| DA | SA | DSA tag | Type | Data ...
 *	'----'----'---------'------'---------
 *
 * Ethertype DSA (EDSA)
 * --------------------
 * Prefixes the regular DSA tag with a dedicated ethertype (0xDADA)
 * and two reserved zero bytes, for a total of 8 bytes, which lets
 * DSA-tagged traffic be told apart from other traffic on the CPU
 * port by its ethertype:
 *
 *	.----.----.--------------------.---------.------.---------
 *	| DA | SA | 0xDADA, 0x00, 0x00 | DSA tag | Type | Data ...
 *	'----'----'--------------------'---------'------'---------
 */
#include <linux/dsa/mv88e6xxx.h>
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>

#include "dsa_priv.h"

#define DSA_HLEN	4

/**
 * enum dsa_cmd - DSA Command
 * @DSA_CMD_TO_CPU: Set on packets that were trapped or mirrored to
 *     the CPU port. This is needed to implement control protocols,
 *     e.g. STP and LLDP, that must not allow those control packets
 *     to be switched according to the normal rules.
 * @DSA_CMD_FROM_CPU: Used by the CPU to send a packet to a specific
 *     port, ignoring all the barriers that the switch normally
 *     enforces (VLANs, STP port states etc.). No source address
 *     learning takes place.
 * @DSA_CMD_TO_SNIFFER: Set on the copies of packets that matched
 *     some user configured ingress or egress monitor criteria. These
 *     are forwarded by the switch tree to the port connected to the
 *     monitoring CPU.
 * @DSA_CMD_FORWARD: This tag is used on all bulk traffic passing
 *     through the switch tree, including the flows between CPU ports
 *     and user ports.
 */
enum dsa_cmd {
	DSA_CMD_TO_CPU     = 0,
	DSA_CMD_FROM_CPU   = 1,
	DSA_CMD_TO_SNIFFER = 2,
	DSA_CMD_FORWARD    = 3
};

/**
 * enum dsa_code - TO_CPU Code
 * @DSA_CODE_MGMT_TRAP: The destination address was classified as a
 *     management address. Typical examples include STP BPDUs and
 *     LLDP.
 * @DSA_CODE_FRAME2REG: Response to a "remote management" request.
 * @DSA_CODE_IGMP_MLD_TRAP: IGMP/MLD signaling.
 * @DSA_CODE_POLICY_TRAP: Frame matched some policy configuration on
 *     the device. Typical examples are matched user flows or DHCP
 *     requests.
 * @DSA_CODE_ARP_MIRROR: A copy of an ARP frame, mirrored to the CPU.
 * @DSA_CODE_POLICY_MIRROR: Same as @DSA_CODE_POLICY_TRAP, but the
 *     particular policy was set to trigger a mirror instead of a
 *     trap.
 * @DSA_CODE_RESERVED_6: Unused on any documented devices.
 * @DSA_CODE_RESERVED_7: Unused on any documented devices.
 */
enum dsa_code {
	DSA_CODE_MGMT_TRAP     = 0,
	DSA_CODE_FRAME2REG     = 1,
	DSA_CODE_IGMP_MLD_TRAP = 2,
	DSA_CODE_POLICY_TRAP   = 3,
	DSA_CODE_ARP_MIRROR    = 4,
	DSA_CODE_POLICY_MIRROR = 5,
	DSA_CODE_RESERVED_6    = 6,
	DSA_CODE_RESERVED_7    = 7
};
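
/* For reference, a sketch of the 4-byte DSA tag layout as this
 * tagger reads and writes it. This is reconstructed from the bit
 * operations below; consult the device documentation for the
 * authoritative definition:
 *
 *	byte 0: bits 7:6 cmd, bit 5 tagged, bits 4:0 device (switch) id
 *	byte 1: bits 7:3 port, bits 2:1 code[2:1] (TO_CPU), with bit 2
 *	        doubling as the trunk flag (FORWARD), bit 0 CFI
 *	byte 2: bits 7:5 PRI, bit 4 code[0] (TO_CPU), bits 3:0 VID[11:8]
 *	byte 3: bits 7:0 VID[7:0]
 */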

static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
				   u8 extra)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct net_device *br_dev;
	u8 tag_dev, tag_port;
	enum dsa_cmd cmd;
	u8 *dsa_header;

	if (skb->offload_fwd_mark) {
		unsigned int bridge_num = dsa_port_bridge_num_get(dp);
		struct dsa_switch_tree *dst = dp->ds->dst;

		cmd = DSA_CMD_FORWARD;

		/* When offloading forwarding for a bridge, inject
		 * FORWARD packets on behalf of a virtual switch device
		 * with an index past the physical switches.
		 */
		tag_dev = dst->last_switch + bridge_num;
		tag_port = 0;
	} else {
		cmd = DSA_CMD_FROM_CPU;
		tag_dev = dp->ds->index;
		tag_port = dp->index;
	}

	br_dev = dsa_port_bridge_dev_get(dp);

	/* If the frame is already 802.1Q tagged, we can convert it to
	 * a DSA tag (avoiding a memmove), but only if the port is
	 * standalone (in which case we always send FROM_CPU) or if
	 * the port's bridge has VLAN filtering enabled (in which case
	 * the CPU is expected to send VID-aware frames).
	 */
	if (skb->protocol == htons(ETH_P_8021Q) &&
	    (!br_dev || br_vlan_enabled(br_dev))) {
		if (extra) {
			skb_push(skb, extra);
			dsa_alloc_etype_header(skb, extra);
		}

		/* Construct tagged DSA tag from 802.1Q tag. */
		dsa_header = dsa_etype_header_pos_tx(skb) + extra;
		dsa_header[0] = (cmd << 6) | 0x20 | tag_dev;
		dsa_header[1] = tag_port << 3;

		/* Move CFI bit from byte 2 to byte 1. */
		if (dsa_header[2] & 0x10) {
			dsa_header[1] |= 0x01;
			dsa_header[2] &= ~0x10;
		}
	} else {
		u16 vid;

		vid = br_dev ? MV88E6XXX_VID_BRIDGED : MV88E6XXX_VID_STANDALONE;

		skb_push(skb, DSA_HLEN + extra);
		dsa_alloc_etype_header(skb, DSA_HLEN + extra);

		/* Construct untagged DSA tag. */
		dsa_header = dsa_etype_header_pos_tx(skb) + extra;

		dsa_header[0] = (cmd << 6) | tag_dev;
		dsa_header[1] = tag_port << 3;
		dsa_header[2] = vid >> 8;
		dsa_header[3] = vid & 0xff;
	}

	return skb;
}
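
/* For illustration, the untagged FROM_CPU tag built above for a
 * standalone port on switch 0, port 3 (so vid =
 * MV88E6XXX_VID_STANDALONE, i.e. 0) works out to:
 *
 *	dsa_header[0] = (DSA_CMD_FROM_CPU << 6) | 0 = 0x40
 *	dsa_header[1] = 3 << 3                      = 0x18
 *	dsa_header[2] = 0 >> 8                      = 0x00
 *	dsa_header[3] = 0 & 0xff                    = 0x00
 */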

static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
				  u8 extra)
{
	bool trap = false, trunk = false;
	int source_device, source_port;
	enum dsa_code code;
	enum dsa_cmd cmd;
	u8 *dsa_header;

	/* The ethertype field is part of the DSA header. */
	dsa_header = dsa_etype_header_pos_rx(skb);

	cmd = dsa_header[0] >> 6;
	switch (cmd) {
	case DSA_CMD_FORWARD:
		trunk = !!(dsa_header[1] & 4);
		break;

	case DSA_CMD_TO_CPU:
		code = (dsa_header[1] & 0x6) | ((dsa_header[2] >> 4) & 1);

		switch (code) {
		case DSA_CODE_FRAME2REG:
			/* Remote management is not implemented yet,
			 * drop.
			 */
			return NULL;
		case DSA_CODE_ARP_MIRROR:
		case DSA_CODE_POLICY_MIRROR:
			/* Mark mirrored packets to notify any upper
			 * device (like a bridge) that forwarding has
			 * already been done by hardware.
			 */
			break;
		case DSA_CODE_MGMT_TRAP:
		case DSA_CODE_IGMP_MLD_TRAP:
		case DSA_CODE_POLICY_TRAP:
			/* Traps have, by definition, not been
			 * forwarded by hardware, so don't mark them.
			 */
			trap = true;
			break;
		default:
			/* Reserved code, this could be anything. Drop
			 * seems like the safest action.
			 */
			return NULL;
		}

		break;

	default:
		return NULL;
	}

	source_device = dsa_header[0] & 0x1f;
	source_port = (dsa_header[1] >> 3) & 0x1f;

	if (trunk) {
		struct dsa_port *cpu_dp = dev->dsa_ptr;
		struct dsa_lag *lag;

		/* The exact source port is not available in the tag,
		 * so we inject the frame directly on the upper
		 * team/bond.
		 */
		lag = dsa_lag_by_id(cpu_dp->dst, source_port + 1);
		skb->dev = lag ? lag->dev : NULL;
	} else {
		skb->dev = dsa_master_find_slave(dev, source_device,
						 source_port);
	}

	if (!skb->dev)
		return NULL;

	/* When using LAG offload, skb->dev is not a DSA slave
	 * interface, so we cannot call dsa_default_offload_fwd_mark
	 * and need to special-case it.
	 */
	if (trunk)
		skb->offload_fwd_mark = true;
	else if (!trap)
		dsa_default_offload_fwd_mark(skb);

	/* If the 'tagged' bit is set, convert the DSA tag to an
	 * 802.1Q tag and delete the ethertype (extra) if applicable.
	 * If the 'tagged' bit is cleared, delete the DSA tag, and the
	 * ethertype if applicable.
	 */
	if (dsa_header[0] & 0x20) {
		u8 new_header[4];

		/* Insert 802.1Q ethertype and copy the VLAN-related
		 * fields, but clear the bit that will hold CFI (since
		 * DSA uses that bit location for another purpose).
		 */
		new_header[0] = (ETH_P_8021Q >> 8) & 0xff;
		new_header[1] = ETH_P_8021Q & 0xff;
		new_header[2] = dsa_header[2] & ~0x10;
		new_header[3] = dsa_header[3];

		/* Move CFI bit from its place in the DSA tag to its
		 * 802.1Q-designated place.
		 */
		if (dsa_header[1] & 0x01)
			new_header[2] |= 0x10;

		/* Update packet checksum if skb is CHECKSUM_COMPLETE. */
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			__wsum c = skb->csum;

			c = csum_add(c, csum_partial(new_header + 2, 2, 0));
			c = csum_sub(c, csum_partial(dsa_header + 2, 2, 0));
			skb->csum = c;
		}

		memcpy(dsa_header, new_header, DSA_HLEN);

		if (extra)
			dsa_strip_etype_header(skb, extra);
	} else {
		skb_pull_rcsum(skb, DSA_HLEN);
		dsa_strip_etype_header(skb, DSA_HLEN + extra);
	}

	return skb;
}
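
/* For illustration, an untagged FORWARD frame received with the
 * (made-up) tag bytes c3 28 00 00 decodes as:
 *
 *	cmd           = 0xc3 >> 6          = 3 (DSA_CMD_FORWARD)
 *	tagged        = 0xc3 & 0x20        = 0 (no 802.1Q conversion)
 *	source_device = 0xc3 & 0x1f        = 3
 *	source_port   = (0x28 >> 3) & 0x1f = 5
 *	trunk         = 0x28 & 4           = 0 (not a LAG member)
 */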

#if IS_ENABLED(CONFIG_NET_DSA_TAG_DSA)

static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
{
	return dsa_xmit_ll(skb, dev, 0);
}

static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev)
{
	if (unlikely(!pskb_may_pull(skb, DSA_HLEN)))
		return NULL;

	return dsa_rcv_ll(skb, dev, 0);
}

static const struct dsa_device_ops dsa_netdev_ops = {
	.name = "dsa",
	.proto = DSA_TAG_PROTO_DSA,
	.xmit = dsa_xmit,
	.rcv = dsa_rcv,
	.needed_headroom = DSA_HLEN,
};

DSA_TAG_DRIVER(dsa_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_DSA);
#endif

#if IS_ENABLED(CONFIG_NET_DSA_TAG_EDSA)

#define EDSA_HLEN	8

static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u8 *edsa_header;

	skb = dsa_xmit_ll(skb, dev, EDSA_HLEN - DSA_HLEN);
	if (!skb)
		return NULL;

	edsa_header = dsa_etype_header_pos_tx(skb);
	edsa_header[0] = (ETH_P_EDSA >> 8) & 0xff;
	edsa_header[1] = ETH_P_EDSA & 0xff;
	edsa_header[2] = 0x00;
	edsa_header[3] = 0x00;
	return skb;
}
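
/* For illustration, the full EDSA header built above is 8 bytes on
 * the wire (ETH_P_EDSA is 0xDADA, per if_ether.h): the regular DSA
 * tag prefixed with the EDSA ethertype and two reserved zero bytes.
 *
 *	.-------------------.-----------.------------.
 *	| 0xda 0xda (etype) | 0x00 0x00 | 4-byte tag |
 *	'-------------------'-----------'------------'
 */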

static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev)
{
	if (unlikely(!pskb_may_pull(skb, EDSA_HLEN)))
		return NULL;

	skb_pull_rcsum(skb, EDSA_HLEN - DSA_HLEN);

	return dsa_rcv_ll(skb, dev, EDSA_HLEN - DSA_HLEN);
}

static const struct dsa_device_ops edsa_netdev_ops = {
	.name = "edsa",
	.proto = DSA_TAG_PROTO_EDSA,
	.xmit = edsa_xmit,
	.rcv = edsa_rcv,
	.needed_headroom = EDSA_HLEN,
};

DSA_TAG_DRIVER(edsa_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_EDSA);
#endif

static struct dsa_tag_driver *dsa_tag_drivers[] = {
#if IS_ENABLED(CONFIG_NET_DSA_TAG_DSA)
	&DSA_TAG_DRIVER_NAME(dsa_netdev_ops),
#endif
#if IS_ENABLED(CONFIG_NET_DSA_TAG_EDSA)
	&DSA_TAG_DRIVER_NAME(edsa_netdev_ops),
#endif
};

module_dsa_tag_drivers(dsa_tag_drivers);

MODULE_LICENSE("GPL");
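
/* For reference: a switch driver opts in to one of these taggers by
 * returning the corresponding protocol from its dsa_switch_ops
 * .get_tag_protocol callback. A minimal sketch (the foo_ name is
 * hypothetical, and the callback signature is assumed from the DSA
 * framework of this kernel version):
 *
 *	static enum dsa_tag_protocol
 *	foo_get_tag_protocol(struct dsa_switch *ds, int port,
 *			     enum dsa_tag_protocol mprot)
 *	{
 *		return DSA_TAG_PROTO_EDSA;
 *	}
 */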