// SPDX-License-Identifier: GPL-2.0
#include <linux/if_vlan.h>
#include <linux/dsa/sja1105.h>
#include <linux/dsa/8021q.h>
#include <linux/packing.h>
#include "dsa_priv.h"

/* Is this a TX or an RX header? */
#define SJA1110_HEADER_HOST_TO_SWITCH	BIT(15)

/* RX header */
#define SJA1110_RX_HEADER_IS_METADATA	BIT(14)
#define SJA1110_RX_HEADER_HOST_ONLY	BIT(13)
#define SJA1110_RX_HEADER_HAS_TRAILER	BIT(12)

/* Trap-to-host format (no trailer present) */
#define SJA1110_RX_HEADER_SRC_PORT(x)	(((x) & GENMASK(7, 4)) >> 4)
#define SJA1110_RX_HEADER_SWITCH_ID(x)	((x) & GENMASK(3, 0))

/* Timestamp format (trailer present) */
#define SJA1110_RX_HEADER_TRAILER_POS(x)	((x) & GENMASK(11, 0))

#define SJA1110_RX_TRAILER_SWITCH_ID(x)	(((x) & GENMASK(7, 4)) >> 4)
#define SJA1110_RX_TRAILER_SRC_PORT(x)	((x) & GENMASK(3, 0))

/* Metadata frame format (carries timestamps) */
#define SJA1110_RX_HEADER_N_TS(x)	(((x) & GENMASK(8, 4)) >> 4)

/* TX header */
#define SJA1110_TX_HEADER_UPDATE_TC	BIT(14)
#define SJA1110_TX_HEADER_TAKE_TS	BIT(13)
#define SJA1110_TX_HEADER_TAKE_TS_CASC	BIT(12)
#define SJA1110_TX_HEADER_HAS_TRAILER	BIT(11)

/* Only valid if SJA1110_TX_HEADER_HAS_TRAILER is false */
#define SJA1110_TX_HEADER_PRIO(x)	(((x) << 7) & GENMASK(10, 7))
#define SJA1110_TX_HEADER_TSTAMP_ID(x)	((x) & GENMASK(7, 0))

/* Only valid if SJA1110_TX_HEADER_HAS_TRAILER is true */
#define SJA1110_TX_HEADER_TRAILER_POS(x)	((x) & GENMASK(10, 0))

#define SJA1110_TX_TRAILER_TSTAMP_ID(x)	(((x) << 24) & GENMASK(31, 24))
#define SJA1110_TX_TRAILER_PRIO(x)	(((x) << 21) & GENMASK(23, 21))
#define SJA1110_TX_TRAILER_SWITCHID(x)	(((x) << 12) & GENMASK(15, 12))
#define SJA1110_TX_TRAILER_DESTPORTS(x)	(((x) << 1) & GENMASK(11, 1))

#define SJA1110_META_TSTAMP_SIZE	10

#define SJA1110_HEADER_LEN		4
#define SJA1110_RX_TRAILER_LEN		13
#define SJA1110_TX_TRAILER_LEN		4
#define SJA1110_MAX_PADDING_LEN		15

/* Bit in the tagger private state which tracks whether PTP RX timestamping
 * (and thus the meta frame state machine below) is enabled.
 */
#define SJA1105_HWTS_RX_EN		0

struct sja1105_tagger_private {
	struct sja1105_tagger_data data; /* Must be first */
	unsigned long state;
	/* Protects concurrent access to the meta state machine
	 * from taggers running on multiple ports on SMP systems
	 */
	spinlock_t meta_lock;
	struct sk_buff *stampable_skb;
	struct kthread_worker *xmit_worker;
};

static struct sja1105_tagger_private *
sja1105_tagger_private(struct dsa_switch *ds)
{
	return ds->tagger_data;
}

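/* A frame is link-local (i.e. trapped to the host by the switch) if its
 * destination MAC matches one of the two link-local filters, and it is not
 * a meta follow-up frame.
 */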
static inline bool sja1105_is_link_local(const struct sk_buff *skb)
{
	const struct ethhdr *hdr = eth_hdr(skb);
	u64 dmac = ether_addr_to_u64(hdr->h_dest);

	if (ntohs(hdr->h_proto) == ETH_P_SJA1105_META)
		return false;
	if ((dmac & SJA1105_LINKLOCAL_FILTER_A_MASK) ==
		    SJA1105_LINKLOCAL_FILTER_A)
		return true;
	if ((dmac & SJA1105_LINKLOCAL_FILTER_B_MASK) ==
		    SJA1105_LINKLOCAL_FILTER_B)
		return true;
	return false;
}

struct sja1105_meta {
	u64 tstamp;
	u64 dmac_byte_4;
	u64 dmac_byte_3;
	u64 source_port;
	u64 switch_id;
};

static void sja1105_meta_unpack(const struct sk_buff *skb,
				struct sja1105_meta *meta)
{
	u8 *buf = skb_mac_header(skb) + ETH_HLEN;

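	/* The meta frame payload starts right after the Ethernet header and
	 * is laid out in network byte order:
	 *   4 bytes: partial RX timestamp of the trapped frame
	 *   1 byte:  byte 4 of that frame's original destination MAC
	 *   1 byte:  byte 3 of that frame's original destination MAC
	 *   1 byte:  source port
	 *   1 byte:  switch id
	 */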
	packing(buf,     &meta->tstamp,      31, 0, 4, UNPACK, 0);
	packing(buf + 4, &meta->dmac_byte_4,  7, 0, 1, UNPACK, 0);
	packing(buf + 5, &meta->dmac_byte_3,  7, 0, 1, UNPACK, 0);
	packing(buf + 6, &meta->source_port,  7, 0, 1, UNPACK, 0);
	packing(buf + 7, &meta->switch_id,    7, 0, 1, UNPACK, 0);
}

static inline bool sja1105_is_meta_frame(const struct sk_buff *skb)
{
	const struct ethhdr *hdr = eth_hdr(skb);
	u64 smac = ether_addr_to_u64(hdr->h_source);
	u64 dmac = ether_addr_to_u64(hdr->h_dest);

	if (smac != SJA1105_META_SMAC)
		return false;
	if (dmac != SJA1105_META_DMAC)
		return false;
	if (ntohs(hdr->h_proto) != ETH_P_SJA1105_META)
		return false;
	return true;
}

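/* Link-local (management) frames cannot be sent through the regular data
 * path; queue them to the switch driver's deferred xmit worker, which
 * transmits them from process context.
 */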
static struct sk_buff *sja1105_defer_xmit(struct dsa_port *dp,
					  struct sk_buff *skb)
{
	struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(dp->ds);
	struct sja1105_tagger_private *priv = sja1105_tagger_private(dp->ds);
	void (*xmit_work_fn)(struct kthread_work *work);
	struct sja1105_deferred_xmit_work *xmit_work;
	struct kthread_worker *xmit_worker;

	xmit_work_fn = tagger_data->xmit_work_fn;
	xmit_worker = priv->xmit_worker;

	if (!xmit_work_fn || !xmit_worker)
		return NULL;

	xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
	if (!xmit_work)
		return NULL;

	kthread_init_work(&xmit_work->work, xmit_work_fn);
	/* Hold a reference so that the skb queued for the worker is not
	 * freed when DSA sees us return NULL and drops its own reference.
	 */
	xmit_work->dp = dp;
	xmit_work->skb = skb_get(skb);

	kthread_queue_work(xmit_worker, &xmit_work->work);

	return NULL;
}

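/* Pick the TPID for TX VLAN tags: the tag_8021q EtherType while the port is
 * VLAN-unaware, or the VLAN protocol of the bridge that spans ports of this
 * switch once the port is VLAN-aware.
 */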
static u16 sja1105_xmit_tpid(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	u16 proto;

	/* VLAN-unaware port: use the tag_8021q TPID */
	if (!dsa_port_is_vlan_filtering(dp))
		return ETH_P_SJA1105;

	/* The port is VLAN-aware, so some port of this switch is under a
	 * VLAN-aware bridge. That bridge may not be attached to this exact
	 * port, so search all ports for it.
	 */
	dsa_switch_for_each_port(other_dp, ds) {
		struct net_device *br = dsa_port_bridge_dev_get(other_dp);

		if (!br)
			continue;

		/* Blend in with whatever VLAN protocol (802.1Q or 802.1ad)
		 * the bridge is configured with.
		 */
		br_vlan_get_proto(br, &proto);

		return proto;
	}

	WARN_ONCE(1, "Port is VLAN-aware but cannot find associated bridge!\n");

	return ETH_P_SJA1105;
}

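/* TX forwarding offload: the skb is sent once on behalf of an entire
 * bridging domain rather than towards a precise destination port. Let the
 * switch pick the egress ports, either through its own VLAN/FDB lookup
 * (VLAN-aware bridge) or through an imprecise tag_8021q VLAN that covers
 * the bridge's broadcast domain (VLAN-unaware bridge).
 */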
static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb,
					      struct net_device *netdev)
{
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	unsigned int bridge_num = dsa_port_bridge_num_get(dp);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	u16 tx_vid;

	/* Under a VLAN-aware bridge, send the packet as-is: the switch will
	 * forward it based on its own VLAN and FDB lookup.
	 */
	if (br_vlan_enabled(br))
		return skb;

	/* Under a VLAN-unaware bridge, use an imprecise tag_8021q TX VLAN
	 * that targets the bridge's entire broadcast domain instead of a
	 * specific port.
	 */
	tx_vid = dsa_tag_8021q_bridge_vid(bridge_num);

	return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp), tx_vid);
}

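/* Transform untagged control packets into pvid-tagged (SJA1105_DEFAULT_VLAN)
 * control packets, so that every packet sent by this tagger carries a VLAN
 * tag with the TPID returned by sja1105_xmit_tpid().
 */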
static struct sk_buff *sja1105_pvid_tag_control_pkt(struct dsa_port *dp,
						    struct sk_buff *skb, u8 pcp)
{
	__be16 xmit_tpid = htons(sja1105_xmit_tpid(dp));
	struct vlan_ethhdr *hdr;

	/* If the VLAN tag is in the hwaccel area, move it into the payload
	 * so that both cases are handled uniformly below.
	 */
	if (unlikely(skb_vlan_tag_present(skb))) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (!skb)
			return NULL;
	}

	hdr = (struct vlan_ethhdr *)skb_mac_header(skb);

	/* Already tagged with the TPID we would insert; leave it alone */
	if (hdr->h_vlan_proto == xmit_tpid)
		return skb;

	return vlan_insert_tag(skb, xmit_tpid, (pcp << VLAN_PRIO_SHIFT) |
			       SJA1105_DEFAULT_VLAN);
}

static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	u16 queue_mapping = skb_get_queue_mapping(skb);
	u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
	u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);

	if (skb->offload_fwd_mark)
		return sja1105_imprecise_xmit(skb, netdev);

	/* Link-local (management) traffic is not sent using the tag_8021q
	 * TX VLAN; it is handed to the switch driver's deferred xmit worker
	 * instead.
	 */
	if (unlikely(sja1105_is_link_local(skb))) {
		skb = sja1105_pvid_tag_control_pkt(dp, skb, pcp);
		if (!skb)
			return NULL;

		return sja1105_defer_xmit(dp, skb);
	}

	return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp),
			      ((pcp << VLAN_PRIO_SHIFT) | tx_vid));
}

static struct sk_buff *sja1110_xmit(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	u16 queue_mapping = skb_get_queue_mapping(skb);
	u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
	u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
	__be32 *tx_trailer;
	__be16 *tx_header;
	int trailer_pos;

	if (skb->offload_fwd_mark)
		return sja1105_imprecise_xmit(skb, netdev);

	/* Control packets are transmitted using in-band control extensions
	 * (header + trailer), while data packets use tag_8021q TX VLANs.
	 */
	if (likely(!sja1105_is_link_local(skb)))
		return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp),
				      ((pcp << VLAN_PRIO_SHIFT) | tx_vid));

	skb = sja1105_pvid_tag_control_pkt(dp, skb, pcp);
	if (!skb)
		return NULL;

	skb_push(skb, SJA1110_HEADER_LEN);

	dsa_alloc_etype_header(skb, SJA1110_HEADER_LEN);

	trailer_pos = skb->len;

	tx_header = dsa_etype_header_pos_tx(skb);
	tx_trailer = skb_put(skb, SJA1110_TX_TRAILER_LEN);

	tx_header[0] = htons(ETH_P_SJA1110);
	tx_header[1] = htons(SJA1110_HEADER_HOST_TO_SWITCH |
			     SJA1110_TX_HEADER_HAS_TRAILER |
			     SJA1110_TX_HEADER_TRAILER_POS(trailer_pos));
	*tx_trailer = cpu_to_be32(SJA1110_TX_TRAILER_PRIO(pcp) |
				  SJA1110_TX_TRAILER_SWITCHID(dp->ds->index) |
				  SJA1110_TX_TRAILER_DESTPORTS(BIT(dp->index)));
	if (clone) {
		u8 ts_id = SJA1105_SKB_CB(clone)->ts_id;

		tx_header[1] |= htons(SJA1110_TX_HEADER_TAKE_TS);
		*tx_trailer |= cpu_to_be32(SJA1110_TX_TRAILER_TSTAMP_ID(ts_id));
	}

	return skb;
}

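/* Copy the partial RX timestamp and the original destination MAC bytes
 * carried by the meta frame back into the buffered link-local frame.
 */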
static void sja1105_transfer_meta(struct sk_buff *skb,
				  const struct sja1105_meta *meta)
{
	struct ethhdr *hdr = eth_hdr(skb);

	hdr->h_dest[3] = meta->dmac_byte_3;
	hdr->h_dest[4] = meta->dmac_byte_4;
	SJA1105_SKB_CB(skb)->tstamp = meta->tstamp;
}

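/* RX timestamping on SJA1105 is a two-step process: the switch traps the
 * timestampable link-local frame to the host, then follows it up with a
 * meta frame carrying the partial RX timestamp and the destination MAC
 * bytes it overwrote in the first frame. This state machine buffers the
 * link-local frame until its meta frame arrives, annotates it with the
 * timestamp, and delivers it up the stack in place of the meta frame.
 */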
static struct sk_buff
*sja1105_rcv_meta_state_machine(struct sk_buff *skb,
				struct sja1105_meta *meta,
				bool is_link_local,
				bool is_meta)
{
	/* Step 1: A timestampable frame was received.
	 * Buffer it until we get its meta frame.
	 */
	if (is_link_local) {
		struct dsa_port *dp = dsa_slave_to_port(skb->dev);
		struct sja1105_tagger_private *priv;
		struct dsa_switch *ds = dp->ds;

		priv = sja1105_tagger_private(ds);

		if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state))
			/* Do normal processing. */
			return skb;

		spin_lock(&priv->meta_lock);
		/* Was this a link-local frame instead of the meta
		 * that we were expecting?
		 */
		if (priv->stampable_skb) {
			dev_err_ratelimited(ds->dev,
					    "Expected meta frame, is %12llx "
					    "in the DSA master multicast filter?\n",
					    SJA1105_META_DMAC);
			kfree_skb(priv->stampable_skb);
		}

		/* Hold a reference so that the skb is not freed by the
		 * DSA receive path while we wait for its meta frame.
		 */
		priv->stampable_skb = skb_get(skb);
		spin_unlock(&priv->meta_lock);

		/* Tell DSA we got nothing */
		return NULL;

	/* Step 2: The meta frame arrived. Annotate the buffered frame with
	 * the timestamp it carries and deliver that frame up the stack in
	 * place of the meta frame, which serves no further purpose.
	 */
	} else if (is_meta) {
		struct dsa_port *dp = dsa_slave_to_port(skb->dev);
		struct sja1105_tagger_private *priv;
		struct dsa_switch *ds = dp->ds;
		struct sk_buff *stampable_skb;

		priv = sja1105_tagger_private(ds);

		/* Drop the meta frame if we're not in the right state
		 * to process it.
		 */
		if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state))
			return NULL;

		spin_lock(&priv->meta_lock);

		stampable_skb = priv->stampable_skb;
		priv->stampable_skb = NULL;

		/* Was this a meta frame instead of the link-local
		 * that we were expecting?
		 */
		if (!stampable_skb) {
			dev_err_ratelimited(ds->dev,
					    "Unexpected meta frame\n");
			spin_unlock(&priv->meta_lock);
			return NULL;
		}

		if (stampable_skb->dev != skb->dev) {
			dev_err_ratelimited(ds->dev,
					    "Meta frame on wrong port\n");
			spin_unlock(&priv->meta_lock);
			return NULL;
		}

		/* Free the meta frame and give DSA the buffered stampable_skb
		 * for further processing up the network stack.
		 */
		kfree_skb(skb);
		skb = stampable_skb;
		sja1105_transfer_meta(skb, meta);

		spin_unlock(&priv->meta_lock);
	}

	return skb;
}

static bool sja1105_rxtstamp_get_state(struct dsa_switch *ds)
{
	struct sja1105_tagger_private *priv = sja1105_tagger_private(ds);

	return test_bit(SJA1105_HWTS_RX_EN, &priv->state);
}

static void sja1105_rxtstamp_set_state(struct dsa_switch *ds, bool on)
{
	struct sja1105_tagger_private *priv = sja1105_tagger_private(ds);

	if (on)
		set_bit(SJA1105_HWTS_RX_EN, &priv->state);
	else
		clear_bit(SJA1105_HWTS_RX_EN, &priv->state);

	/* Initialize the meta state machine to a known state */
	if (!priv->stampable_skb)
		return;

	kfree_skb(priv->stampable_skb);
	priv->stampable_skb = NULL;
}

static bool sja1105_skb_has_tag_8021q(const struct sk_buff *skb)
{
	u16 tpid = ntohs(eth_hdr(skb)->h_proto);

	return tpid == ETH_P_SJA1105 || tpid == ETH_P_8021Q ||
	       skb_vlan_tag_present(skb);
}

static bool sja1110_skb_has_inband_control_extension(const struct sk_buff *skb)
{
	return ntohs(eth_hdr(skb)->h_proto) == ETH_P_SJA1110;
}

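/* If the VLAN in the packet is a tag_8021q one, decode @source_port,
 * @switch_id and @vbid from it. Otherwise, leave the VLAN in place and
 * report its VID through @vid for imprecise RX matching.
 */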
static void sja1105_vlan_rcv(struct sk_buff *skb, int *source_port,
			     int *switch_id, int *vbid, u16 *vid)
{
	struct vlan_ethhdr *hdr = (struct vlan_ethhdr *)skb_mac_header(skb);
	u16 vlan_tci;

	if (skb_vlan_tag_present(skb))
		vlan_tci = skb_vlan_tag_get(skb);
	else
		vlan_tci = ntohs(hdr->h_vlan_TCI);

	if (vid_is_dsa_8021q(vlan_tci & VLAN_VID_MASK))
		return dsa_8021q_rcv(skb, source_port, switch_id, vbid);

	/* Not a tag_8021q VLAN; report the VID for imprecise RX matching */
	*vid = vlan_tci & VLAN_VID_MASK;
}

static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
				   struct net_device *netdev)
{
	int source_port = -1, switch_id = -1, vbid = -1;
	struct sja1105_meta meta = {0};
	struct ethhdr *hdr;
	bool is_link_local;
	bool is_meta;
	u16 vid;

	hdr = eth_hdr(skb);
	is_link_local = sja1105_is_link_local(skb);
	is_meta = sja1105_is_meta_frame(skb);

	if (sja1105_skb_has_tag_8021q(skb)) {
		/* Normal data plane traffic path */
		sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid);
	} else if (is_link_local) {
		/* Management traffic path. The switch embeds the source port
		 * and switch ID into bytes 3 and 4 of the destination MAC.
		 */
		source_port = hdr->h_dest[3];
		switch_id = hdr->h_dest[4];

		hdr->h_dest[3] = 0;
		hdr->h_dest[4] = 0;
	} else if (is_meta) {
		sja1105_meta_unpack(skb, &meta);
		source_port = meta.source_port;
		switch_id = meta.switch_id;
	} else {
		return NULL;
	}

	if (vbid >= 1)
		skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
	else if (source_port == -1 || switch_id == -1)
		skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
	else
		skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
	if (!skb->dev) {
		netdev_warn(netdev, "Couldn't decode source port\n");
		return NULL;
	}

	if (!is_link_local)
		dsa_default_offload_fwd_mark(skb);

	return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local,
					      is_meta);
}

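/* SJA1110 metadata frames carry a list of timestamp records; hand each one
 * (timestamp ID, source port, direction, 64-bit timestamp) to the switch
 * driver's meta_tstamp_handler().
 */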
static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
{
	u8 *buf = dsa_etype_header_pos_rx(skb) + SJA1110_HEADER_LEN;
	int switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
	int n_ts = SJA1110_RX_HEADER_N_TS(rx_header);
	struct sja1105_tagger_data *tagger_data;
	struct net_device *master = skb->dev;
	struct dsa_port *cpu_dp;
	struct dsa_switch *ds;
	int i;

	cpu_dp = master->dsa_ptr;
	ds = dsa_switch_find(cpu_dp->dst->index, switch_id);
	if (!ds) {
		net_err_ratelimited("%s: cannot find switch id %d\n",
				    master->name, switch_id);
		return NULL;
	}

	tagger_data = sja1105_tagger_data(ds);
	if (!tagger_data->meta_tstamp_handler)
		return NULL;

	for (i = 0; i <= n_ts; i++) {
		u8 ts_id, source_port, dir;
		u64 tstamp;

		ts_id = buf[0];
		source_port = (buf[1] & GENMASK(7, 4)) >> 4;
		dir = (buf[1] & BIT(3)) >> 3;
		tstamp = be64_to_cpu(*(__be64 *)(buf + 2));

		tagger_data->meta_tstamp_handler(ds, source_port, ts_id, dir,
						 tstamp);

		buf += SJA1110_META_TSTAMP_SIZE;
	}

	/* Discard the meta frame; its timestamps have all been consumed */
	return NULL;
}

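/* Parse the 4-byte SJA1110 in-band control extension: decode the source
 * port and switch ID (from the header, or from the trailer when one is
 * present), extract a potential RX timestamp, then strip the extension
 * from the packet.
 */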
static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb,
							     int *source_port,
							     int *switch_id,
							     bool *host_only)
{
	u16 rx_header;

	if (unlikely(!pskb_may_pull(skb, SJA1110_HEADER_LEN)))
		return NULL;

	/* skb->data points right after the ETH_P_SJA1110 EtherType, which is
	 * where the 16-bit control word of the header is located.
	 */
	rx_header = ntohs(*(__be16 *)skb->data);

	if (rx_header & SJA1110_RX_HEADER_HOST_ONLY)
		*host_only = true;

	if (rx_header & SJA1110_RX_HEADER_IS_METADATA)
		return sja1110_rcv_meta(skb, rx_header);

	/* Timestamp frame: source port and switch ID are in the trailer */
	if (rx_header & SJA1110_RX_HEADER_HAS_TRAILER) {
		int start_of_padding = SJA1110_RX_HEADER_TRAILER_POS(rx_header);
		u8 *rx_trailer = skb_tail_pointer(skb) - SJA1110_RX_TRAILER_LEN;
		u64 *tstamp = &SJA1105_SKB_CB(skb)->tstamp;
		u8 last_byte = rx_trailer[12];

		/* The timestamp is unaligned, so use packing() to extract it */
		packing(rx_trailer, tstamp, 63, 0, 8, UNPACK, 0);

		*source_port = SJA1110_RX_TRAILER_SRC_PORT(last_byte);
		*switch_id = SJA1110_RX_TRAILER_SWITCH_ID(last_byte);

		/* skb->len counts from skb->data, while start_of_padding
		 * counts from the destination MAC address, so subtract
		 * ETH_HLEN when trimming away the padding and the trailer.
		 */
		pskb_trim_rcsum(skb, start_of_padding - ETH_HLEN);
	/* Trap-to-host frame: source port and switch ID are in the header */
	} else {
		*source_port = SJA1110_RX_HEADER_SRC_PORT(rx_header);
		*switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
	}

	skb_pull_rcsum(skb, SJA1110_HEADER_LEN);

	dsa_strip_etype_header(skb, SJA1110_HEADER_LEN);

	/* With skb->data in its final place, update the MAC header so that
	 * eth_hdr() continues to work.
	 */
	skb_set_mac_header(skb, -ETH_HLEN);

	return skb;
}

static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
				   struct net_device *netdev)
{
	int source_port = -1, switch_id = -1, vbid = -1;
	bool host_only = false;
	u16 vid = 0;

	if (sja1110_skb_has_inband_control_extension(skb)) {
		skb = sja1110_rcv_inband_control_extension(skb, &source_port,
							   &switch_id,
							   &host_only);
		if (!skb)
			return NULL;
	}

	/* Packets with in-band control extensions might still have RX VLANs */
	if (likely(sja1105_skb_has_tag_8021q(skb)))
		sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid);

	if (vbid >= 1)
		skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
	else if (source_port == -1 || switch_id == -1)
		skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
	else
		skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
	if (!skb->dev) {
		netdev_warn(netdev, "Couldn't decode source port\n");
		return NULL;
	}

	if (!host_only)
		dsa_default_offload_fwd_mark(skb);

	return skb;
}

static void sja1105_flow_dissect(const struct sk_buff *skb, __be16 *proto,
				 int *offset)
{
	/* No tag added for management frames, all ok */
	if (unlikely(sja1105_is_link_local(skb)))
		return;

	dsa_tag_generic_flow_dissect(skb, proto, offset);
}

static void sja1110_flow_dissect(const struct sk_buff *skb, __be16 *proto,
				 int *offset)
{
	/* Management frames have 2 DSA tags on RX, so the needed_headroom we
	 * declared is fine for the generic dissector adjustment procedure.
	 */
	if (unlikely(sja1105_is_link_local(skb)))
		return dsa_tag_generic_flow_dissect(skb, proto, offset);

	/* For the rest, there is a single DSA tag, the tag_8021q one */
	*offset = VLAN_HLEN;
	*proto = ((__be16 *)skb->data)[(VLAN_HLEN / 2) - 1];
}

static void sja1105_disconnect(struct dsa_switch *ds)
{
	struct sja1105_tagger_private *priv = ds->tagger_data;

	kthread_destroy_worker(priv->xmit_worker);
	kfree(priv);
	ds->tagger_data = NULL;
}

static int sja1105_connect(struct dsa_switch *ds)
{
	struct sja1105_tagger_data *tagger_data;
	struct sja1105_tagger_private *priv;
	struct kthread_worker *xmit_worker;
	int err;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->meta_lock);

	xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit",
					    ds->dst->index, ds->index);
	if (IS_ERR(xmit_worker)) {
		err = PTR_ERR(xmit_worker);
		kfree(priv);
		return err;
	}

	priv->xmit_worker = xmit_worker;

	/* Export functions for switch driver use */
	tagger_data = &priv->data;
	tagger_data->rxtstamp_get_state = sja1105_rxtstamp_get_state;
	tagger_data->rxtstamp_set_state = sja1105_rxtstamp_set_state;
	ds->tagger_data = priv;

	return 0;
}

static const struct dsa_device_ops sja1105_netdev_ops = {
	.name = "sja1105",
	.proto = DSA_TAG_PROTO_SJA1105,
	.xmit = sja1105_xmit,
	.rcv = sja1105_rcv,
	.connect = sja1105_connect,
	.disconnect = sja1105_disconnect,
	.needed_headroom = VLAN_HLEN,
	.flow_dissect = sja1105_flow_dissect,
	.promisc_on_master = true,
};

DSA_TAG_DRIVER(sja1105_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1105);

static const struct dsa_device_ops sja1110_netdev_ops = {
	.name = "sja1110",
	.proto = DSA_TAG_PROTO_SJA1110,
	.xmit = sja1110_xmit,
	.rcv = sja1110_rcv,
	.connect = sja1105_connect,
	.disconnect = sja1105_disconnect,
	.flow_dissect = sja1110_flow_dissect,
	.needed_headroom = SJA1110_HEADER_LEN + VLAN_HLEN,
	.needed_tailroom = SJA1110_RX_TRAILER_LEN + SJA1110_MAX_PADDING_LEN,
};

DSA_TAG_DRIVER(sja1110_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1110);

static struct dsa_tag_driver *sja1105_tag_driver_array[] = {
	&DSA_TAG_DRIVER_NAME(sja1105_netdev_ops),
	&DSA_TAG_DRIVER_NAME(sja1110_netdev_ops),
};

module_dsa_tag_drivers(sja1105_tag_driver_array);

MODULE_LICENSE("GPL v2");