#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/neighbour.h>
#include "common.h"
#include "t3cdev.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

#define VLAN_NONE 0xfff

/*
 * Module locking notes:  There is a RW lock protecting the L2 table as a
 * whole plus a spinlock per L2T entry.  Entry lookups and allocations happen
 * under the protection of the table lock, individual entry changes happen
 * while holding that entry's spinlock.  The table lock nests outside the
 * entry locks.  Allocations of new entries take the table lock as writers so
 * no other lookups can happen while allocating new entries.  Entry updates
 * take the table lock as readers so multiple entries can be updated in
 * parallel.  An L2T entry can be dropped by decrementing its reference count
 * and therefore can happen in parallel with entry allocation but no entry
 * can change state or increment its ref count during allocation as both of
 * these perform lookups.
 */
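
/*
 * Illustrative lock-ordering sketch (not called anywhere): code that must
 * modify an individual entry while holding the table lock follows the
 * nesting described above, as t3_l2t_get() does:
 *
 *	write_lock_bh(&d->lock);	table lock first, BHs off
 *	spin_lock(&e->lock);		entry lock nests inside
 *	...modify *e...
 *	spin_unlock(&e->lock);
 *	write_unlock_bh(&d->lock);
 */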

/* The 802.1Q TCI stores the priority (PCP) in its top three bits. */
static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
	return e->vlan >> 13;
}

static inline unsigned int arp_hash(u32 key, int ifindex,
				    const struct l2t_data *d)
{
	return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
}

/*
 * Replace an entry's neighbour, taking a reference on the new neighbour
 * before dropping the reference on the old one, if any.
 */
static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
	neigh_hold(n);
	if (e->neigh)
		neigh_release(e->neigh);
	e->neigh = n;
}

/*
 * Set up an L2T entry and send any packets waiting in the arp queue.  The
 * supplied skb is used for the CPL_L2T_WRITE_REQ.  Must be called with the
 * entry locked.
 */
static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
				  struct l2t_entry *e)
{
	struct cpl_l2t_write_req *req;
	struct sk_buff *tmp;

	if (!skb) {
		skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;
	}

	/* Build the CPL that writes this entry's MAC/VLAN into the HW table */
	req = __skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
	req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
			    V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
			    V_L2T_W_PRIO(vlan_prio(e)));
	memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
	skb->priority = CPL_PRIORITY_CONTROL;
	cxgb3_ofld_send(dev, skb);

	/* The entry is usable now, drain any packets queued during resolution */
	skb_queue_walk_safe(&e->arpq, skb, tmp) {
		__skb_unlink(skb, &e->arpq);
		cxgb3_ofld_send(dev, skb);
	}
	e->state = L2T_STATE_VALID;

	return 0;
}

/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
	__skb_queue_tail(&e->arpq, skb);
}

int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
		     struct l2t_entry *e)
{
again:
	switch (e->state) {
	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
		fallthrough;
	case L2T_STATE_VALID:	/* fast-path, send the packet on */
		return cxgb3_ofld_send(dev, skb);
	case L2T_STATE_RESOLVING:
		spin_lock_bh(&e->lock);
		if (e->state != L2T_STATE_RESOLVING) {
			/* ARP already completed */
			spin_unlock_bh(&e->lock);
			goto again;
		}
		arpq_enqueue(e, skb);
		spin_unlock_bh(&e->lock);

		/*
		 * Only the first packet added to the arpq should kick off
		 * resolution.  However, because the alloc_skb below can fail,
		 * we allow each packet added to the arpq to retry resolution
		 * as a way of recovering from transient memory exhaustion.
		 * A better way would be to use a work request to retry L2T
		 * entries when there's no memory.
		 */
		if (!neigh_event_send(e->neigh, NULL)) {
			skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
					GFP_ATOMIC);
			if (!skb)
				break;

			spin_lock_bh(&e->lock);
			if (!skb_queue_empty(&e->arpq))
				setup_l2e_send_pending(dev, skb, e);
			else	/* we lost the race */
				__kfree_skb(skb);
			spin_unlock_bh(&e->lock);
		}
	}
	return 0;
}

EXPORT_SYMBOL(t3_l2t_send_slow);

void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
{
again:
	switch (e->state) {
	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
		return;
	case L2T_STATE_VALID:	/* fast-path, nothing to do */
		return;
	case L2T_STATE_RESOLVING:
		spin_lock_bh(&e->lock);
		if (e->state != L2T_STATE_RESOLVING) {
			/* ARP already completed */
			spin_unlock_bh(&e->lock);
			goto again;
		}
		spin_unlock_bh(&e->lock);

		/*
		 * Resolution is in progress; poke the neighbour layer so it
		 * (re)sends an ARP request.  The entry leaves RESOLVING when
		 * t3_l2t_update() sees the neighbour change.
		 */
		neigh_event_send(e->neigh, NULL);
	}
}

EXPORT_SYMBOL(t3_l2t_send_event);

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	if (!atomic_read(&d->nfree))
		return NULL;

	/* there's definitely a free entry */
	for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
		if (atomic_read(&e->refcnt) == 0)
			goto found;

	for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e) ;
found:
	d->rover = e + 1;
	atomic_dec(&d->nfree);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state != L2T_STATE_UNUSED) {
		int hash = arp_hash(e->addr, e->ifindex, d);

		for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
			if (*p == e) {
				*p = e->next;
				break;
			}
		e->state = L2T_STATE_UNUSED;
	}
	return e;
}

/*
 * Called when an L2T entry has no more users.  The entry is left in the hash
 * table since it is likely to be reused but we also bump nfree to indicate
 * that the entry can be reallocated for a different neighbour.  We also drop
 * the existing neighbour reference in case the neighbour is going away and is
 * waiting on our reference.
 *
 * Because entries can be reallocated to other neighbours once their ref count
 * drops to 0 we need to take the entry's lock to avoid races with a new
 * incarnation.
 */
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
{
	spin_lock_bh(&e->lock);
	if (atomic_read(&e->refcnt) == 0) {	/* hasn't been recycled */
		if (e->neigh) {
			neigh_release(e->neigh);
			e->neigh = NULL;
		}
	}
	spin_unlock_bh(&e->lock);
	atomic_inc(&d->nfree);
}

EXPORT_SYMBOL(t3_l2e_free);

/*
 * Update an L2T entry that was previously used for the same next hop as
 * neigh.  Must be called with softirqs disabled.
 */
static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
	unsigned int nud_state;

	spin_lock(&e->lock);	/* avoid race with t3_l2e_free */

	if (neigh != e->neigh)
		neigh_replace(e, neigh);
	nud_state = neigh->nud_state;
	if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
	    !(nud_state & NUD_VALID))
		e->state = L2T_STATE_RESOLVING;
	else if (nud_state & NUD_CONNECTED)
		e->state = L2T_STATE_VALID;
	else
		e->state = L2T_STATE_STALE;
	spin_unlock(&e->lock);
}

struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
			     struct net_device *dev, const void *daddr)
{
	struct l2t_entry *e = NULL;
	struct neighbour *neigh;
	struct port_info *p;
	struct l2t_data *d;
	int hash;
	u32 addr;
	int ifidx;
	int smt_idx;

	rcu_read_lock();
	neigh = dst_neigh_lookup(dst, daddr);
	if (!neigh)
		goto done_rcu;

	addr = *(u32 *) neigh->primary_key;
	ifidx = neigh->dev->ifindex;

	if (!dev)
		dev = neigh->dev;
	p = netdev_priv(dev);
	smt_idx = p->port_id;

	d = L2DATA(cdev);
	if (!d)
		goto done_rcu;

	hash = arp_hash(addr, ifidx, d);

	write_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (e->addr == addr && e->ifindex == ifidx &&
		    e->smt_idx == smt_idx) {
			l2t_hold(d, e);
			if (atomic_read(&e->refcnt) == 1)
				reuse_entry(e, neigh);
			goto done_unlock;
		}

	/* Need to allocate a new entry */
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);	/* avoid race with t3_l2e_free */
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;
		e->state = L2T_STATE_RESOLVING;
		e->addr = addr;
		e->ifindex = ifidx;
		e->smt_idx = smt_idx;
		atomic_set(&e->refcnt, 1);
		neigh_replace(e, neigh);
		if (is_vlan_dev(neigh->dev))
			e->vlan = vlan_dev_vlan_id(neigh->dev);
		else
			e->vlan = VLAN_NONE;
		spin_unlock(&e->lock);
	}
done_unlock:
	write_unlock_bh(&d->lock);
done_rcu:
	if (neigh)
		neigh_release(neigh);
	rcu_read_unlock();
	return e;
}

EXPORT_SYMBOL(t3_l2t_get);
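
/*
 * Usage sketch (illustrative only, not part of this driver): an offload
 * client would typically resolve an entry for a connection's route, send
 * through it, and drop its reference when done.  l2t_release() is assumed
 * to be the refcount helper declared in l2t.h.
 *
 *	struct l2t_entry *e;
 *
 *	e = t3_l2t_get(tdev, dst, egress_dev, &peer_ip);
 *	if (!e)
 *		return -ENOMEM;
 *	err = t3_l2t_send_slow(tdev, skb, e);
 *	...
 *	l2t_release(tdev, e);
 */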

/*
 * Called when address resolution fails for an L2T entry to handle packets
 * on the arpq head.  If a packet specifies a failure handler it is invoked,
 * otherwise the packet is sent to the offload device.
 */
static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff_head *arpq)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(arpq, skb, tmp) {
		struct l2t_skb_cb *cb = L2T_SKB_CB(skb);

		__skb_unlink(skb, arpq);
		if (cb->arp_failure_handler)
			cb->arp_failure_handler(dev, skb);
		else
			cxgb3_ofld_send(dev, skb);
	}
}

/*
 * Called when the host's ARP layer makes a change to some entry that is
 * loaded into the HW L2 table.
 */
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
{
	struct sk_buff_head arpq;
	struct l2t_entry *e;
	struct l2t_data *d = L2DATA(dev);
	u32 addr = *(u32 *) neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = arp_hash(addr, ifidx, d);

	read_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (e->addr == addr && e->ifindex == ifidx) {
			spin_lock(&e->lock);
			goto found;
		}
	read_unlock_bh(&d->lock);
	return;

found:
	__skb_queue_head_init(&arpq);

	/* BHs stay disabled here; they are re-enabled by spin_unlock_bh() below */
	read_unlock(&d->lock);
	if (atomic_read(&e->refcnt)) {
		if (neigh != e->neigh)
			neigh_replace(e, neigh);

		if (e->state == L2T_STATE_RESOLVING) {
			if (neigh->nud_state & NUD_FAILED) {
				skb_queue_splice_init(&e->arpq, &arpq);
			} else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
				setup_l2e_send_pending(dev, NULL, e);
		} else {
			e->state = neigh->nud_state & NUD_CONNECTED ?
			    L2T_STATE_VALID : L2T_STATE_STALE;
			if (!ether_addr_equal(e->dmac, neigh->ha))
				setup_l2e_send_pending(dev, NULL, e);
		}
	}
	spin_unlock_bh(&e->lock);

	if (!skb_queue_empty(&arpq))
		handle_failed_resolution(dev, &arpq);
}
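
/*
 * Caller sketch (illustrative only): t3_l2t_update() is normally driven
 * from a netevent notifier in the offload glue, roughly:
 *
 *	static int nb_callback(struct notifier_block *self,
 *			       unsigned long event, void *ctx)
 *	{
 *		if (event == NETEVENT_NEIGH_UPDATE) {
 *			struct neighbour *neigh = ctx;
 *
 *			t3_l2t_update(tdev_of(neigh->dev), neigh);
 *		}
 *		return 0;
 *	}
 *
 * where tdev_of() stands in for however the caller maps a net_device to
 * its t3cdev.
 */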

struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
{
	struct l2t_data *d;
	int i;

	d = kvzalloc(struct_size(d, l2tab, l2t_capacity), GFP_KERNEL);
	if (!d)
		return NULL;

	d->nentries = l2t_capacity;
	d->rover = &d->l2tab[1];	/* entry 0 is never allocated */
	atomic_set(&d->nfree, l2t_capacity - 1);
	rwlock_init(&d->lock);

	for (i = 0; i < l2t_capacity; ++i) {
		d->l2tab[i].idx = i;
		d->l2tab[i].state = L2T_STATE_UNUSED;
		__skb_queue_head_init(&d->l2tab[i].arpq);
		spin_lock_init(&d->l2tab[i].lock);
		atomic_set(&d->l2tab[i].refcnt, 0);
	}
	return d;
}
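
/*
 * Setup sketch (illustrative only): the offload setup path sizes the table
 * from adapter parameters and publishes it on the t3cdev, roughly:
 *
 *	struct l2t_data *d = t3_init_l2t(l2t_capacity);
 *
 *	if (!d)
 *		return -ENOMEM;
 *	RCU_INIT_POINTER(tdev->l2opt, d);
 *
 * l2t_capacity must be a power of 2, since arp_hash() masks the hash with
 * (nentries - 1).
 */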