/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/neighbour.h>
#include "common.h"
#include "t3cdev.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

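/*
 * Sentinel VID stored in an entry whose egress device is not a VLAN device;
 * 0xfff is a reserved VID in 802.1Q, so it cannot clash with a real tag.
 */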
#define VLAN_NONE 0xfff

/*
 * Module locking notes:  There is a RW lock protecting the L2 table as a
 * whole plus a spinlock per L2T entry.  Entry lookups and allocations happen
 * under the protection of the table lock, individual entry changes happen
 * while holding that entry's spinlock.  The table lock nests outside the
 * entry locks.  Allocations of new entries take the table lock as writers so
 * no other lookups can happen while allocating new entries.  Entry updates
 * take the table lock as readers so multiple entries can be updated in
 * parallel.  Dropping an L2T entry is done by decrementing its reference
 * count and can therefore happen in parallel with entry allocation, but no
 * entry can change state or increment its ref count during allocation as
 * both of these perform lookups.
 */

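/*
 * e->vlan is laid out like a VLAN TCI: the 802.1p priority occupies bits
 * 15:13 and the VID bits 11:0.  For example, a value of 0xa005 encodes
 * priority 5 and VID 5.
 */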
static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
    return e->vlan >> 13;
}

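/*
 * Hash an (IP address, ifindex) pair into an L2 table bucket.  The mask
 * relies on the table size being a power of two.
 */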
static inline unsigned int arp_hash(u32 key, int ifindex,
                    const struct l2t_data *d)
{
    return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
}

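/*
 * Swap in a new neighbour, taking the new reference before releasing the
 * old one so the case n == e->neigh is handled safely.  Called with the
 * entry's lock held.
 */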
static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
    neigh_hold(n);
    if (e->neigh)
        neigh_release(e->neigh);
    e->neigh = n;
}

/*
 * Set up an L2T entry and send any packets waiting in the arp queue.  The
 * supplied skb is used for the CPL_L2T_WRITE_REQ.  Must be called with the
 * entry locked.
 */
static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
                  struct l2t_entry *e)
{
    struct cpl_l2t_write_req *req;
    struct sk_buff *tmp;

    if (!skb) {
        skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
        if (!skb)
            return -ENOMEM;
    }

    req = __skb_put(skb, sizeof(*req));
    req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
    OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
    req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
                V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
                V_L2T_W_PRIO(vlan_prio(e)));
    memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
    memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
    skb->priority = CPL_PRIORITY_CONTROL;
    cxgb3_ofld_send(dev, skb);

    skb_queue_walk_safe(&e->arpq, skb, tmp) {
        __skb_unlink(skb, &e->arpq);
        cxgb3_ofld_send(dev, skb);
    }
    e->state = L2T_STATE_VALID;

    return 0;
}

/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
    __skb_queue_tail(&e->arpq, skb);
}

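/*
 * Slow path for transmitting a packet through an L2T entry.  A VALID entry
 * sends the packet straight to the offload device, a STALE entry is marked
 * VALID again after kicking off neighbour revalidation, and a RESOLVING
 * entry queues the packet on the arpq until resolution completes.
 */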
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
             struct l2t_entry *e)
{
again:
    switch (e->state) {
    case L2T_STATE_STALE:   /* entry is stale, kick off revalidation */
        neigh_event_send(e->neigh, NULL);
        spin_lock_bh(&e->lock);
        if (e->state == L2T_STATE_STALE)
            e->state = L2T_STATE_VALID;
        spin_unlock_bh(&e->lock);
        fallthrough;
    case L2T_STATE_VALID:   /* fast-path, send the packet on */
        return cxgb3_ofld_send(dev, skb);
    case L2T_STATE_RESOLVING:
        spin_lock_bh(&e->lock);
        if (e->state != L2T_STATE_RESOLVING) {
            /* ARP already completed */
            spin_unlock_bh(&e->lock);
            goto again;
        }
        arpq_enqueue(e, skb);
        spin_unlock_bh(&e->lock);

        /*
         * Only the first packet added to the arpq should kick off
         * resolution.  However, because the alloc_skb below can fail,
         * we allow each packet added to the arpq to retry resolution
         * as a way of recovering from transient memory exhaustion.
         * A better way would be to use a work request to retry L2T
         * entries when there's no memory.
         */
        if (!neigh_event_send(e->neigh, NULL)) {
            skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
                    GFP_ATOMIC);
            if (!skb)
                break;

            spin_lock_bh(&e->lock);
            if (!skb_queue_empty(&e->arpq))
                setup_l2e_send_pending(dev, skb, e);
            else    /* we lost the race */
                __kfree_skb(skb);
            spin_unlock_bh(&e->lock);
        }
    }
    return 0;
}

EXPORT_SYMBOL(t3_l2t_send_slow);

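/*
 * Like t3_l2t_send_slow() but without a packet to transmit: only the
 * entry's state machine is advanced and neighbour resolution is kicked
 * off where needed.
 */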
void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
{
again:
    switch (e->state) {
    case L2T_STATE_STALE:   /* entry is stale, kick off revalidation */
        neigh_event_send(e->neigh, NULL);
        spin_lock_bh(&e->lock);
        if (e->state == L2T_STATE_STALE)
            e->state = L2T_STATE_VALID;
        spin_unlock_bh(&e->lock);
        return;
    case L2T_STATE_VALID:   /* fast-path, send the packet on */
        return;
    case L2T_STATE_RESOLVING:
        spin_lock_bh(&e->lock);
        if (e->state != L2T_STATE_RESOLVING) {
            /* ARP already completed */
            spin_unlock_bh(&e->lock);
            goto again;
        }
        spin_unlock_bh(&e->lock);

        /*
         * Unlike t3_l2t_send_slow() there is no packet to queue here,
         * so just kick off neighbour resolution.  Once the neighbour
         * is resolved, t3_l2t_update() will push the resulting state
         * change to the hardware.
         */
        neigh_event_send(e->neigh, NULL);
    }
}

EXPORT_SYMBOL(t3_l2t_send_event);

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
    struct l2t_entry *end, *e, **p;

    if (!atomic_read(&d->nfree))
        return NULL;

    /* there's definitely a free entry */
    for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
        if (atomic_read(&e->refcnt) == 0)
            goto found;

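    /*
     * No free entry between the rover and the end of the table; wrap
     * around and scan from the beginning (entry 0 is never used).
     */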
    for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e) ;
found:
    d->rover = e + 1;
    atomic_dec(&d->nfree);

    /*
     * The entry we found may be an inactive entry that is
     * presently in the hash table.  We need to remove it.
     */
    if (e->state != L2T_STATE_UNUSED) {
        int hash = arp_hash(e->addr, e->ifindex, d);

        for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
            if (*p == e) {
                *p = e->next;
                break;
            }
        e->state = L2T_STATE_UNUSED;
    }
    return e;
}

/*
 * Called when an L2T entry has no more users.  The entry is left in the hash
 * table since it is likely to be reused but we also bump nfree to indicate
 * that the entry can be reallocated for a different neighbor.  We also drop
 * the existing neighbor reference in case the neighbor is going away and is
 * waiting on our reference.
 *
 * Because entries can be reallocated to other neighbors once their ref count
 * drops to 0 we need to take the entry's lock to avoid races with a new
 * incarnation.
 */
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
{
    spin_lock_bh(&e->lock);
    if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
        if (e->neigh) {
            neigh_release(e->neigh);
            e->neigh = NULL;
        }
    }
    spin_unlock_bh(&e->lock);
    atomic_inc(&d->nfree);
}

EXPORT_SYMBOL(t3_l2e_free);

/*
 * Update an L2T entry that was previously used for the same next hop as neigh.
 * Must be called with softirqs disabled.
 */
static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
    unsigned int nud_state;

    spin_lock(&e->lock);    /* avoid race with t3_l2t_free */

    if (neigh != e->neigh)
        neigh_replace(e, neigh);
    nud_state = neigh->nud_state;
    if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
        !(nud_state & NUD_VALID))
        e->state = L2T_STATE_RESOLVING;
    else if (nud_state & NUD_CONNECTED)
        e->state = L2T_STATE_VALID;
    else
        e->state = L2T_STATE_STALE;
    spin_unlock(&e->lock);
}

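/*
 * Look up or allocate an L2T entry for the given destination and egress
 * device.  On success the entry is returned with a reference held on behalf
 * of the caller; NULL is returned if the neighbour lookup fails, the device
 * has no L2 table, or the table is full.
 */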
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
                 struct net_device *dev, const void *daddr)
{
    struct l2t_entry *e = NULL;
    struct neighbour *neigh;
    struct port_info *p;
    struct l2t_data *d;
    int hash;
    u32 addr;
    int ifidx;
    int smt_idx;

    rcu_read_lock();
    neigh = dst_neigh_lookup(dst, daddr);
    if (!neigh)
        goto done_rcu;

    addr = *(u32 *) neigh->primary_key;
    ifidx = neigh->dev->ifindex;

    if (!dev)
        dev = neigh->dev;
    p = netdev_priv(dev);
    smt_idx = p->port_id;

    d = L2DATA(cdev);
    if (!d)
        goto done_rcu;

    hash = arp_hash(addr, ifidx, d);

    write_lock_bh(&d->lock);
    for (e = d->l2tab[hash].first; e; e = e->next)
        if (e->addr == addr && e->ifindex == ifidx &&
            e->smt_idx == smt_idx) {
            l2t_hold(d, e);
            if (atomic_read(&e->refcnt) == 1)
                reuse_entry(e, neigh);
            goto done_unlock;
        }

    /* Need to allocate a new entry */
    e = alloc_l2e(d);
    if (e) {
        spin_lock(&e->lock);    /* avoid race with t3_l2t_free */
        e->next = d->l2tab[hash].first;
        d->l2tab[hash].first = e;
        e->state = L2T_STATE_RESOLVING;
        e->addr = addr;
        e->ifindex = ifidx;
        e->smt_idx = smt_idx;
        atomic_set(&e->refcnt, 1);
        neigh_replace(e, neigh);
        if (is_vlan_dev(neigh->dev))
            e->vlan = vlan_dev_vlan_id(neigh->dev);
        else
            e->vlan = VLAN_NONE;
        spin_unlock(&e->lock);
    }
done_unlock:
    write_unlock_bh(&d->lock);
done_rcu:
    if (neigh)
        neigh_release(neigh);
    rcu_read_unlock();
    return e;
}

EXPORT_SYMBOL(t3_l2t_get);

/*
 * Called when address resolution fails for an L2T entry to handle packets
 * on the arpq head.  If a packet specifies a failure handler it is invoked,
 * otherwise the packet is sent to the offload device.
 *
 * XXX: maybe we should abandon the latter behavior and just require a failure
 * handler.
 */
static void handle_failed_resolution(struct t3cdev *dev,
                     struct sk_buff_head *arpq)
{
    struct sk_buff *skb, *tmp;

    skb_queue_walk_safe(arpq, skb, tmp) {
        struct l2t_skb_cb *cb = L2T_SKB_CB(skb);

        __skb_unlink(skb, arpq);
        if (cb->arp_failure_handler)
            cb->arp_failure_handler(dev, skb);
        else
            cxgb3_ofld_send(dev, skb);
    }
}

/*
 * Called when the host's ARP layer makes a change to some entry that is
 * loaded into the HW L2 table.
 */
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
{
    struct sk_buff_head arpq;
    struct l2t_entry *e;
    struct l2t_data *d = L2DATA(dev);
    u32 addr = *(u32 *) neigh->primary_key;
    int ifidx = neigh->dev->ifindex;
    int hash = arp_hash(addr, ifidx, d);

    read_lock_bh(&d->lock);
    for (e = d->l2tab[hash].first; e; e = e->next)
        if (e->addr == addr && e->ifindex == ifidx) {
            spin_lock(&e->lock);
            goto found;
        }
    read_unlock_bh(&d->lock);
    return;

found:
    __skb_queue_head_init(&arpq);

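    /*
     * Bottom halves remain disabled (from the read_lock_bh() above) until
     * the spin_unlock_bh() at the end, so a plain read_unlock() suffices
     * here.
     */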
    read_unlock(&d->lock);
    if (atomic_read(&e->refcnt)) {
        if (neigh != e->neigh)
            neigh_replace(e, neigh);

        if (e->state == L2T_STATE_RESOLVING) {
            if (neigh->nud_state & NUD_FAILED) {
                skb_queue_splice_init(&e->arpq, &arpq);
            } else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
                setup_l2e_send_pending(dev, NULL, e);
        } else {
            e->state = neigh->nud_state & NUD_CONNECTED ?
                L2T_STATE_VALID : L2T_STATE_STALE;
            if (!ether_addr_equal(e->dmac, neigh->ha))
                setup_l2e_send_pending(dev, NULL, e);
        }
    }
    spin_unlock_bh(&e->lock);

    if (!skb_queue_empty(&arpq))
        handle_failed_resolution(dev, &arpq);
}

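/*
 * Allocate and initialize an L2 table with l2t_capacity entries.  Entry 0 is
 * reserved, so l2t_capacity - 1 entries are actually available.  arp_hash()
 * masks with nentries - 1, so the capacity is expected to be a power of two.
 */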
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
{
    struct l2t_data *d;
    int i;

    d = kvzalloc(struct_size(d, l2tab, l2t_capacity), GFP_KERNEL);
    if (!d)
        return NULL;

    d->nentries = l2t_capacity;
    d->rover = &d->l2tab[1];    /* entry 0 is not used */
    atomic_set(&d->nfree, l2t_capacity - 1);
    rwlock_init(&d->lock);

    for (i = 0; i < l2t_capacity; ++i) {
        d->l2tab[i].idx = i;
        d->l2tab[i].state = L2T_STATE_UNUSED;
        __skb_queue_head_init(&d->l2tab[i].arpq);
        spin_lock_init(&d->l2tab[i].lock);
        atomic_set(&d->l2tab[i].refcnt, 0);
    }
    return d;
}