#ifndef _CHELSIO_L2T_H
#define _CHELSIO_L2T_H

#include <linux/spinlock.h>
#include "t3cdev.h"
#include <linux/atomic.h>

enum {
	L2T_STATE_VALID,	/* entry is up to date */
	L2T_STATE_STALE,	/* entry may be used but needs revalidation */
	L2T_STATE_RESOLVING,	/* entry needs address resolution */
	L2T_STATE_UNUSED	/* entry not in use */
};

struct neighbour;
struct sk_buff;
/*
 * Each L2T entry plays multiple roles.  First of all, it keeps state for
 * the corresponding entry of the HW L2 table and maintains a queue of
 * offload packets awaiting address resolution.  Second, it is a node of a
 * hash table chain, where the nodes of the chain are linked together
 * through their next pointer.  Finally, each node is a bucket of a hash
 * table, pointing to the first element in its chain through its first
 * pointer.
 */
struct l2t_entry {
	u16 state;			/* entry state */
	u16 idx;			/* entry index */
	u32 addr;			/* dest IP address */
	int ifindex;			/* neighbor's net_device's ifindex */
	u16 smt_idx;			/* SMT index */
	u16 vlan;			/* VLAN TCI (id: bits 0-11, prio: 13-15) */
	struct neighbour *neigh;	/* to get neighbor state */
	struct l2t_entry *first;	/* start of hash chain */
	struct l2t_entry *next;		/* next l2t_entry on chain */
	struct sk_buff_head arpq;	/* queue of packets awaiting resolution */
	spinlock_t lock;
	atomic_t refcnt;		/* entry reference count */
	u8 dmac[6];			/* neighbour's MAC address */
};

struct l2t_data {
	unsigned int nentries;		/* number of entries */
	struct l2t_entry *rover;	/* starting point for next allocation */
	atomic_t nfree;			/* number of free entries */
	rwlock_t lock;
	struct rcu_head rcu_head;	/* for deferred (RCU) freeing */
	struct l2t_entry l2tab[];	/* the table itself, nentries long */
};

typedef void (*arp_failure_handler_func)(struct t3cdev *dev,
					 struct sk_buff *skb);

/*
 * Callback stored in an skb to handle address resolution failure.
 */
struct l2t_skb_cb {
	arp_failure_handler_func arp_failure_handler;
};

#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)

static inline void set_arp_failure_handler(struct sk_buff *skb,
					   arp_failure_handler_func hnd)
{
	L2T_SKB_CB(skb)->arp_failure_handler = hnd;
}
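
/*
 * Usage sketch (not code from this driver): a caller queuing an offload
 * packet typically installs a failure handler first, so the skb can be
 * cleaned up if address resolution never completes.  The handler name
 * below is hypothetical; the signature matches arp_failure_handler_func.
 *
 *	static void my_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
 *	{
 *		kfree_skb(skb);		/* drop the packet on failure *\/
 *	}
 *
 *	set_arp_failure_handler(skb, my_arp_failure);
 *	l2t_send(dev, skb, e);
 */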

/* L2DATA() must be called under the RCU read lock: l2opt is RCU-protected. */
#define L2DATA(cdev) (rcu_dereference((cdev)->l2opt))

#define W_TCB_L2T_IX	0
#define S_TCB_L2T_IX	7
#define M_TCB_L2T_IX	0x7ffULL
#define V_TCB_L2T_IX(x)	((x) << S_TCB_L2T_IX)
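
/*
 * Sketch: these follow the usual Chelsio W_/S_/M_/V_ field conventions
 * (word offset, shift, unshifted mask, value builder), so installing an
 * entry's index into the L2T_IX field of a TCB word looks roughly like:
 *
 *	u64 val  = V_TCB_L2T_IX(e->idx & M_TCB_L2T_IX);
 *	u64 mask = V_TCB_L2T_IX(M_TCB_L2T_IX);
 */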

void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
			     struct net_device *dev, const void *daddr);
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
		     struct l2t_entry *e);
void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);

int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);

/* Fast-path send: go straight to the offload queue while the entry is
 * valid, otherwise fall back to the slow path for (re)resolution. */
static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
			   struct l2t_entry *e)
{
	if (likely(e->state == L2T_STATE_VALID))
		return cxgb3_ofld_send(dev, skb);
	return t3_l2t_send_slow(dev, skb, e);
}

static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e)
{
	struct l2t_data *d;

	rcu_read_lock();
	d = L2DATA(t);

	/*
	 * Drop the caller's reference; return the entry to the table only
	 * if this was the last reference and the L2 table still exists.
	 * The RCU read lock keeps d alive across the free.
	 */
	if (atomic_dec_and_test(&e->refcnt) && d)
		t3_l2e_free(d, e);

	rcu_read_unlock();
}
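
/*
 * Typical entry lifecycle (a sketch of the usual offload flow, not code
 * from this driver): look an entry up once per connection, send through
 * it, and drop the reference on teardown.
 *
 *	struct l2t_entry *e;
 *
 *	e = t3_l2t_get(cdev, dst, netdev, daddr);
 *	if (!e)
 *		return -ENOMEM;
 *	...
 *	l2t_send(cdev, skb, e);		/* fast once resolved *\/
 *	...
 *	l2t_release(cdev, e);		/* on connection teardown *\/
 */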

static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
	/* A 0 -> 1 refcount transition takes the entry off the free list. */
	if (d && atomic_add_return(1, &e->refcnt) == 1)
		atomic_dec(&d->nfree);
}
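
/*
 * Sketch: code that stashes an extra pointer to an entry takes an
 * additional reference so the entry cannot be freed underneath it; every
 * l2t_hold() must be balanced by an l2t_release().  'conn' below is a
 * hypothetical per-connection structure.
 *
 *	rcu_read_lock();
 *	l2t_hold(L2DATA(cdev), e);	/* L2DATA() needs the RCU read lock *\/
 *	rcu_read_unlock();
 *	conn->l2t = e;
 */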

#endif /* _CHELSIO_L2T_H */