/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <net/neighbour.h>
#include "cxgb4.h"
#include "l2t.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4_regs.h"
#include "t4_values.h"

/* identifies sync vs async L2T_WRITE_REQs */
#define SYNC_WR_S    12
#define SYNC_WR_V(x) ((x) << SYNC_WR_S)
#define SYNC_WR_F    SYNC_WR_V(1)

struct l2t_data {
	unsigned int l2t_start;		/* start index of our piece of the L2T */
	unsigned int l2t_size;		/* number of entries in l2tab */
	rwlock_t lock;			/* protects hash chains and allocation */
	atomic_t nfree;			/* number of free entries */
	struct l2t_entry *rover;	/* starting point for next allocation */
	struct l2t_entry l2tab[];	/* MUST BE LAST */
};
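
/*
 * Locking, as used throughout this file: l2t_data.lock protects the hash
 * chains and entry allocation, while each l2t_entry's own spinlock protects
 * its state, dmac, neigh and arpq fields.  Where both are held, d->lock is
 * taken first.
 */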

static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
	return e->vlan >> VLAN_PRIO_SHIFT;
}

static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
	if (atomic_add_return(1, &e->refcnt) == 1)	/* 0 -> 1 transition */
		atomic_dec(&d->nfree);
}

/*
 * To avoid having to check address families we do not allow v4 and v6
 * addresses to share the same hash table.  We keep v4 entries in the first
 * half of available hash buckets and v6 in the second.  We need at least two
 * slots in the table to support a v4 and a v6 entry.
 */
enum {
	L2T_MIN_HASH_BUCKETS = 2,
};

static inline unsigned int arp_hash(struct l2t_data *d, const u32 *key,
				    int ifindex)
{
	unsigned int l2t_size_half = d->l2t_size / 2;

	return jhash_2words(*key, ifindex, 0) % l2t_size_half;
}

static inline unsigned int ipv6_hash(struct l2t_data *d, const u32 *key,
				     int ifindex)
{
	unsigned int l2t_size_half = d->l2t_size / 2;
	u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];

	return (l2t_size_half +
		(jhash_2words(xor, ifindex, 0) % l2t_size_half));
}

static unsigned int addr_hash(struct l2t_data *d, const u32 *addr,
			      int addr_len, int ifindex)
{
	return addr_len == 4 ? arp_hash(d, addr, ifindex) :
			       ipv6_hash(d, addr, ifindex);
}

/*
 * Checks if an L2T entry is for the given IP/IPv6 address.  It does not check
 * whether the L2T entry and the address are of the same address family.
 * Callers ensure an address is only checked against L2T entries of the same
 * family, something made trivial by the separation of IP and IPv6 hash chains
 * mentioned above.  Returns 0 if there's a match.
 */
static int addreq(const struct l2t_entry *e, const u32 *addr)
{
	if (e->v6)
		return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
		       (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
	return e->addr[0] ^ addr[0];
}

static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
	neigh_hold(n);
	if (e->neigh)
		neigh_release(e->neigh);
	e->neigh = n;
}

/*
 * Write an L2T entry.  Must be called with the entry locked.
 * The write may be synchronous or asynchronous.
 */
static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
{
	struct l2t_data *d = adap->l2t;
	unsigned int l2t_idx = e->idx + d->l2t_start;
	struct sk_buff *skb;
	struct cpl_l2t_write_req *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);

	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
					l2t_idx | (sync ? SYNC_WR_F : 0) |
					TID_QID_V(adap->sge.fw_evtq.abs_id)));
	req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
	req->l2t_idx = htons(l2t_idx);
	req->vlan = htons(e->vlan);
	if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
		memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));

	t4_mgmt_tx(adap, skb);

	if (sync && e->state != L2T_STATE_SWITCHING)
		e->state = L2T_STATE_SYNC_WRITE;
	return 0;
}

/*
 * Send packets waiting in an L2T entry's ARP queue.  Must be called with the
 * entry's lock held.
 */
static void send_pending(struct adapter *adap, struct l2t_entry *e)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&e->arpq)) != NULL)
		t4_ofld_send(adap, skb);
}

/*
 * Process a CPL_L2T_WRITE_RPL.  Wake up the ARP queue if it completes a
 * synchronous L2T write that keeps the ARP queue blocked.
 */
void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
{
	struct l2t_data *d = adap->l2t;
	unsigned int tid = GET_TID(rpl);
	unsigned int l2t_idx = tid % L2T_SIZE;

	if (unlikely(rpl->status != CPL_ERR_NONE)) {
		dev_err(adap->pdev_dev,
			"Unexpected L2T_WRITE_RPL status %u for entry %u\n",
			rpl->status, l2t_idx);
		return;
	}

	if (tid & SYNC_WR_F) {
		struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start];

		spin_lock(&e->lock);
		if (e->state != L2T_STATE_SWITCHING) {
			send_pending(adap, e);
			e->state = (e->neigh->nud_state & NUD_STALE) ?
					L2T_STATE_STALE : L2T_STATE_VALID;
		}
		spin_unlock(&e->lock);
	}
}

/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
	__skb_queue_tail(&e->arpq, skb);
}

int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
		   struct l2t_entry *e)
{
	struct adapter *adap = netdev2adap(dev);

again:
	switch (e->state) {
	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
		fallthrough;
	case L2T_STATE_VALID:	/* fast-path, send the packet on */
		return t4_ofld_send(adap, skb);
	case L2T_STATE_RESOLVING:
	case L2T_STATE_SYNC_WRITE:
		spin_lock_bh(&e->lock);
		if (e->state != L2T_STATE_SYNC_WRITE &&
		    e->state != L2T_STATE_RESOLVING) {
			spin_unlock_bh(&e->lock);
			goto again;
		}
		arpq_enqueue(e, skb);
		spin_unlock_bh(&e->lock);

		if (e->state == L2T_STATE_RESOLVING &&
		    !neigh_event_send(e->neigh, NULL)) {
			/* The neighbour is already resolved; issue a
			 * synchronous write so the packets we just queued
			 * are flushed once the firmware acks the update.
			 */
			spin_lock_bh(&e->lock);
			if (e->state == L2T_STATE_RESOLVING &&
			    !skb_queue_empty(&e->arpq))
				write_l2e(adap, e, 1);
			spin_unlock_bh(&e->lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(cxgb4_l2t_send);
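
/*
 * Entry state machine, as implemented above and in t4_l2t_update() below:
 * RESOLVING -> SYNC_WRITE once the neighbour resolves and a synchronous L2T
 * write is issued, SYNC_WRITE -> VALID (or STALE) when the firmware acks the
 * write, and VALID <-> STALE as the neighbour ages and is revalidated.
 * SWITCHING entries are managed separately and take no part in resolution.
 */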

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	if (!atomic_read(&d->nfree))
		return NULL;

	/* there's definitely a free entry */
	for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
		if (atomic_read(&e->refcnt) == 0)
			goto found;

	for (e = d->l2tab; atomic_read(&e->refcnt); ++e)
		;
found:
	d->rover = e + 1;
	atomic_dec(&d->nfree);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state < L2T_STATE_SWITCHING)
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}

	e->state = L2T_STATE_UNUSED;
	return e;
}

static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan,
					   u8 port, u8 *dmac)
{
	struct l2t_entry *end, *e, **p;
	struct l2t_entry *first_free = NULL;

	for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
		if (atomic_read(&e->refcnt) == 0) {
			if (!first_free)
				first_free = e;
		} else {
			if (e->state == L2T_STATE_SWITCHING) {
				if (ether_addr_equal(e->dmac, dmac) &&
				    (e->vlan == vlan) && (e->lport == port))
					goto exists;
			}
		}
	}

	if (first_free) {
		e = first_free;
		goto found;
	}

	return NULL;

found:
	/* The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state < L2T_STATE_SWITCHING)
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}
	e->state = L2T_STATE_UNUSED;

exists:
	return e;
}

/* Called when an L2T entry has no more users.  The entry is left in the hash
 * table since it is likely to be reused but we also bump nfree to indicate
 * that the entry can be reallocated for a different neighbour.  We also drop
 * the existing neighbour reference in case the neighbour is going away and is
 * waiting on our reference.
 *
 * Because entries can be reallocated to other neighbours once their ref count
 * drops to 0 we need to take the entry's lock to avoid races with a new
 * incarnation.
 */
static void _t4_l2e_free(struct l2t_entry *e)
{
	struct l2t_data *d;

	if (atomic_read(&e->refcnt) == 0) {	/* hasn't been recycled */
		if (e->neigh) {
			neigh_release(e->neigh);
			e->neigh = NULL;
		}
		__skb_queue_purge(&e->arpq);
	}

	d = container_of(e, struct l2t_data, l2tab[e->idx]);
	atomic_inc(&d->nfree);
}

/* Locked version of _t4_l2e_free */
static void t4_l2e_free(struct l2t_entry *e)
{
	struct l2t_data *d;

	spin_lock_bh(&e->lock);
	if (atomic_read(&e->refcnt) == 0) {	/* hasn't been recycled */
		if (e->neigh) {
			neigh_release(e->neigh);
			e->neigh = NULL;
		}
		__skb_queue_purge(&e->arpq);
	}
	spin_unlock_bh(&e->lock);

	d = container_of(e, struct l2t_data, l2tab[e->idx]);
	atomic_inc(&d->nfree);
}

void cxgb4_l2t_release(struct l2t_entry *e)
{
	if (atomic_dec_and_test(&e->refcnt))
		t4_l2e_free(e);
}
EXPORT_SYMBOL(cxgb4_l2t_release);

/*
 * Update an L2T entry that was previously used for the same next hop as neigh.
 * Must be called with softirqs disabled.
 */
static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
	unsigned int nud_state;

	spin_lock(&e->lock);	/* avoid race with t4_l2t_free */
	if (neigh != e->neigh)
		neigh_replace(e, neigh);
	nud_state = neigh->nud_state;
	if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
	    !(nud_state & NUD_VALID))
		e->state = L2T_STATE_RESOLVING;
	else if (nud_state & NUD_CONNECTED)
		e->state = L2T_STATE_VALID;
	else
		e->state = L2T_STATE_STALE;
	spin_unlock(&e->lock);
}

struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
				const struct net_device *physdev,
				unsigned int priority)
{
	u8 lport;
	u16 vlan;
	struct l2t_entry *e;
	unsigned int addr_len = neigh->tbl->key_len;
	u32 *addr = (u32 *)neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = addr_hash(d, addr, addr_len, ifidx);

	if (neigh->dev->flags & IFF_LOOPBACK)
		lport = netdev2pinfo(physdev)->tx_chan + 4;
	else
		lport = netdev2pinfo(physdev)->lport;

	if (is_vlan_dev(neigh->dev)) {
		vlan = vlan_dev_vlan_id(neigh->dev);
		vlan |= vlan_dev_get_egress_qos_mask(neigh->dev, priority);
	} else {
		vlan = VLAN_NONE;
	}

	write_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (!addreq(e, addr) && e->ifindex == ifidx &&
		    e->vlan == vlan && e->lport == lport) {
			l2t_hold(d, e);
			if (atomic_read(&e->refcnt) == 1)
				reuse_entry(e, neigh);
			goto done;
		}

	/* Need to allocate a new entry */
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);	/* avoid race with t4_l2t_free */
		e->state = L2T_STATE_RESOLVING;
		if (neigh->dev->flags & IFF_LOOPBACK)
			memcpy(e->dmac, physdev->dev_addr, sizeof(e->dmac));
		memcpy(e->addr, addr, addr_len);
		e->ifindex = ifidx;
		e->hash = hash;
		e->lport = lport;
		e->v6 = addr_len == 16;
		atomic_set(&e->refcnt, 1);
		neigh_replace(e, neigh);
		e->vlan = vlan;
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;
		spin_unlock(&e->lock);
	}
done:
	write_unlock_bh(&d->lock);
	return e;
}
EXPORT_SYMBOL(cxgb4_l2t_get);
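
/*
 * Typical use by an offload driver, as a sketch (the call sites live outside
 * this file): pin an entry with cxgb4_l2t_get() when a connection is set up,
 * transmit through it with cxgb4_l2t_send() for the life of the connection,
 * and drop the reference with cxgb4_l2t_release() on teardown.
 */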

u64 cxgb4_select_ntuple(struct net_device *dev,
			const struct l2t_entry *l2t)
{
	struct adapter *adap = netdev2adap(dev);
	struct tp_params *tp = &adap->params.tp;
	u64 ntuple = 0;

	/* Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
		ntuple |= (u64)(FT_VLAN_VLD_F | l2t->vlan) << tp->vlan_shift;

	if (tp->port_shift >= 0)
		ntuple |= (u64)l2t->lport << tp->port_shift;

	if (tp->protocol_shift >= 0)
		ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;

	if (tp->vnic_shift >= 0 && (tp->ingress_config & VNIC_F)) {
		struct port_info *pi = (struct port_info *)netdev_priv(dev);

		ntuple |= (u64)(FT_VNID_ID_VF_V(pi->vin) |
				FT_VNID_ID_PF_V(adap->pf) |
				FT_VNID_ID_VLD_V(pi->vivld)) << tp->vnic_shift;
	}

	return ntuple;
}
EXPORT_SYMBOL(cxgb4_select_ntuple);
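
/*
 * The shift values come from the adapter's TP configuration, so the exact
 * bit layout of the compressed filter tuple varies.  As an illustration only
 * (hypothetical shifts): with vlan_shift == 17 and port_shift == 9, a
 * VLAN-tagged flow on lport 1 would contribute (FT_VLAN_VLD_F | vlan) << 17
 * and 1ULL << 9 to the returned ntuple.
 */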

/*
 * Called when the host's neighbour layer makes a change to some entry that is
 * loaded into the HW L2 table.
 */
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
{
	unsigned int addr_len = neigh->tbl->key_len;
	u32 *addr = (u32 *) neigh->primary_key;
	int hash, ifidx = neigh->dev->ifindex;
	struct sk_buff_head *arpq = NULL;
	struct l2t_data *d = adap->l2t;
	struct l2t_entry *e;

	hash = addr_hash(d, addr, addr_len, ifidx);
	read_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (!addreq(e, addr) && e->ifindex == ifidx) {
			spin_lock(&e->lock);
			if (atomic_read(&e->refcnt))
				goto found;
			spin_unlock(&e->lock);
			break;
		}
	read_unlock_bh(&d->lock);
	return;

found:
	read_unlock(&d->lock);

	if (neigh != e->neigh)
		neigh_replace(e, neigh);

	if (e->state == L2T_STATE_RESOLVING) {
		if (neigh->nud_state & NUD_FAILED) {
			arpq = &e->arpq;
		} else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) &&
			   !skb_queue_empty(&e->arpq)) {
			write_l2e(adap, e, 1);
		}
	} else {
		e->state = neigh->nud_state & NUD_CONNECTED ?
			L2T_STATE_VALID : L2T_STATE_STALE;
		if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
			write_l2e(adap, e, 0);
	}

	if (arpq) {
		struct sk_buff *skb;

		/* Address resolution failed for this L2T entry, so handle
		 * the packets on the arpq head.  If a packet specifies a
		 * failure handler it is invoked, otherwise the packet is
		 * sent to the device.
		 */
		while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
			const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);

			spin_unlock(&e->lock);
			if (cb->arp_err_handler)
				cb->arp_err_handler(cb->handle, skb);
			else
				t4_ofld_send(adap, skb);
			spin_lock(&e->lock);
		}
	}
	spin_unlock_bh(&e->lock);
}

/* Allocate an L2T entry for use by a switching rule.  Such entries need to be
 * explicitly freed and while busy they are not on any hash chain, so normal
 * address resolution updates do not see them.
 */
struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
					 u8 port, u8 *eth_addr)
{
	struct l2t_data *d = adap->l2t;
	struct l2t_entry *e;
	int ret;

	write_lock_bh(&d->lock);
	e = find_or_alloc_l2e(d, vlan, port, eth_addr);
	if (e) {
		spin_lock(&e->lock);	/* avoid race with t4_l2t_free */
		if (!atomic_read(&e->refcnt)) {
			e->state = L2T_STATE_SWITCHING;
			e->vlan = vlan;
			e->lport = port;
			ether_addr_copy(e->dmac, eth_addr);
			atomic_set(&e->refcnt, 1);
			ret = write_l2e(adap, e, 0);
			if (ret < 0) {
				_t4_l2e_free(e);
				spin_unlock(&e->lock);
				write_unlock_bh(&d->lock);
				return NULL;
			}
		} else {
			atomic_inc(&e->refcnt);
		}

		spin_unlock(&e->lock);
	}
	write_unlock_bh(&d->lock);
	return e;
}

/**
 * cxgb4_l2t_alloc_switching - Allocates an L2T entry for switch filters
 * @dev: net_device pointer
 * @vlan: VLAN Id
 * @port: Associated port
 * @dmac: Destination MAC address to add to L2T
 * Returns pointer to the allocated l2t entry
 *
 * Allocates an L2T entry for use by switching rule of a filter
 */
struct l2t_entry *cxgb4_l2t_alloc_switching(struct net_device *dev, u16 vlan,
					    u8 port, u8 *dmac)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_l2t_alloc_switching(adap, vlan, port, dmac);
}
EXPORT_SYMBOL(cxgb4_l2t_alloc_switching);

struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
{
	unsigned int l2t_size;
	int i;
	struct l2t_data *d;

	if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE)
		return NULL;
	l2t_size = l2t_end - l2t_start + 1;
	if (l2t_size < L2T_MIN_HASH_BUCKETS)
		return NULL;

	d = kvzalloc(struct_size(d, l2tab, l2t_size), GFP_KERNEL);
	if (!d)
		return NULL;

	d->l2t_start = l2t_start;
	d->l2t_size = l2t_size;

	d->rover = d->l2tab;
	atomic_set(&d->nfree, l2t_size);
	rwlock_init(&d->lock);

	for (i = 0; i < d->l2t_size; ++i) {
		d->l2tab[i].idx = i;
		d->l2tab[i].state = L2T_STATE_UNUSED;
		spin_lock_init(&d->l2tab[i].lock);
		atomic_set(&d->l2tab[i].refcnt, 0);
		skb_queue_head_init(&d->l2tab[i].arpq);
	}
	return d;
}
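
/*
 * Note: [l2t_start, l2t_end] is the slice of the hardware L2 table this
 * instance manages (in practice handed out by firmware configuration, an
 * assumption not visible in this file); all l2t_size entries start out
 * unused and free.
 */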

static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
{
	struct l2t_data *d = seq->private;

	return pos >= d->l2t_size ? NULL : &d->l2tab[pos];
}

static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	v = l2t_get_idx(seq, *pos);
	++(*pos);
	return v;
}

static void l2t_seq_stop(struct seq_file *seq, void *v)
{
}

static char l2e_state(const struct l2t_entry *e)
{
	switch (e->state) {
	case L2T_STATE_VALID: return 'V';
	case L2T_STATE_STALE: return 'S';
	case L2T_STATE_SYNC_WRITE: return 'W';
	case L2T_STATE_RESOLVING:
		return skb_queue_empty(&e->arpq) ? 'R' : 'A';
	case L2T_STATE_SWITCHING: return 'X';
	default:
		return 'U';
	}
}

bool cxgb4_check_l2t_valid(struct l2t_entry *e)
{
	bool valid;

	spin_lock(&e->lock);
	valid = (e->state == L2T_STATE_VALID);
	spin_unlock(&e->lock);
	return valid;
}
EXPORT_SYMBOL(cxgb4_check_l2t_valid);

static int l2t_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, " Idx IP address                "
			 "Ethernet address  VLAN/P LP State Users Port\n");
	else {
		char ip[60];
		struct l2t_data *d = seq->private;
		struct l2t_entry *e = v;

		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_SWITCHING)
			ip[0] = '\0';
		else
			sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
		seq_printf(seq, "%4u %-25s %17pM %4d %u %2u   %c   %5u %s\n",
			   e->idx + d->l2t_start, ip, e->dmac,
			   e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
			   l2e_state(e), atomic_read(&e->refcnt),
			   e->neigh ? e->neigh->dev->name : "");
		spin_unlock_bh(&e->lock);
	}
	return 0;
}

static const struct seq_operations l2t_seq_ops = {
	.start = l2t_seq_start,
	.next = l2t_seq_next,
	.stop = l2t_seq_stop,
	.show = l2t_seq_show
};

static int l2t_seq_open(struct inode *inode, struct file *file)
{
	int rc = seq_open(file, &l2t_seq_ops);

	if (!rc) {
		struct adapter *adap = inode->i_private;
		struct seq_file *seq = file->private_data;

		seq->private = adap->l2t;
	}
	return rc;
}

const struct file_operations t4_l2t_fops = {
	.owner = THIS_MODULE,
	.open = l2t_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};