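/*
 * cxgb3_offload.c: offload support glue for the Chelsio T3 family of
 * adapters -- client registration, TID management and CPL message dispatch.
 */
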
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/slab.h>
#include <net/neighbour.h>
#include <linux/notifier.h>
#include <linux/atomic.h>
#include <linux/proc_fs.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include "common.h"
#include "regs.h"
#include "cxgb3_ioctl.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"

static LIST_HEAD(client_list);
static LIST_HEAD(ofld_dev_list);
static DEFINE_MUTEX(cxgb3_db_lock);

static DEFINE_RWLOCK(adapter_list_lock);
static LIST_HEAD(adapter_list);

static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x10000;

static void cxgb_neigh_update(struct neighbour *neigh);
static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
			  struct neighbour *neigh, const void *daddr);

static inline int offload_activated(struct t3cdev *tdev)
{
	const struct adapter *adapter = tdev2adap(tdev);

	return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
}

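/**
 *	cxgb3_register_client - register an offload client
 *	@client: the client
 *
 *	Add the client to the client list and call back its add method for
 *	every offload device that is already activated.
 */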
void cxgb3_register_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_add_tail(&client->client_list, &client_list);

	if (client->add) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->add(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_register_client);

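/**
 *	cxgb3_unregister_client - unregister an offload client
 *	@client: the client
 *
 *	Remove the client from the client list and call back its remove
 *	method for every offload device that is still activated.
 */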
void cxgb3_unregister_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_del(&client->client_list);

	if (client->remove) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->remove(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_unregister_client);

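/**
 *	cxgb3_add_clients - activate registered clients for an offload device
 *	@tdev: the offload device
 *
 *	Call back all registered clients once an offload device is activated.
 */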
void cxgb3_add_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->add)
			client->add(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}

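/**
 *	cxgb3_remove_clients - deactivate registered clients for an offload device
 *	@tdev: the offload device
 *
 *	Call back all registered clients once an offload device is deactivated.
 */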
void cxgb3_remove_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->remove)
			client->remove(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}

void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->event_handler)
			client->event_handler(tdev, event, port);
	}
	mutex_unlock(&cxgb3_db_lock);
}

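/*
 * Look up the net_device that owns a MAC address, descending into VLAN and
 * bonding upper devices as needed.
 */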
static struct net_device *get_iff_from_mac(struct adapter *adapter,
					   const unsigned char *mac,
					   unsigned int vlan)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];

		if (ether_addr_equal(dev->dev_addr, mac)) {
			rcu_read_lock();
			if (vlan && vlan != VLAN_VID_MASK) {
				dev = __vlan_find_dev_deep_rcu(dev,
					htons(ETH_P_8021Q), vlan);
			} else if (netif_is_bond_slave(dev)) {
				struct net_device *upper_dev;

				while ((upper_dev =
					netdev_master_upper_dev_get_rcu(dev)))
					dev = upper_dev;
			}
			rcu_read_unlock();
			return dev;
		}
	}
	return NULL;
}

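/*
 * Handle ULP_ISCSI_GET_PARAMS/ULP_ISCSI_SET_PARAMS control requests:
 * report or program the iSCSI DDP limits, tag mask and page-size factors.
 */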
static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
			      void *data)
{
	int i;
	int ret = 0;
	unsigned int val = 0;
	struct ulp_iscsi_info *uiip = data;

	switch (req) {
	case ULP_ISCSI_GET_PARAMS:
		uiip->pdev = adapter->pdev;
		uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
		uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
		uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);

		val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ);
		for (i = 0; i < 4; i++, val >>= 8)
			uiip->pgsz_factor[i] = val & 0xFF;

		val = t3_read_reg(adapter, A_TP_PARA_REG7);
		uiip->max_txsz =
		uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0) & M_PMMAXXFERLEN0,
				     (val >> S_PMMAXXFERLEN1) & M_PMMAXXFERLEN1);

		/*
		 * On tx, the iscsi pdu has to be <= tx page size and has to
		 * fit into the Tx PM FIFO.
		 */
		val = min(adapter->params.tp.tx_pg_size,
			  t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
		uiip->max_txsz = min(val, uiip->max_txsz);

		/* set MaxRxData to 16224 */
		val = t3_read_reg(adapter, A_TP_PARA_REG2);
		if ((val >> S_MAXRXDATA) != 0x3f60) {
			val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE);
			val |= V_MAXRXDATA(0x3f60);
			pr_info("%s, iscsi set MaxRxData to 16224 (0x%x)\n",
				adapter->name, val);
			t3_write_reg(adapter, A_TP_PARA_REG2, val);
		}

		/*
		 * On rx, the iscsi pdu has to be < rx page size and the
		 * max rx data length programmed in TP.
		 */
		val = min(adapter->params.tp.rx_pg_size,
			  ((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
			   S_MAXRXDATA) & M_MAXRXDATA);
		uiip->max_rxsz = min(val, uiip->max_rxsz);
		break;
	case ULP_ISCSI_SET_PARAMS:
		t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
		/* program the DDP page size factors */
		for (i = 0; i < 4; i++)
			val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
		if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) {
			pr_info("%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u\n",
				adapter->name, val, uiip->pgsz_factor[0],
				uiip->pgsz_factor[1], uiip->pgsz_factor[2],
				uiip->pgsz_factor[3]);
			t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
		}
		break;
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}

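/* Response queue used for RDMA events. */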
#define ASYNC_NOTIF_RSPQ 0

static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
{
	int ret = 0;

	switch (req) {
	case RDMA_GET_PARAMS: {
		struct rdma_info *rdma = data;
		struct pci_dev *pdev = adapter->pdev;

		rdma->udbell_physbase = pci_resource_start(pdev, 2);
		rdma->udbell_len = pci_resource_len(pdev, 2);
		rdma->tpt_base =
			t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
		rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
		rdma->pbl_base =
			t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
		rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
		rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
		rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
		rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
		rdma->pdev = pdev;
		break;
	}
	case RDMA_CQ_OP: {
		unsigned long flags;
		struct rdma_cq_op *rdma = data;

		/* may be called in any context */
		spin_lock_irqsave(&adapter->sge.reg_lock, flags);
		ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
					rdma->credits);
		spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
		break;
	}
	case RDMA_GET_MEM: {
		struct ch_mem_range *t = data;
		struct mc7 *mem;

		if ((t->addr & 7) || (t->len & 7))
			return -EINVAL;
		if (t->mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		ret = t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
				     (u64 *) t->buf);
		if (ret)
			return ret;
		break;
	}
	case RDMA_CQ_SETUP: {
		struct rdma_cq_setup *rdma = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_init_cqcntxt(adapter, rdma->id,
					  rdma->base_addr, rdma->size,
					  ASYNC_NOTIF_RSPQ,
					  rdma->ovfl_mode, rdma->credits,
					  rdma->credit_thres);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_CQ_DISABLE:
		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	case RDMA_CTRL_QP_SETUP: {
		struct rdma_ctrlqp_setup *rdma = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
					 SGE_CNTXT_RDMA,
					 ASYNC_NOTIF_RSPQ,
					 rdma->base_addr, rdma->size,
					 FW_RI_TID_START, 1, 0);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_GET_MIB: {
		spin_lock(&adapter->stats_lock);
		t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data);
		spin_unlock(&adapter->stats_lock);
		break;
	}
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}

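/*
 * Control interface for the offload device: answers capability and
 * configuration queries and forwards iSCSI/RDMA requests to their handlers.
 */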
static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct tid_range *tid;
	struct mtutab *mtup;
	struct iff_mac *iffmacp;
	struct ddp_params *ddpp;
	struct adap_ports *ports;
	struct ofld_page_info *rx_page_info;
	struct tp_params *tp = &adapter->params.tp;
	int i;

	switch (req) {
	case GET_MAX_OUTSTANDING_WR:
		*(unsigned int *)data = FW_WR_NUM;
		break;
	case GET_WR_LEN:
		*(unsigned int *)data = WR_FLITS;
		break;
	case GET_TX_MAX_CHUNK:
		*(unsigned int *)data = 1 << 20;
		break;
	case GET_TID_RANGE:
		tid = data;
		tid->num = t3_mc5_size(&adapter->mc5) -
		    adapter->params.mc5.nroutes -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
		tid->base = 0;
		break;
	case GET_STID_RANGE:
		tid = data;
		tid->num = adapter->params.mc5.nservers;
		tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
		break;
	case GET_L2T_CAPACITY:
		*(unsigned int *)data = 2048;
		break;
	case GET_MTUS:
		mtup = data;
		mtup->size = NMTUS;
		mtup->mtus = adapter->params.mtus;
		break;
	case GET_IFF_FROM_MAC:
		iffmacp = data;
		iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
						iffmacp->vlan_tag &
						VLAN_VID_MASK);
		break;
	case GET_DDP_PARAMS:
		ddpp = data;
		ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
		ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
		ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
		break;
	case GET_PORTS:
		ports = data;
		ports->nports = adapter->params.nports;
		for_each_port(adapter, i)
			ports->lldevs[i] = adapter->port[i];
		break;
	case ULP_ISCSI_GET_PARAMS:
	case ULP_ISCSI_SET_PARAMS:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_ulp_iscsi_ctl(adapter, req, data);
	case RDMA_GET_PARAMS:
	case RDMA_CQ_OP:
	case RDMA_CQ_SETUP:
	case RDMA_CQ_DISABLE:
	case RDMA_CTRL_QP_SETUP:
	case RDMA_GET_MEM:
	case RDMA_GET_MIB:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_rdma_ctl(adapter, req, data);
	case GET_RX_PAGE_INFO:
		rx_page_info = data;
		rx_page_info->page_size = tp->rx_pg_size;
		rx_page_info->num = tp->rx_num_pgs;
		break;
	case GET_ISCSI_IPV4ADDR: {
		struct iscsi_ipv4addr *p = data;
		struct port_info *pi = netdev_priv(p->dev);
		p->ipv4addr = pi->iscsi_ipv4addr;
		break;
	}
	case GET_EMBEDDED_INFO: {
		struct ch_embedded_info *e = data;

		spin_lock(&adapter->stats_lock);
		t3_get_fw_version(adapter, &e->fw_vers);
		t3_get_tp_version(adapter, &e->tp_vers);
		spin_unlock(&adapter->stats_lock);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

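/*
 * Dummy receive handler: silently drops any offload packets that arrive
 * before offload processing has been set up.
 */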
static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
				int n)
{
	while (n--)
		dev_kfree_skb_any(skbs[n]);
	return 0;
}

static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
{
}

void cxgb3_set_dummy_ops(struct t3cdev *dev)
{
	dev->recv = rx_offload_blackhole;
	dev->neigh_update = dummy_neigh_update;
}

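/*
 * Free an active-open TID and return its context.
 */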
void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union active_open_entry *p = atid2entry(t, atid);
	void *ctx = p->t3c_tid.ctx;

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);

	return ctx;
}

EXPORT_SYMBOL(cxgb3_free_atid);

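/*
 * Free a server TID and return it to the free pool.
 */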
void cxgb3_free_stid(struct t3cdev *tdev, int stid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union listen_entry *p = stid2entry(t, stid);

	spin_lock_bh(&t->stid_lock);
	p->next = t->sfree;
	t->sfree = p;
	t->stids_in_use--;
	spin_unlock_bh(&t->stid_lock);
}

EXPORT_SYMBOL(cxgb3_free_stid);

void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
		      void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	t->tid_tab[tid].client = client;
	t->tid_tab[tid].ctx = ctx;
	atomic_inc(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_insert_tid);

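/*
 * Populate a TID_RELEASE WR.  The skb must be already properly sized.
 */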
static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
{
	struct cpl_tid_release *req;

	skb->priority = CPL_PRIORITY_SETUP;
	req = __skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

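/*
 * Process the deferred TID release list: send a CPL_TID_RELEASE for each
 * queued entry, falling back to the preallocated nofail_skb when memory is
 * tight and re-queueing the entry if no buffer is available at all.
 */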
static void t3_process_tid_release_list(struct work_struct *work)
{
	struct t3c_data *td = container_of(work, struct t3c_data,
					   tid_release_task);
	struct sk_buff *skb;
	struct t3cdev *tdev = td->dev;

	spin_lock_bh(&td->tid_release_lock);
	while (td->tid_release_list) {
		struct t3c_tid_entry *p = td->tid_release_list;

		td->tid_release_list = p->ctx;
		spin_unlock_bh(&td->tid_release_lock);

		skb = alloc_skb(sizeof(struct cpl_tid_release),
				GFP_KERNEL);
		if (!skb)
			skb = td->nofail_skb;
		if (!skb) {
			spin_lock_bh(&td->tid_release_lock);
			p->ctx = (void *)td->tid_release_list;
			td->tid_release_list = p;
			break;
		}
		mk_tid_release(skb, p - td->tid_maps.tid_tab);
		cxgb3_ofld_send(tdev, skb);
		p->ctx = NULL;
		if (skb == td->nofail_skb)
			td->nofail_skb =
				alloc_skb(sizeof(struct cpl_tid_release),
					  GFP_KERNEL);
		spin_lock_bh(&td->tid_release_lock);
	}
	td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1;
	spin_unlock_bh(&td->tid_release_lock);

	if (!td->nofail_skb)
		td->nofail_skb =
			alloc_skb(sizeof(struct cpl_tid_release),
				  GFP_KERNEL);
}

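/* use ctx as a next pointer in the tid release list */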
void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
{
	struct t3c_data *td = T3C_DATA(tdev);
	struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];

	spin_lock_bh(&td->tid_release_lock);
	p->ctx = (void *)td->tid_release_list;
	p->client = NULL;
	td->tid_release_list = p;
	if (!p->ctx || td->release_list_incomplete)
		schedule_work(&td->tid_release_task);
	spin_unlock_bh(&td->tid_release_lock);
}

EXPORT_SYMBOL(cxgb3_queue_tid_release);

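/*
 * Remove a tid from the TID table.  On T3A parts the context pointer is
 * cleared atomically (only if it still points at the caller's context); on
 * later parts a CPL_TID_RELEASE is sent to the hardware, deferring to the
 * release work queue if the skb allocation fails.
 */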
void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	BUG_ON(tid >= t->ntids);
	if (tdev->type == T3A)
		(void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
	else {
		struct sk_buff *skb;

		skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
		if (likely(skb)) {
			mk_tid_release(skb, tid);
			cxgb3_ofld_send(tdev, skb);
			t->tid_tab[tid].ctx = NULL;
		} else
			cxgb3_queue_tid_release(tdev, tid);
	}
	atomic_dec(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_remove_tid);

int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int atid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->atid_lock);
	if (t->afree &&
	    t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
	    t->ntids) {
		union active_open_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}

EXPORT_SYMBOL(cxgb3_alloc_atid);

int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int stid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->stid_lock);
	if (t->sfree) {
		union listen_entry *p = t->sfree;

		stid = (p - t->stid_tab) + t->stid_base;
		t->sfree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}

EXPORT_SYMBOL(cxgb3_alloc_stid);

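/* Get the t3cdev associated with a net_device */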
struct t3cdev *dev2t3cdev(struct net_device *dev)
{
	const struct port_info *pi = netdev_priv(dev);

	return (struct t3cdev *)pi->adapter;
}

EXPORT_SYMBOL(dev2t3cdev);

static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_smt_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		pr_err("Unexpected SMT_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_l2t_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		pr_err("Unexpected L2T_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_rte_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		pr_err("Unexpected RTE_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = G_TID(ntohl(rpl->atid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
	    t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
		return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL](dev, skb,
								   t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_OPEN_RPL);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int stid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode] (dev, skb,
							     t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
	struct t3c_tid_entry *t3c_tid;
	unsigned int tid = GET_TID(req);

	if (unlikely(tid >= t->ntids)) {
		pr_err("%s: passive open TID %u too large\n",
		       dev->name, tid);
		t3_fatal_err(tdev2adap(dev));
		return CPL_RET_BUF_DONE;
	}

	t3c_tid = lookup_stid(t, stid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
		return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
		    (dev, skb, t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_PASS_ACCEPT_REQ);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

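/*
 * Returns an sk_buff for a reply CPL message of size len.  If the input
 * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
 * is allocated.  The input skb must be of size at least len.
 */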
static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
					       gfp_t gfp)
{
	if (likely(!skb_cloned(skb))) {
		BUG_ON(skb->len < len);
		__skb_trim(skb, len);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (skb)
			__skb_put(skb, len);
	}
	return skb;
}

static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		struct cpl_abort_req_rss *req = cplhdr(skb);
		struct cpl_abort_rpl *rpl;
		struct sk_buff *reply_skb;
		unsigned int tid = GET_TID(req);
		u8 cmd = req->status;

		if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
		    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
			goto out;

		reply_skb = cxgb3_get_cpl_reply_skb(skb,
						    sizeof(struct
							   cpl_abort_rpl),
						    GFP_ATOMIC);

		if (!reply_skb) {
			pr_err("do_abort_req_rss: couldn't get skb!\n");
			goto out;
		}
		reply_skb->priority = CPL_PRIORITY_DATA;
		__skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
		rpl = cplhdr(reply_skb);
		rpl->wr.wr_hi =
		    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
		rpl->wr.wr_lo = htonl(V_WR_TID(tid));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
		rpl->cmd = cmd;
		cxgb3_ofld_send(dev, reply_skb);
out:
		return CPL_RET_BUF_DONE;
	}
}

static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
	struct t3c_tid_entry *t3c_tid;
	unsigned int tid = GET_TID(req);

	if (unlikely(tid >= t->ntids)) {
		pr_err("%s: active establish TID %u too large\n",
		       dev->name, tid);
		t3_fatal_err(tdev2adap(dev));
		return CPL_RET_BUF_DONE;
	}

	t3c_tid = lookup_atid(t, atid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
		return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
		    (dev, skb, t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_ESTABLISH);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_trace_pkt *p = cplhdr(skb);

	skb->protocol = htons(0xffff);
	skb->dev = dev->lldev;
	skb_pull(skb, sizeof(*p));
	skb_reset_mac_header(skb);
	netif_receive_skb(skb);
	return 0;
}

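/*
 * For offload packets the SGE response path stashes the hardware TID in
 * skb->priority and the CPL opcode in skb->csum; these helpers recover them.
 */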
static inline u32 get_hwtid(struct sk_buff *skb)
{
	return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
}

static inline u32 get_opcode(struct sk_buff *skb)
{
	return G_OPCODE(ntohl((__force __be32)skb->csum));
}

static int do_term(struct t3cdev *dev, struct sk_buff *skb)
{
	unsigned int hwtid = get_hwtid(skb);
	unsigned int opcode = get_opcode(skb);
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[opcode]) {
		return t3c_tid->client->handlers[opcode] (dev, skb,
							  t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int nb_callback(struct notifier_block *self, unsigned long event,
		       void *ctx)
{
	switch (event) {
	case (NETEVENT_NEIGH_UPDATE):{
		cxgb_neigh_update((struct neighbour *)ctx);
		break;
	}
	case (NETEVENT_REDIRECT):{
		struct netevent_redirect *nr = ctx;
		cxgb_redirect(nr->old, nr->new, nr->neigh,
			      nr->daddr);
		cxgb_neigh_update(nr->neigh);
		break;
	}
	default:
		break;
	}
	return 0;
}

static struct notifier_block nb = {
	.notifier_call = nb_callback
};

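/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 */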
static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
{
	pr_err("%s: received bad CPL command 0x%x\n", dev->name, *skb->data);
	return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}

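/*
 * Handlers for each CPL opcode
 */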
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];

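/*
 * Add a new handler to the CPL dispatch table.  A NULL handler restores the
 * default do_bad_cpl handler, effectively unregistering the opcode.
 */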
void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
	if (opcode < NUM_CPL_CMDS)
		cpl_handlers[opcode] = h ? h : do_bad_cpl;
	else
		pr_err("T3C: handler registration for opcode %x failed\n",
		       opcode);
}

EXPORT_SYMBOL(t3_register_cpl_handler);

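/*
 * T3CDEV's receive method: dispatch each CPL message to its registered handler.
 */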
static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
{
	while (n--) {
		struct sk_buff *skb = *skbs++;
		unsigned int opcode = get_opcode(skb);
		int ret = cpl_handlers[opcode] (dev, skb);

#if VALIDATE_TID
		if (ret & CPL_RET_UNKNOWN_TID) {
			union opcode_tid *p = cplhdr(skb);

			pr_err("%s: CPL message (opcode %u) had unknown TID %u\n",
			       dev->name, opcode, G_TID(ntohl(p->opcode_tid)));
		}
#endif
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);
	}
	return 0;
}

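/*
 * Send an sk_buff to the offload device, with bottom halves disabled around
 * the device's send method.
 */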
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
{
	int r;

	local_bh_disable();
	r = dev->send(dev, skb);
	local_bh_enable();
	return r;
}

EXPORT_SYMBOL(cxgb3_ofld_send);

static int is_offloading(struct net_device *dev)
{
	struct adapter *adapter;
	int i;

	read_lock_bh(&adapter_list_lock);
	list_for_each_entry(adapter, &adapter_list, adapter_list) {
		for_each_port(adapter, i) {
			if (dev == adapter->port[i]) {
				read_unlock_bh(&adapter_list_lock);
				return 1;
			}
		}
	}
	read_unlock_bh(&adapter_list_lock);
	return 0;
}

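/* Refresh the L2T entry for a neighbour if it belongs to an offload device. */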
static void cxgb_neigh_update(struct neighbour *neigh)
{
	struct net_device *dev;

	if (!neigh)
		return;
	dev = neigh->dev;
	if (dev && (is_offloading(dev))) {
		struct t3cdev *tdev = dev2t3cdev(dev);

		BUG_ON(!tdev);
		t3_l2t_update(tdev, neigh);
	}
}

static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb) {
		pr_err("%s: cannot allocate skb!\n", __func__);
		return;
	}
	skb->priority = CPL_PRIORITY_CONTROL;
	req = skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_L2T_IX);
	req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
	req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
	tdev->send(tdev, skb);
}

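/*
 * Handle a dst/route redirect: allocate an L2T entry for the new destination,
 * let each client decide whether to move its connections over, and rewrite
 * the L2T index in the affected TCBs.
 */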
static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
			  struct neighbour *neigh,
			  const void *daddr)
{
	struct net_device *dev;
	struct tid_info *ti;
	struct t3cdev *tdev;
	u32 tid;
	int update_tcb;
	struct l2t_entry *e;
	struct t3c_tid_entry *te;

	dev = neigh->dev;

	if (!is_offloading(dev))
		return;
	tdev = dev2t3cdev(dev);
	BUG_ON(!tdev);

	/* Add new L2T entry */
	e = t3_l2t_get(tdev, new, dev, daddr);
	if (!e) {
		pr_err("%s: couldn't allocate new l2t entry!\n", __func__);
		return;
	}

	/* Walk tid table and notify clients of dst change. */
	ti = &(T3C_DATA(tdev))->tid_maps;
	for (tid = 0; tid < ti->ntids; tid++) {
		te = lookup_tid(ti, tid);
		BUG_ON(!te);
		if (te && te->ctx && te->client && te->client->redirect) {
			update_tcb = te->client->redirect(te->ctx, old, new, e);
			if (update_tcb) {
				rcu_read_lock();
				l2t_hold(L2DATA(tdev), e);
				rcu_read_unlock();
				set_l2t_ix(tdev, tid, e);
			}
		}
	}
	l2t_release(tdev, e);
}

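/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */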
static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
			 unsigned int natids, unsigned int nstids,
			 unsigned int atid_base, unsigned int stid_base)
{
	unsigned long size = ntids * sizeof(*t->tid_tab) +
	    natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

	t->tid_tab = kvzalloc(size, GFP_KERNEL);
	if (!t->tid_tab)
		return -ENOMEM;

	t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
	t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
	t->ntids = ntids;
	t->nstids = nstids;
	t->stid_base = stid_base;
	t->sfree = NULL;
	t->natids = natids;
	t->atid_base = atid_base;
	t->afree = NULL;
	t->stids_in_use = t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	/*
	 * Setup the free lists for stid_tab and atid_tab.
	 */
	if (nstids) {
		while (--nstids)
			t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
		t->sfree = t->stid_tab;
	}
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	return 0;
}

static void free_tid_maps(struct tid_info *t)
{
	kvfree(t->tid_tab);
}

static inline void add_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_add_tail(&adap->adapter_list, &adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

static inline void remove_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_del(&adap->adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

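/*
 * Bring up offload support for an adapter: allocate the per-device offload
 * state, L2T and TID tables, install the receive path and register for
 * netevent notifications.
 */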
int cxgb3_offload_activate(struct adapter *adapter)
{
	struct t3cdev *dev = &adapter->tdev;
	int natids, err;
	struct t3c_data *t;
	struct tid_range stid_range, tid_range;
	struct mtutab mtutab;
	unsigned int l2t_capacity;
	struct l2t_data *l2td;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	err = -EOPNOTSUPP;
	if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
	    dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
	    dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
	    dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
	    dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
	    dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
		goto out_free;

	err = -ENOMEM;
	l2td = t3_init_l2t(l2t_capacity);
	if (!l2td)
		goto out_free;

	natids = min(tid_range.num / 2, MAX_ATIDS);
	err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
			    stid_range.num, ATID_BASE, stid_range.base);
	if (err)
		goto out_free_l2t;

	t->mtus = mtutab.mtus;
	t->nmtus = mtutab.size;

	INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
	spin_lock_init(&t->tid_release_lock);
	INIT_LIST_HEAD(&t->list_node);
	t->dev = dev;

	RCU_INIT_POINTER(dev->l2opt, l2td);
	T3C_DATA(dev) = t;
	dev->recv = process_rx;
	dev->neigh_update = t3_l2t_update;

	/* Register netevent handler once */
	if (list_empty(&adapter_list))
		register_netevent_notifier(&nb);

	t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
	t->release_list_incomplete = 0;

	add_adapter(adapter);
	return 0;

out_free_l2t:
	kvfree(l2td);
out_free:
	kfree(t);
	return err;
}

static void clean_l2_data(struct rcu_head *head)
{
	struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
	kvfree(d);
}

void cxgb3_offload_deactivate(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;
	struct t3c_data *t = T3C_DATA(tdev);
	struct l2t_data *d;

	remove_adapter(adapter);
	if (list_empty(&adapter_list))
		unregister_netevent_notifier(&nb);

	free_tid_maps(&t->tid_maps);
	T3C_DATA(tdev) = NULL;
	rcu_read_lock();
	d = L2DATA(tdev);
	rcu_read_unlock();
	RCU_INIT_POINTER(tdev->l2opt, NULL);
	call_rcu(&d->rcu_head, clean_l2_data);
	kfree_skb(t->nofail_skb);
	kfree(t);
}

static inline void register_tdev(struct t3cdev *tdev)
{
	static int unit;

	mutex_lock(&cxgb3_db_lock);
	snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
	list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

static inline void unregister_tdev(struct t3cdev *tdev)
{
	mutex_lock(&cxgb3_db_lock);
	list_del(&tdev->ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

static inline int adap2type(struct adapter *adapter)
{
	int type = 0;

	switch (adapter->params.rev) {
	case T3_REV_A:
		type = T3A;
		break;
	case T3_REV_B:
	case T3_REV_B2:
		type = T3B;
		break;
	case T3_REV_C:
		type = T3C;
		break;
	}
	return type;
}

void cxgb3_adapter_ofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	INIT_LIST_HEAD(&tdev->ofld_dev_list);

	cxgb3_set_dummy_ops(tdev);
	tdev->send = t3_offload_tx;
	tdev->ctl = cxgb_offload_ctl;
	tdev->type = adap2type(adapter);

	register_tdev(tdev);
}

void cxgb3_adapter_unofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	tdev->recv = NULL;
	tdev->neigh_update = NULL;

	unregister_tdev(tdev);
}

void __init cxgb3_offload_init(void)
{
	int i;

	for (i = 0; i < NUM_CPL_CMDS; ++i)
		cpl_handlers[i] = do_bad_cpl;

	t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
	t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
	t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
	t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
	t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
	t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
	t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
	t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
	t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
	t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
}