/* bnx2fc_tgt.c: QLogic Linux FCoE offload driver.
 * Handles operations related to session offload/upload: remote port
 * (target) setup and teardown, FCoE connection id management, and
 * per-session firmware queue resource allocation.
 */
#include "bnx2fc.h"
static void bnx2fc_upld_timer(struct timer_list *t);
static void bnx2fc_ofld_timer(struct timer_list *t);
static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
			   struct fcoe_port *port,
			   struct fc_rport_priv *rdata);
static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
				struct bnx2fc_rport *tgt);
static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
				     struct bnx2fc_rport *tgt);
static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
				     struct bnx2fc_rport *tgt);
static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id);

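/*
 * Upload timer handler: fires when the firmware does not signal
 * upload (disable/destroy) completion within BNX2FC_FW_TIMEOUT.
 * Fake the completion so the thread blocked in bnx2fc_upld_wait()
 * can make forward progress.
 */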
static void bnx2fc_upld_timer(struct timer_list *t)
{
	struct bnx2fc_rport *tgt = from_timer(tgt, t, upld_timer);

	BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
	/* fake upload completion */
	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
	clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
	set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
	wake_up_interruptible(&tgt->upld_wait);
}

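/*
 * Offload timer handler, armed by bnx2fc_ofld_wait(): fires if the
 * firmware offload/enable completion does not arrive within
 * BNX2FC_FW_TIMEOUT.
 */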
static void bnx2fc_ofld_timer(struct timer_list *t)
{
	struct bnx2fc_rport *tgt = from_timer(tgt, t, ofld_timer);

	BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n");
	/* NOTE: This function should never be called, as
	 * offload should never timeout
	 */
	/*
	 * If the timer has expired, this session is dead.
	 * Clear offloaded flag and logout of this device.
	 * Since OFFLOADED flag is cleared, this case
	 * will be considered as offload error and the
	 * port will be logged off, and conn_id, session
	 * resources are freed up in bnx2fc_offload_session
	 */
	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
	clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}

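/*
 * Arm the offload timer and sleep until BNX2FC_FLAG_OFLD_REQ_CMPL is
 * set, either by the firmware completion path or by bnx2fc_ofld_timer()
 * on timeout. Any signal received while sleeping is flushed.
 */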
static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt)
{
	timer_setup(&tgt->ofld_timer, bnx2fc_ofld_timer, 0);
	mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);

	wait_event_interruptible(tgt->ofld_wait,
				 test_bit(BNX2FC_FLAG_OFLD_REQ_CMPL,
					  &tgt->flags));
	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&tgt->ofld_timer);
}

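/**
 * bnx2fc_offload_session - Initialize an FCoE session
 *
 * @port:	fcoe port structure
 * @tgt:	bnx2fc_rport structure (the session)
 * @rdata:	libfc remote port private structure
 *
 * Initializes the target structure, allocates session resources, and
 * issues the offload and enable requests to the firmware, blocking
 * until each completes. On any failure the rport is logged off.
 */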
static void bnx2fc_offload_session(struct fcoe_port *port,
				   struct bnx2fc_rport *tgt,
				   struct fc_rport_priv *rdata)
{
	struct fc_rport *rport = rdata->rport;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	int rval;
	int i = 0;

	/* Initialize bnx2fc_rport */
	/* NOTE: tgt is already bzero'd */
	rval = bnx2fc_init_tgt(tgt, port, rdata);
	if (rval) {
		printk(KERN_ERR PFX "Failed to allocate conn id for "
			"port_id (%6x)\n", rport->port_id);
		goto tgt_init_err;
	}

	/* Allocate session resources */
	rval = bnx2fc_alloc_session_resc(hba, tgt);
	if (rval) {
		printk(KERN_ERR PFX "Failed to allocate resources\n");
		goto ofld_err;
	}

	/*
	 * Initialize FCoE session offload process.
	 * Upon completion of offload process add
	 * rport to list of rports
	 */
retry_ofld:
	clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	rval = bnx2fc_send_session_ofld_req(port, tgt);
	if (rval) {
		printk(KERN_ERR PFX "ofld_req failed\n");
		goto ofld_err;
	}

	/*
	 * wait for the session to be completely offloaded
	 * or offload process to be aborted
	 */
	bnx2fc_ofld_wait(tgt);

	if (!test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
		if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE,
				       &tgt->flags)) {
			BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, "
				"retry ofld..%d\n", i++);
			msleep_interruptible(1000);
			/* give up after three retries */
			if (i > 3) {
				i = 0;
				goto ofld_err;
			}
			goto retry_ofld;
		}
		goto ofld_err;
	}
	if (bnx2fc_map_doorbell(tgt)) {
		printk(KERN_ERR PFX "map doorbell failed - no mem\n");
		goto ofld_err;
	}

	/* Enable the session so that I/O can be issued on it */
	clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	rval = bnx2fc_send_session_enable_req(port, tgt);
	if (rval) {
		printk(KERN_ERR PFX "enable session failed\n");
		goto ofld_err;
	}
	bnx2fc_ofld_wait(tgt);
	if (!test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags))
		goto ofld_err;
	return;

ofld_err:
	/* couldn't offload the session. log off from this rport */
	BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n");
	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
	/* Free session resources */
	bnx2fc_free_session_resc(hba, tgt);
tgt_init_err:
	if (tgt->fcoe_conn_id != -1)
		bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
	fc_rport_logoff(rdata);
}

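/**
 * bnx2fc_flush_active_ios - clean up commands still queued on the session
 *
 * @tgt:	bnx2fc_rport structure (the session)
 *
 * Walks the active command, task-management, ELS and retire queues,
 * cancels pending timeouts, completes any waiters, and issues firmware
 * cleanup for each command. Then waits (up to BNX2FC_WAIT_CNT * 25ms)
 * for the active I/O count to drain to zero.
 */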
void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
{
	struct bnx2fc_cmd *io_req;
	struct bnx2fc_cmd *tmp;
	int rc;
	int i = 0;

	BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n",
		       tgt->num_active_ios.counter);

	spin_lock_bh(&tgt->tgt_lock);
	tgt->flush_in_prog = 1;

	list_for_each_entry_safe(io_req, tmp, &tgt->active_cmd_queue, link) {
		i++;
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;
		BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n");

		if (cancel_delayed_work(&io_req->timeout_work)) {
			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
					       &io_req->req_flags)) {
				/* Handle eh_abort timeout */
				BNX2FC_IO_DBG(io_req, "eh_abort for IO "
					      "cleaned up\n");
				complete(&io_req->abts_done);
			}
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */
		}

		set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags);
		set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);

		/* Do not issue cleanup when disable request failed */
		if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags))
			bnx2fc_process_cleanup_compl(io_req, io_req->task, 0);
		else {
			rc = bnx2fc_initiate_cleanup(io_req);
			BUG_ON(rc);
		}
	}

	list_for_each_entry_safe(io_req, tmp, &tgt->active_tm_queue, link) {
		i++;
		list_del_init(&io_req->link);
		io_req->on_tmf_queue = 0;
		BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n");
		if (io_req->wait_for_abts_comp)
			complete(&io_req->abts_done);
	}

	list_for_each_entry_safe(io_req, tmp, &tgt->els_queue, link) {
		i++;
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;

		BNX2FC_IO_DBG(io_req, "els_queue cleanup\n");

		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */

		if ((io_req->cb_func) && (io_req->cb_arg)) {
			io_req->cb_func(io_req->cb_arg);
			io_req->cb_arg = NULL;
		}

		/* Do not issue cleanup when disable request failed */
		if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags))
			bnx2fc_process_cleanup_compl(io_req, io_req->task, 0);
		else {
			rc = bnx2fc_initiate_cleanup(io_req);
			BUG_ON(rc);
		}
	}

	list_for_each_entry_safe(io_req, tmp, &tgt->io_retire_queue, link) {
		i++;
		list_del_init(&io_req->link);

		BNX2FC_IO_DBG(io_req, "retire_queue flush\n");

		if (cancel_delayed_work(&io_req->timeout_work)) {
			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
					       &io_req->req_flags)) {
				/* Handle eh_abort timeout */
				BNX2FC_IO_DBG(io_req, "eh_abort for IO "
					      "in retire_q\n");
				if (io_req->wait_for_abts_comp)
					complete(&io_req->abts_done);
			}
			kref_put(&io_req->refcount, bnx2fc_cmd_release);
		}

		clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
	}

	BNX2FC_TGT_DBG(tgt, "IOs flushed = %d\n", i);
	i = 0;
	spin_unlock_bh(&tgt->tgt_lock);

	/* wait for active_ios to go to 0 */
	while ((tgt->num_active_ios.counter != 0) && (i++ < BNX2FC_WAIT_CNT))
		msleep(25);
	if (tgt->num_active_ios.counter != 0)
		printk(KERN_ERR PFX "CLEANUP on port 0x%x:"
				    " active_ios = %d\n",
			tgt->rdata->ids.port_id, tgt->num_active_ios.counter);
	spin_lock_bh(&tgt->tgt_lock);
	tgt->flush_in_prog = 0;
	spin_unlock_bh(&tgt->tgt_lock);
}

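/*
 * Arm the upload timer and sleep until BNX2FC_FLAG_UPLD_REQ_COMPL is
 * set, either by the firmware disable/destroy completion path or by
 * bnx2fc_upld_timer() on timeout.
 */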
static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt)
{
	timer_setup(&tgt->upld_timer, bnx2fc_upld_timer, 0);
	mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
	wait_event_interruptible(tgt->upld_wait,
				 test_bit(BNX2FC_FLAG_UPLD_REQ_COMPL,
					  &tgt->flags));
	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&tgt->upld_timer);
}

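/**
 * bnx2fc_upload_session - destroy an offloaded FCoE session
 *
 * @port:	fcoe port structure
 * @tgt:	bnx2fc_rport structure (the session)
 *
 * Sends the disable request, flushes any I/O still active on the
 * session, and, if the disable completed, sends the destroy request.
 * Each step blocks in bnx2fc_upld_wait().
 */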
static void bnx2fc_upload_session(struct fcoe_port *port,
				  struct bnx2fc_rport *tgt)
{
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;

	BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n",
		       tgt->num_active_ios.counter);

	/*
	 * Called with hba->hba_mutex held.
	 * This is a blocking call
	 */
	clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
	bnx2fc_send_session_disable_req(port, tgt);

	/*
	 * wait for upload to complete. 3 Secs
	 * should be sufficient time for this process to complete.
	 */
	BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n");
	bnx2fc_upld_wait(tgt);

	/*
	 * traverse thru the active_q and tmf_q and cleanup
	 * IOs in these lists
	 */
	BNX2FC_TGT_DBG(tgt, "flush/upload - disable wait flags = 0x%lx\n",
		       tgt->flags);
	bnx2fc_flush_active_ios(tgt);

	/* Issue destroy KWQE */
	if (test_bit(BNX2FC_FLAG_DISABLED, &tgt->flags)) {
		BNX2FC_TGT_DBG(tgt, "send destroy req\n");
		clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		bnx2fc_send_session_destroy_req(hba, tgt);

		/* wait for destroy to complete */
		bnx2fc_upld_wait(tgt);

		if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags)))
			printk(KERN_ERR PFX "ERROR!! destroy timed out\n");

		BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n",
			tgt->flags);

	} else if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) {
		printk(KERN_ERR PFX "ERROR!! DISABLE req failed, destroy"
				" not sent to FW\n");
	} else {
		printk(KERN_ERR PFX "ERROR!! DISABLE req timed out, destroy"
				" not sent to FW\n");
	}

	/* Free session resources */
	bnx2fc_free_session_resc(hba, tgt);
	bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
}

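/**
 * bnx2fc_init_tgt - initialize the bnx2fc_rport structure
 *
 * @tgt:	bnx2fc_rport structure (the session)
 * @port:	fcoe port structure
 * @rdata:	libfc remote port private structure
 *
 * Allocates an FCoE connection id, sets up the queue sizes, indices
 * and doorbell templates, and initializes the per-session locks,
 * command queues and wait queues.
 */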
static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
			   struct fcoe_port *port,
			   struct fc_rport_priv *rdata)
{
	struct fc_rport *rport = rdata->rport;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
	struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;

	tgt->rport = rport;
	tgt->rdata = rdata;
	tgt->port = port;

	if (hba->num_ofld_sess >= BNX2FC_NUM_MAX_SESS) {
		BNX2FC_TGT_DBG(tgt, "exceeded max sessions. logoff this tgt\n");
		tgt->fcoe_conn_id = -1;
		return -1;
	}

	tgt->fcoe_conn_id = bnx2fc_alloc_conn_id(hba, tgt);
	if (tgt->fcoe_conn_id == -1)
		return -1;

	BNX2FC_TGT_DBG(tgt, "init_tgt - conn_id = 0x%x\n", tgt->fcoe_conn_id);

	tgt->max_sqes = BNX2FC_SQ_WQES_MAX;
	tgt->max_rqes = BNX2FC_RQ_WQES_MAX;
	tgt->max_cqes = BNX2FC_CQ_WQES_MAX;
	atomic_set(&tgt->free_sqes, BNX2FC_SQ_WQES_MAX);

	/* Initialize the toggle bit */
	tgt->sq_curr_toggle_bit = 1;
	tgt->cq_curr_toggle_bit = 1;
	tgt->sq_prod_idx = 0;
	tgt->cq_cons_idx = 0;
	tgt->rq_prod_idx = 0x8000;
	tgt->rq_cons_idx = 0;
	atomic_set(&tgt->num_active_ios, 0);
	tgt->retry_delay_timestamp = 0;

	/* a retry-capable, target-only device is treated as tape */
	if (rdata->flags & FC_RP_FLAGS_RETRY &&
	    rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
	    !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) {
		tgt->dev_type = TYPE_TAPE;
		tgt->io_timeout = 0; /* use default ULP timeout */
	} else {
		tgt->dev_type = TYPE_DISK;
		tgt->io_timeout = BNX2FC_IO_TIMEOUT;
	}

	/* initialize sq doorbell */
	sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE;
	sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE <<
					B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
	/* initialize rx doorbell */
	rx_db->hdr.header = ((0x1 << B577XX_DOORBELL_HDR_RX_SHIFT) |
			     (0x1 << B577XX_DOORBELL_HDR_DB_TYPE_SHIFT) |
			     (B577XX_FCOE_CONNECTION_TYPE <<
				B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT));
	rx_db->params = (0x2 << B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT) |
			(0x3 << B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT);

	spin_lock_init(&tgt->tgt_lock);
	spin_lock_init(&tgt->cq_lock);

	/* Initialize active_cmd_queue list */
	INIT_LIST_HEAD(&tgt->active_cmd_queue);

	/* Initialize IO retire queue */
	INIT_LIST_HEAD(&tgt->io_retire_queue);

	INIT_LIST_HEAD(&tgt->els_queue);

	/* Initialize active_tm_queue list */
	INIT_LIST_HEAD(&tgt->active_tm_queue);

	init_waitqueue_head(&tgt->ofld_wait);
	init_waitqueue_head(&tgt->upld_wait);

	return 0;
}

/*
 * This event_callback is called after successful completion of libfc
 * initiated target login. bnx2fc can proceed with initiating the session
 * establishment.
 */
void bnx2fc_rport_event_handler(struct fc_lport *lport,
				struct fc_rport_priv *rdata,
				enum fc_rport_event event)
{
	struct fcoe_port *port = lport_priv(lport);
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct fc_rport *rport = rdata->rport;
	struct fc_rport_libfc_priv *rp;
	struct bnx2fc_rport *tgt;
	u32 port_id;

	BNX2FC_HBA_DBG(lport, "rport_event_hdlr: event = %d, port_id = 0x%x\n",
		event, rdata->ids.port_id);
	switch (event) {
	case RPORT_EV_READY:
		if (!rport) {
			printk(KERN_ERR PFX "rport is NULL: ERROR!\n");
			break;
		}

		rp = rport->dd_data;
		if (rport->port_id == FC_FID_DIR_SERV) {
			/*
			 * bnx2fc_rport structure doesn't exist for
			 * directory server.
			 * We should not come here, as lport will
			 * take care of fabric login
			 */
			printk(KERN_ERR PFX "%x - rport_event_handler ERROR\n",
				rdata->ids.port_id);
			break;
		}

		if (rdata->spp_type != FC_TYPE_FCP) {
			BNX2FC_HBA_DBG(lport, "not FCP type target."
				" not offloading\n");
			break;
		}
		if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
			BNX2FC_HBA_DBG(lport, "not FCP_TARGET"
				" not offloading\n");
			break;
		}

		/*
		 * Offload process is protected with hba mutex.
		 * Use the same mutex_lock for upload process too
		 */
		mutex_lock(&hba->hba_mutex);
		tgt = (struct bnx2fc_rport *)&rp[1];

		/* This can happen when ADISC finds the same target */
		if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) {
			BNX2FC_TGT_DBG(tgt, "already offloaded\n");
			mutex_unlock(&hba->hba_mutex);
			return;
		}

		/*
		 * Offload the session. This is a blocking call, and will
		 * wait until the session is offloaded.
		 */
		bnx2fc_offload_session(port, tgt, rdata);

		BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n",
			hba->num_ofld_sess);

		if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) {
			/* Session is offloaded and enabled. */
			BNX2FC_TGT_DBG(tgt, "sess offloaded\n");
			/* This counter is protected with hba mutex */
			hba->num_ofld_sess++;

			set_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
		} else {
			/*
			 * Offload or enable was not successful.
			 * bnx2fc_offload_session() has already logged
			 * the port off and released session resources.
			 */
			BNX2FC_TGT_DBG(tgt, "Port is being logged off as "
				"offloaded flag not set\n");
		}
		mutex_unlock(&hba->hba_mutex);
		break;
	case RPORT_EV_LOGO:
	case RPORT_EV_FAILED:
	case RPORT_EV_STOP:
		port_id = rdata->ids.port_id;
		if (port_id == FC_FID_DIR_SERV)
			break;

		if (!rport) {
			printk(KERN_INFO PFX "%x - rport not created Yet!!\n",
				port_id);
			break;
		}
		rp = rport->dd_data;
		mutex_lock(&hba->hba_mutex);
		/*
		 * Perform session upload. Note that rdata is already
		 * removed from the disc rport list before we get this event.
		 */
		tgt = (struct bnx2fc_rport *)&rp[1];

		if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags))) {
			mutex_unlock(&hba->hba_mutex);
			break;
		}
		clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);

		bnx2fc_upload_session(port, tgt);
		hba->num_ofld_sess--;
		BNX2FC_TGT_DBG(tgt, "UPLOAD num_ofld_sess = %d\n",
			hba->num_ofld_sess);

		/*
		 * Try to wake up the linkdown wait thread. If num_ofld_sess
		 * is 0, the waiting thread wakes up
		 */
		if ((hba->wait_for_link_down) &&
		    (hba->num_ofld_sess == 0)) {
			wake_up_interruptible(&hba->shutdown_wait);
		}
		mutex_unlock(&hba->hba_mutex);
		break;

	case RPORT_EV_NONE:
		break;
	}
}

/**
 * bnx2fc_tgt_lookup() - Lookup a bnx2fc_rport by port_id
 *
 * @port:	fcoe_port struct to lookup the target port on
 * @port_id:	The remote port ID to look up
 */
struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
				       u32 port_id)
{
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct bnx2fc_rport *tgt;
	struct fc_rport_priv *rdata;
	int i;

	for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
		tgt = hba->tgt_ofld_list[i];
		if ((tgt) && (tgt->port == port)) {
			rdata = tgt->rdata;
			if (rdata->ids.port_id == port_id) {
				if (rdata->rp_state != RPORT_ST_DELETE) {
					BNX2FC_TGT_DBG(tgt, "rport "
						"obtained\n");
					return tgt;
				} else {
					BNX2FC_TGT_DBG(tgt, "rport 0x%x "
						"is in DELETED state\n",
						rdata->ids.port_id);
					return NULL;
				}
			}
		}
	}
	return NULL;
}

/**
 * bnx2fc_alloc_conn_id - allocates an FCoE connection id
 *
 * @hba:	pointer to adapter structure
 * @tgt:	pointer to bnx2fc_rport structure
 */
static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
				struct bnx2fc_rport *tgt)
{
	u32 conn_id, next;

	/* called with hba mutex held */

	/*
	 * tgt_ofld_list access is synchronized using
	 * both hba mutex and hba lock. At least hba mutex or
	 * hba lock needs to be held for read access.
	 */
	spin_lock_bh(&hba->hba_lock);
	next = hba->next_conn_id;
	conn_id = hba->next_conn_id++;
	if (hba->next_conn_id == BNX2FC_NUM_MAX_SESS)
		hba->next_conn_id = 0;

	while (hba->tgt_ofld_list[conn_id] != NULL) {
		conn_id++;
		if (conn_id == BNX2FC_NUM_MAX_SESS)
			conn_id = 0;

		if (conn_id == next) {
			/* No free conn_ids are available */
			spin_unlock_bh(&hba->hba_lock);
			return -1;
		}
	}
	hba->tgt_ofld_list[conn_id] = tgt;
	tgt->fcoe_conn_id = conn_id;
	spin_unlock_bh(&hba->hba_lock);
	return conn_id;
}

static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id)
{
	/* called with hba mutex held */
	spin_lock_bh(&hba->hba_lock);
	hba->tgt_ofld_list[conn_id] = NULL;
	spin_unlock_bh(&hba->hba_lock);
}

/**
 * bnx2fc_alloc_session_resc - allocate the DMA queue resources
 * (SQ, CQ, RQ, XFERQ, CONFQ, connection DB and LCQ) for the session
 */
static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
				     struct bnx2fc_rport *tgt)
{
	dma_addr_t page;
	int num_pages;
	u32 *pbl;

	/* Allocate and map SQ */
	tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
	tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
			   CNIC_PAGE_MASK;

	tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
				     &tgt->sq_dma, GFP_KERNEL);
	if (!tgt->sq) {
		printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
			tgt->sq_mem_size);
		goto mem_alloc_failure;
	}

	/* Allocate and map CQ */
	tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
	tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
			   CNIC_PAGE_MASK;

	tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
				     &tgt->cq_dma, GFP_KERNEL);
	if (!tgt->cq) {
		printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
			tgt->cq_mem_size);
		goto mem_alloc_failure;
	}

	/* Allocate and map RQ and RQ PBL */
	tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
	tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
			   CNIC_PAGE_MASK;

	tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
				     &tgt->rq_dma, GFP_KERNEL);
	if (!tgt->rq) {
		printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
			tgt->rq_mem_size);
		goto mem_alloc_failure;
	}

	tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
	tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
			   CNIC_PAGE_MASK;

	tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
					 &tgt->rq_pbl_dma, GFP_KERNEL);
	if (!tgt->rq_pbl) {
		printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
			tgt->rq_pbl_size);
		goto mem_alloc_failure;
	}

	num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
	page = tgt->rq_dma;
	pbl = (u32 *)tgt->rq_pbl;

	while (num_pages--) {
		*pbl = (u32)page;
		pbl++;
		*pbl = (u32)((u64)page >> 32);
		pbl++;
		page += CNIC_PAGE_SIZE;
	}
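
	/*
	 * The loop above builds the page buffer list (PBL) for the RQ:
	 * each CNIC page of the queue is described by a 64-bit DMA
	 * address written as two consecutive 32-bit words, low word
	 * first. The CONFQ PBL below is laid out the same way.
	 */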

	/* Allocate and map XFERQ */
	tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
	tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
			      CNIC_PAGE_MASK;

	tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev,
					tgt->xferq_mem_size, &tgt->xferq_dma,
					GFP_KERNEL);
	if (!tgt->xferq) {
		printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
			tgt->xferq_mem_size);
		goto mem_alloc_failure;
	}

	/* Allocate and map CONFQ & CONFQ PBL */
	tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
	tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
			      CNIC_PAGE_MASK;

	tgt->confq = dma_alloc_coherent(&hba->pcidev->dev,
					tgt->confq_mem_size, &tgt->confq_dma,
					GFP_KERNEL);
	if (!tgt->confq) {
		printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
			tgt->confq_mem_size);
		goto mem_alloc_failure;
	}

	tgt->confq_pbl_size =
		(tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
	tgt->confq_pbl_size =
		(tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;

	tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
					    tgt->confq_pbl_size,
					    &tgt->confq_pbl_dma, GFP_KERNEL);
	if (!tgt->confq_pbl) {
		printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
			tgt->confq_pbl_size);
		goto mem_alloc_failure;
	}

	num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
	page = tgt->confq_dma;
	pbl = (u32 *)tgt->confq_pbl;

	while (num_pages--) {
		*pbl = (u32)page;
		pbl++;
		*pbl = (u32)((u64)page >> 32);
		pbl++;
		page += CNIC_PAGE_SIZE;
	}

	/* Allocate and map Connection DB */
	tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);

	tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
					  tgt->conn_db_mem_size,
					  &tgt->conn_db_dma, GFP_KERNEL);
	if (!tgt->conn_db) {
		printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
			tgt->conn_db_mem_size);
		goto mem_alloc_failure;
	}

	/* Allocate and map LCQ */
	tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
	tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
			    CNIC_PAGE_MASK;

	tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
				      &tgt->lcq_dma, GFP_KERNEL);
	if (!tgt->lcq) {
		printk(KERN_ERR PFX "unable to allocate lcq %d\n",
			tgt->lcq_mem_size);
		goto mem_alloc_failure;
	}

	/* seed RQ producer; matches tgt->rq_prod_idx set in bnx2fc_init_tgt() */
	tgt->conn_db->rq_prod = 0x8000;

	return 0;

mem_alloc_failure:
	/*
	 * Partially allocated resources are released by the caller
	 * via bnx2fc_free_session_resc() on the ofld_err path.
	 */
	return -ENOMEM;
}

/**
 * bnx2fc_free_session_resc - free qp resources for the session
 *
 * @hba:	adapter structure pointer
 * @tgt:	bnx2fc_rport structure pointer
 *
 * Free QP resources - SQ/RQ/CQ/XFERQ memory and PBL
 */
static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
				     struct bnx2fc_rport *tgt)
{
	void __iomem *ctx_base_ptr;

	BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n");

	spin_lock_bh(&tgt->cq_lock);
	ctx_base_ptr = tgt->ctx_base;
	tgt->ctx_base = NULL;

	/* Free LCQ */
	if (tgt->lcq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
				  tgt->lcq, tgt->lcq_dma);
		tgt->lcq = NULL;
	}
	/* Free connDB */
	if (tgt->conn_db) {
		dma_free_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size,
				  tgt->conn_db, tgt->conn_db_dma);
		tgt->conn_db = NULL;
	}
	/* Free CONFQ PBL and CONFQ */
	if (tgt->confq_pbl) {
		dma_free_coherent(&hba->pcidev->dev, tgt->confq_pbl_size,
				  tgt->confq_pbl, tgt->confq_pbl_dma);
		tgt->confq_pbl = NULL;
	}
	if (tgt->confq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
				  tgt->confq, tgt->confq_dma);
		tgt->confq = NULL;
	}
	/* Free XFERQ */
	if (tgt->xferq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
				  tgt->xferq, tgt->xferq_dma);
		tgt->xferq = NULL;
	}
	/* Free RQ PBL and RQ */
	if (tgt->rq_pbl) {
		dma_free_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
				  tgt->rq_pbl, tgt->rq_pbl_dma);
		tgt->rq_pbl = NULL;
	}
	if (tgt->rq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
				  tgt->rq, tgt->rq_dma);
		tgt->rq = NULL;
	}
	/* Free CQ */
	if (tgt->cq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
				  tgt->cq, tgt->cq_dma);
		tgt->cq = NULL;
	}
	/* Free SQ */
	if (tgt->sq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
				  tgt->sq, tgt->sq_dma);
		tgt->sq = NULL;
	}
	spin_unlock_bh(&tgt->cq_lock);

	if (ctx_base_ptr)
		iounmap(ctx_base_ptr);
}