// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

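/*
 * qla2x00_vp_stop_timer
 *	Stop the per-vport timer, if one is running.  Only virtual ports
 *	(vp_idx != 0) own a timer that may be stopped here; the base port
 *	timer is managed by the core driver.
 */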
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
        if (vha->vp_idx && vha->timer_active) {
                del_timer_sync(&vha->timer);
                vha->timer_active = 0;
        }
}

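/*
 * qla24xx_allocate_vp_id
 *	Find the first free slot in ha->vp_idx_map, claim it under
 *	ha->vport_lock, and link the vport onto ha->vp_list.  Returns the
 *	allocated vp_id, or a value greater than ha->max_npiv_vports when
 *	the map is exhausted (callers must treat that as failure).
 */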
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
        uint32_t vp_id;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;

        /* Find an empty slot and assign a vp_id */
        mutex_lock(&ha->vport_lock);
        vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
        if (vp_id > ha->max_npiv_vports) {
                ql_dbg(ql_dbg_vport, vha, 0xa000,
                    "vp_id %d is bigger than max-supported %d.\n",
                    vp_id, ha->max_npiv_vports);
                mutex_unlock(&ha->vport_lock);
                return vp_id;
        }

        set_bit(vp_id, ha->vp_idx_map);
        ha->num_vhosts++;
        vha->vp_idx = vp_id;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_add_tail(&vha->list, &ha->vp_list);
        spin_unlock_irqrestore(&ha->vport_slock, flags);

        spin_lock_irqsave(&ha->hardware_lock, flags);
        qlt_update_vp_map(vha, SET_VP_IDX);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        mutex_unlock(&ha->vport_lock);
        return vp_id;
}

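/*
 * qla24xx_deallocate_vp_id
 *	Undo qla24xx_allocate_vp_id(): wait (up to ~10 seconds) for
 *	vref_count to drain, unlink the vport from ha->vp_list and the
 *	target-mode vp map, then release its bit in ha->vp_idx_map.
 */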
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
        uint16_t vp_id;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags = 0;
        u32 i, bailout;

        mutex_lock(&ha->vport_lock);
        /*
         * Wait for all pending activities to finish before removing the
         * vport from the list.  The lock must be held for safe removal
         * (it ensures no active vp_list traversal while the vport is
         * removed from the queue).
         */
        bailout = 0;
        for (i = 0; i < 500; i++) {
                spin_lock_irqsave(&ha->vport_slock, flags);
                if (atomic_read(&vha->vref_count) == 0) {
                        list_del(&vha->list);
                        qlt_update_vp_map(vha, RESET_VP_IDX);
                        bailout = 1;
                }
                spin_unlock_irqrestore(&ha->vport_slock, flags);

                if (bailout)
                        break;
                else
                        msleep(20);
        }
        if (!bailout) {
                ql_log(ql_log_info, vha, 0xfffa,
                    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
                spin_lock_irqsave(&ha->vport_slock, flags);
                list_del(&vha->list);
                qlt_update_vp_map(vha, RESET_VP_IDX);
                spin_unlock_irqrestore(&ha->vport_slock, flags);
        }

        vp_id = vha->vp_idx;
        ha->num_vhosts--;
        clear_bit(vp_id, ha->vp_idx_map);

        mutex_unlock(&ha->vport_lock);
}

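/*
 * qla24xx_find_vhost_by_name
 *	Look up a vport on ha->vp_list by WWPN.  Returns the matching
 *	scsi_qla_host or NULL; the caller must ensure the returned vport
 *	cannot be deleted underneath it.
 */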
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
        scsi_qla_host_t *vha;
        struct scsi_qla_host *tvha;
        unsigned long flags;

        spin_lock_irqsave(&ha->vport_slock, flags);
        /* Locate matching device in database. */
        list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
                if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
                        spin_unlock_irqrestore(&ha->vport_slock, flags);
                        return vha;
                }
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
        return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	None.
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
        /*
         * !!! NOTE !!!
         * If this function is called in contexts other than vp create,
         * disable or delete, please make sure it is synchronized with
         * the delete thread.
         */
        fc_port_t *fcport;

        list_for_each_entry(fcport, &vha->vp_fcports, list) {
                ql_dbg(ql_dbg_vport, vha, 0xa001,
                    "Marking port dead, loop_id=0x%04x : %x.\n",
                    fcport->loop_id, fcport->vha->vp_idx);

                qla2x00_mark_device_lost(vha, fcport, 0);
                qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
        }
}

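/*
 * qla24xx_disable_vp
 *	Log out all sessions on the vport and take it offline.  Returns 0
 *	on success, -1 if the firmware VP disable command failed.
 */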
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
        unsigned long flags;
        int ret = QLA_SUCCESS;
        fc_port_t *fcport;

        if (vha->hw->flags.edif_enabled) {
                if (DBELL_ACTIVE(vha))
                        qla2x00_post_aen_work(vha, FCH_EVT_VENDOR_UNIQUE,
                            FCH_EVT_VENDOR_UNIQUE_VPORT_DOWN);

                qla2x00_wait_for_sess_deletion(vha);
        }

        if (vha->hw->flags.fw_started)
                ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
        list_for_each_entry(fcport, &vha->vp_fcports, list)
                fcport->logout_on_delete = 0;

        if (!vha->hw->flags.edif_enabled)
                qla2x00_wait_for_sess_deletion(vha);

        /* Remove port id from vp target map */
        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        qlt_update_vp_map(vha, RESET_AL_PA);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        qla2x00_mark_vp_devices_dead(vha);
        atomic_set(&vha->vp_state, VP_FAILED);
        vha->flags.management_server_logged_in = 0;
        if (ret == QLA_SUCCESS) {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
        } else {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                return -1;
        }
        return 0;
}

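/*
 * qla24xx_enable_vp
 *	Push the vport configuration to the firmware.  Requires the
 *	physical port to be up and in F-port (fabric) topology.  Returns 0
 *	on success, 1 on failure.
 */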
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
        int ret;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        /* Check if physical ha port is Up */
        if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
            atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
            !(ha->current_topology & ISP_CFG_F)) {
                vha->vp_err_state = VP_ERR_PORTDWN;
                fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
                ql_dbg(ql_dbg_taskm, vha, 0x800b,
                    "%s skip enable. loop_state %x topo %x\n",
                    __func__, base_vha->loop_state.counter,
                    ha->current_topology);

                goto enable_failed;
        }

        /* Initialize the new vport unless it is a persistent port */
        mutex_lock(&ha->vport_lock);
        ret = qla24xx_modify_vp_config(vha);
        mutex_unlock(&ha->vport_lock);

        if (ret != QLA_SUCCESS) {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                goto enable_failed;
        }

        ql_dbg(ql_dbg_taskm, vha, 0x801a,
            "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
        return 0;

enable_failed:
        ql_dbg(ql_dbg_taskm, vha, 0x801b,
            "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
        return 1;
}

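/*
 * qla24xx_configure_vp
 *	Complete vport bring-up: enable RSCN reception (change request #3),
 *	register the vport with the fabric, and mark it VP_ACTIVE.
 */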
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
        struct fc_vport *fc_vport;
        int ret;

        fc_vport = vha->fc_vport;

        ql_dbg(ql_dbg_vport, vha, 0xa002,
            "%s: change request #3.\n", __func__);
        ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
        if (ret != QLA_SUCCESS) {
                ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
                    "receiving of RSCN requests: 0x%x.\n", ret);
                return;
        } else {
                /* Corresponds to SCR enabled */
                clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
        }

        vha->flags.online = 1;
        if (qla24xx_configure_vhba(vha))
                return;

        atomic_set(&vha->vp_state, VP_ACTIVE);
        fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

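/*
 * qla2x00_alert_all_vps
 *	Fan an asynchronous event out to every vport.  vref_count is taken
 *	around each callout so the vport cannot be deleted while
 *	qla2x00_async_event() runs outside vport_slock.
 */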
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
        scsi_qla_host_t *vha, *tvp;
        struct qla_hw_data *ha = rsp->hw;
        int i = 0;
        unsigned long flags;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) {
                if (vha->vp_idx) {
                        if (test_bit(VPORT_DELETE, &vha->dpc_flags))
                                continue;

                        atomic_inc(&vha->vref_count);
                        spin_unlock_irqrestore(&ha->vport_slock, flags);

                        switch (mb[0]) {
                        case MBA_LIP_OCCURRED:
                        case MBA_LOOP_UP:
                        case MBA_LOOP_DOWN:
                        case MBA_LIP_RESET:
                        case MBA_POINT_TO_POINT:
                        case MBA_CHG_IN_CONNECTION:
                                ql_dbg(ql_dbg_async, vha, 0x5024,
                                    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
                                    i, *mb, vha);
                                qla2x00_async_event(vha, rsp, mb);
                                break;
                        case MBA_PORT_UPDATE:
                        case MBA_RSCN_UPDATE:
                                if ((mb[3] & 0xff) == vha->vp_idx) {
                                        ql_dbg(ql_dbg_async, vha, 0x5024,
                                            "Async_event for VP[%d], mb=0x%x vha=%p\n",
                                            i, *mb, vha);
                                        qla2x00_async_event(vha, rsp, mb);
                                }
                                break;
                        }

                        spin_lock_irqsave(&ha->vport_slock, flags);
                        atomic_dec(&vha->vref_count);
                        wake_up(&vha->vref_waitq);
                }
                i++;
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
}

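/*
 * qla2x00_vp_abort_isp
 *	Vport-level ISP abort handling: log the vport out (unless an ISP
 *	reset already did), mark the loop down, and schedule re-enable.
 */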
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
        fc_port_t *fcport;

        /*
         * To exclusively reset vport, we need to log it out first.
         * Note: this control_vp can fail if an ISP reset is already
         * issued; this is expected, as the vp would already be logged
         * out due to the ISP reset.
         */
        if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
                qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
                list_for_each_entry(fcport, &vha->vp_fcports, list)
                        fcport->logout_on_delete = 0;
        }

        /*
         * The physical port will do most of the abort and recovery work.
         * We can just treat it as a loop down.
         */
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
                qla2x00_mark_all_devices_lost(vha);
        } else {
                if (!atomic_read(&vha->loop_down_timer))
                        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
        }

        ql_dbg(ql_dbg_taskm, vha, 0x801d,
            "Scheduling enable of Vport %d.\n", vha->vp_idx);

        return qla24xx_enable_vp(vha);
}

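/*
 * qla2x00_do_dpc_vp
 *	Per-vport DPC worker: handles deferred configuration, PUREX IOCBs,
 *	fcport updates, relogins and loop resync for one vport.
 */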
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
            "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

        /* Check if Fw is ready to configure VP first */
        if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
                if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
                        /* VP acquired. complete port configuration */
                        ql_dbg(ql_dbg_dpc, vha, 0x4014,
                            "Configure VP scheduled.\n");
                        qla24xx_configure_vp(vha);
                        ql_dbg(ql_dbg_dpc, vha, 0x4015,
                            "Configure VP end.\n");
                        return 0;
                }
        }

        if (test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)) {
                if (atomic_read(&vha->loop_state) == LOOP_READY) {
                        qla24xx_process_purex_list(&vha->purex_list);
                        clear_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
                }
        }

        if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
                ql_dbg(ql_dbg_dpc, vha, 0x4016,
                    "FCPort update scheduled.\n");
                qla2x00_update_fcports(vha);
                clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
                ql_dbg(ql_dbg_dpc, vha, 0x4017,
                    "FCPort update end.\n");
        }

        if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
            !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
            atomic_read(&vha->loop_state) != LOOP_DOWN) {

                if (!vha->relogin_jif ||
                    time_after_eq(jiffies, vha->relogin_jif)) {
                        vha->relogin_jif = jiffies + HZ;
                        clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);

                        ql_dbg(ql_dbg_dpc, vha, 0x4018,
                            "Relogin needed scheduled.\n");
                        qla24xx_post_relogin_work(vha);
                }
        }

        if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
            !test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)) {
                clear_bit(RESET_ACTIVE, &vha->dpc_flags);
        }

        if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
                if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags)) {
                        ql_dbg(ql_dbg_dpc, vha, 0x401a,
                            "Loop resync scheduled.\n");
                        qla2x00_loop_resync(vha);
                        clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
                        ql_dbg(ql_dbg_dpc, vha, 0x401b,
                            "Loop resync end.\n");
                }
        }

        ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
            "Exiting %s.\n", __func__);
        return 0;
}

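/*
 * qla2x00_do_dpc_all_vps
 *	Run qla2x00_do_dpc_vp() for every vport.  Only the base port
 *	(vp_idx == 0) may call this; vref_count pins each vport while its
 *	DPC work runs outside vport_slock.
 */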
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *vp, *tvp;
        unsigned long flags = 0;

        if (vha->vp_idx)
                return;
        if (list_empty(&ha->vp_list))
                return;

        clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

        if (!(ha->current_topology & ISP_CFG_F))
                return;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
                if (vp->vp_idx) {
                        atomic_inc(&vp->vref_count);
                        spin_unlock_irqrestore(&ha->vport_slock, flags);

                        qla2x00_do_dpc_vp(vp);

                        spin_lock_irqsave(&ha->vport_slock, flags);
                        atomic_dec(&vp->vref_count);
                }
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
}

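/*
 * qla24xx_vport_create_req_sanity_check
 *	Validate an fc_vport create request: NPIV support, fabric support,
 *	WWPN uniqueness and the vport count limit.  Returns 0 or a
 *	VPCERR_* code.
 */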
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
        struct qla_hw_data *ha = base_vha->hw;
        scsi_qla_host_t *vha;
        uint8_t port_name[WWN_SIZE];

        if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
                return VPCERR_UNSUPPORTED;

        /* Check whether the F/W and H/W support NPIV */
        if (!ha->flags.npiv_supported)
                return VPCERR_UNSUPPORTED;

        /* Check whether an NPIV-capable switch is present */
        if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
                return VPCERR_NO_FABRIC_SUPP;

        /* Check that the WWPN is unique */
        u64_to_wwn(fc_vport->port_name, port_name);
        if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
                return VPCERR_BAD_WWN;
        vha = qla24xx_find_vhost_by_name(ha, port_name);
        if (vha)
                return VPCERR_BAD_WWN;

        /* Check the max-npiv-supports limit */
        if (ha->num_vhosts > ha->max_npiv_vports) {
                ql_dbg(ql_dbg_vport, vha, 0xa004,
                    "num_vhosts %u is bigger than max_npiv_vports %u.\n",
                    ha->num_vhosts, ha->max_npiv_vports);
                return VPCERR_UNSUPPORTED;
        }
        return 0;
}

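/*
 * qla24xx_create_vhost
 *	Allocate and initialize a scsi_qla_host for a new NPIV vport,
 *	including its vp_id, timer and SCSI host parameters.  Returns the
 *	new vport or NULL on failure.
 */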
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
        struct qla_hw_data *ha = base_vha->hw;
        scsi_qla_host_t *vha;
        struct scsi_host_template *sht = &qla2xxx_driver_template;
        struct Scsi_Host *host;

        vha = qla2x00_create_host(sht, ha);
        if (!vha) {
                ql_log(ql_log_warn, vha, 0xa005,
                    "scsi_host_alloc() failed for vport.\n");
                return NULL;
        }

        host = vha->host;
        fc_vport->dd_data = vha;
        /* New host info */
        u64_to_wwn(fc_vport->node_name, vha->node_name);
        u64_to_wwn(fc_vport->port_name, vha->port_name);

        vha->fc_vport = fc_vport;
        vha->device_flags = 0;
        vha->vp_idx = qla24xx_allocate_vp_id(vha);
        if (vha->vp_idx > ha->max_npiv_vports) {
                ql_dbg(ql_dbg_vport, vha, 0xa006,
                    "Couldn't allocate vp_id.\n");
                goto create_vhost_failed;
        }
        vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);

        vha->dpc_flags = 0L;
        ha->dpc_active = 0;
        set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
        set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);

        /*
         * To fix the issue of processing a parent's RSCN for the vport
         * before its SCR is complete.
         */
        set_bit(VP_SCR_NEEDED, &vha->vp_flags);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

        qla2x00_start_timer(vha, WATCH_INTERVAL);

        vha->req = base_vha->req;
        vha->flags.nvme_enabled = base_vha->flags.nvme_enabled;
        host->can_queue = base_vha->req->length + 128;
        host->cmd_per_lun = 3;
        if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
                host->max_cmd_len = 32;
        else
                host->max_cmd_len = MAX_CMDSZ;
        host->max_channel = MAX_BUSES - 1;
        host->max_lun = ql2xmaxlun;
        host->unique_id = host->host_no;
        host->max_id = ha->max_fibre_devices;
        host->transportt = qla2xxx_transport_vport_template;

        ql_dbg(ql_dbg_vport, vha, 0xa007,
            "Detect vport hba %ld at address = %p.\n",
            vha->host_no, vha);

        vha->flags.init_done = 1;

        mutex_lock(&ha->vport_lock);
        set_bit(vha->vp_idx, ha->vp_idx_map);
        ha->cur_vport_count++;
        mutex_unlock(&ha->vport_lock);

        return vha;

create_vhost_failed:
        return NULL;
}

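/*
 * qla25xx_free_req_que
 *	Release a request queue's DMA ring, outstanding-command array and
 *	its slot in req_qid_map.
 */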
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        uint16_t que_id = req->id;

        dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
            sizeof(request_t), req->ring, req->dma);
        req->ring = NULL;
        req->dma = 0;
        if (que_id) {
                ha->req_q_map[que_id] = NULL;
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->req_qid_map);
                mutex_unlock(&ha->vport_lock);
        }
        kfree(req->outstanding_cmds);
        kfree(req);
}

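/*
 * qla25xx_free_rsp_que
 *	Release a response queue's IRQ, DMA ring and rsp_qid_map slot.
 */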
static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
        struct qla_hw_data *ha = vha->hw;
        uint16_t que_id = rsp->id;

        if (rsp->msix && rsp->msix->have_irq) {
                free_irq(rsp->msix->vector, rsp->msix->handle);
                rsp->msix->have_irq = 0;
                rsp->msix->in_use = 0;
                rsp->msix->handle = NULL;
        }
        dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
            sizeof(response_t), rsp->ring, rsp->dma);
        rsp->ring = NULL;
        rsp->dma = 0;
        if (que_id) {
                ha->rsp_q_map[que_id] = NULL;
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->rsp_qid_map);
                mutex_unlock(&ha->vport_lock);
        }
        kfree(rsp);
}

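/*
 * qla25xx_delete_req_que
 *	Tear down a request queue: re-issue the queue-init command with
 *	BIT_0 set in options (queue delete), then free the host-side
 *	resources.
 */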
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
        int ret = QLA_SUCCESS;

        if (req && vha->flags.qpairs_req_created) {
                req->options |= BIT_0;
                ret = qla25xx_init_req_que(vha, req);
                if (ret != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;

                qla25xx_free_req_que(vha, req);
        }

        return ret;
}

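/*
 * qla25xx_delete_rsp_que
 *	Tear down a response queue: re-issue the queue-init command with
 *	BIT_0 set in options (queue delete), then free the host-side
 *	resources.
 */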
int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
        int ret = QLA_SUCCESS;

        if (rsp && vha->flags.qpairs_rsp_created) {
                rsp->options |= BIT_0;
                ret = qla25xx_init_rsp_que(vha, rsp);
                if (ret != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;

                qla25xx_free_rsp_que(vha, rsp);
        }

        return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
        int cnt, ret = 0;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct qla_hw_data *ha = vha->hw;
        struct qla_qpair *qpair, *tqpair;

        if (ql2xmqsupport || ql2xnvmeenable) {
                list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
                    qp_list_elem)
                        qla2xxx_delete_qpair(vha, qpair);
        } else {
                /* Delete request queues */
                for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
                        req = ha->req_q_map[cnt];
                        if (req && test_bit(cnt, ha->req_qid_map)) {
                                ret = qla25xx_delete_req_que(vha, req);
                                if (ret != QLA_SUCCESS) {
                                        ql_log(ql_log_warn, vha, 0x00ea,
                                            "Couldn't delete req que %d.\n",
                                            req->id);
                                        return ret;
                                }
                        }
                }

                /* Delete response queues */
                for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
                        rsp = ha->rsp_q_map[cnt];
                        if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
                                ret = qla25xx_delete_rsp_que(vha, rsp);
                                if (ret != QLA_SUCCESS) {
                                        ql_log(ql_log_warn, vha, 0x00eb,
                                            "Couldn't delete rsp que %d.\n",
                                            rsp->id);
                                        return ret;
                                }
                        }
                }
        }

        return ret;
}

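/*
 * qla25xx_create_req_que
 *	Allocate a new request queue: DMA ring, outstanding-command array
 *	and a free qid from req_qid_map; optionally initialize it in
 *	firmware when startqp is set.  Returns the queue id, or 0 on
 *	failure.
 */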
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
{
        int ret = 0;
        struct req_que *req = NULL;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
        uint16_t que_id = 0;
        device_reg_t *reg;
        uint32_t cnt;

        req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
        if (req == NULL) {
                ql_log(ql_log_fatal, base_vha, 0x00d9,
                    "Failed to allocate memory for request queue.\n");
                goto failed;
        }

        req->length = REQUEST_ENTRY_CNT_24XX;
        req->ring = dma_alloc_coherent(&ha->pdev->dev,
            (req->length + 1) * sizeof(request_t),
            &req->dma, GFP_KERNEL);
        if (req->ring == NULL) {
                ql_log(ql_log_fatal, base_vha, 0x00da,
                    "Failed to allocate memory for request_ring.\n");
                goto que_failed;
        }

        ret = qla2x00_alloc_outstanding_cmds(ha, req);
        if (ret != QLA_SUCCESS)
                goto que_failed;

        mutex_lock(&ha->mq_lock);
        que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
        if (que_id >= ha->max_req_queues) {
                mutex_unlock(&ha->mq_lock);
                ql_log(ql_log_warn, base_vha, 0x00db,
                    "No resources to create additional request queue.\n");
                goto que_failed;
        }
        set_bit(que_id, ha->req_qid_map);
        ha->req_q_map[que_id] = req;
        req->rid = rid;
        req->vp_idx = vp_idx;
        req->qos = qos;

        ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
            "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
            que_id, req->rid, req->vp_idx, req->qos);
        ql_dbg(ql_dbg_init, base_vha, 0x00dc,
            "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
            que_id, req->rid, req->vp_idx, req->qos);
        if (rsp_que < 0)
                req->rsp = NULL;
        else
                req->rsp = ha->rsp_q_map[rsp_que];
        /* Use alternate PCI bus number */
        if (MSB(req->rid))
                options |= BIT_4;
        /* Use alternate PCI devfn */
        if (LSB(req->rid))
                options |= BIT_5;
        req->options = options;

        ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
            "options=0x%x.\n", req->options);
        ql_dbg(ql_dbg_init, base_vha, 0x00dd,
            "options=0x%x.\n", req->options);
        for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
                req->outstanding_cmds[cnt] = NULL;
        req->current_outstanding_cmd = 1;

        req->ring_ptr = req->ring;
        req->ring_index = 0;
        req->cnt = req->length;
        req->id = que_id;
        reg = ISP_QUE_REG(ha, que_id);
        req->req_q_in = &reg->isp25mq.req_q_in;
        req->req_q_out = &reg->isp25mq.req_q_out;
        req->max_q_depth = ha->req_q_map[0]->max_q_depth;
        req->out_ptr = (uint16_t *)(req->ring + req->length);
        mutex_unlock(&ha->mq_lock);
        ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
            "ring_ptr=%p ring_index=%d, "
            "cnt=%d id=%d max_q_depth=%d.\n",
            req->ring_ptr, req->ring_index,
            req->cnt, req->id, req->max_q_depth);
        ql_dbg(ql_dbg_init, base_vha, 0x00de,
            "ring_ptr=%p ring_index=%d, "
            "cnt=%d id=%d max_q_depth=%d.\n",
            req->ring_ptr, req->ring_index, req->cnt,
            req->id, req->max_q_depth);

        if (startqp) {
                ret = qla25xx_init_req_que(base_vha, req);
                if (ret != QLA_SUCCESS) {
                        ql_log(ql_log_fatal, base_vha, 0x00df,
                            "%s failed.\n", __func__);
                        mutex_lock(&ha->mq_lock);
                        clear_bit(que_id, ha->req_qid_map);
                        mutex_unlock(&ha->mq_lock);
                        goto que_failed;
                }
                vha->flags.qpairs_req_created = 1;
        }

        return req->id;

que_failed:
        qla25xx_free_req_que(base_vha, req);
failed:
        return 0;
}

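/* Workqueue handler: drain a qpair's response queue under qp_lock. */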
static void qla_do_work(struct work_struct *work)
{
        unsigned long flags;
        struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
        struct scsi_qla_host *vha = qpair->vha;

        spin_lock_irqsave(&qpair->qp_lock, flags);
        qla24xx_process_response_queue(vha, qpair->rsp);
        spin_unlock_irqrestore(&qpair->qp_lock, flags);
}

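/*
 * qla25xx_create_rsp_que
 *	Allocate a new response queue bound to a qpair's MSI-X vector and
 *	optionally initialize it in firmware.  Returns the queue id, or 0
 *	on failure.
 */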
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
{
        int ret = 0;
        struct rsp_que *rsp = NULL;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
        uint16_t que_id = 0;
        device_reg_t *reg;

        rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
        if (rsp == NULL) {
                ql_log(ql_log_warn, base_vha, 0x0066,
                    "Failed to allocate memory for response queue.\n");
                goto failed;
        }

        rsp->length = RESPONSE_ENTRY_CNT_MQ;
        rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
            (rsp->length + 1) * sizeof(response_t),
            &rsp->dma, GFP_KERNEL);
        if (rsp->ring == NULL) {
                ql_log(ql_log_warn, base_vha, 0x00e1,
                    "Failed to allocate memory for response ring.\n");
                goto que_failed;
        }

        mutex_lock(&ha->mq_lock);
        que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
        if (que_id >= ha->max_rsp_queues) {
                mutex_unlock(&ha->mq_lock);
                ql_log(ql_log_warn, base_vha, 0x00e2,
                    "No resources to create additional response queue.\n");
                goto que_failed;
        }
        set_bit(que_id, ha->rsp_qid_map);

        rsp->msix = qpair->msix;

        ha->rsp_q_map[que_id] = rsp;
        rsp->rid = rid;
        rsp->vp_idx = vp_idx;
        rsp->hw = ha;
        ql_dbg(ql_dbg_init, base_vha, 0x00e4,
            "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
            que_id, rsp->rid, rsp->vp_idx, rsp->hw);
        /* Use alternate PCI bus number */
        if (MSB(rsp->rid))
                options |= BIT_4;
        /* Use alternate PCI devfn */
        if (LSB(rsp->rid))
                options |= BIT_5;
        /* Enable MSIX handshake mode on adapters that cannot disable it */
        if (!IS_MSIX_NACK_CAPABLE(ha))
                options |= BIT_6;

        /* Set option to indicate response queue creation */
        options |= BIT_1;

        rsp->options = options;
        rsp->id = que_id;
        reg = ISP_QUE_REG(ha, que_id);
        rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
        rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
        rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
        mutex_unlock(&ha->mq_lock);
        ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
            "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
            rsp->options, rsp->id, rsp->rsp_q_in,
            rsp->rsp_q_out);
        ql_dbg(ql_dbg_init, base_vha, 0x00e5,
            "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
            rsp->options, rsp->id, rsp->rsp_q_in,
            rsp->rsp_q_out);

        ret = qla25xx_request_irq(ha, qpair, qpair->msix,
            ha->flags.disable_msix_handshake ?
            QLA_MSIX_QPAIR_MULTIQ_RSP_Q : QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS);
        if (ret)
                goto que_failed;

        if (startqp) {
                ret = qla25xx_init_rsp_que(base_vha, rsp);
                if (ret != QLA_SUCCESS) {
                        ql_log(ql_log_fatal, base_vha, 0x00e7,
                            "%s failed.\n", __func__);
                        mutex_lock(&ha->mq_lock);
                        clear_bit(que_id, ha->rsp_qid_map);
                        mutex_unlock(&ha->mq_lock);
                        goto que_failed;
                }
                vha->flags.qpairs_rsp_created = 1;
        }
        rsp->req = NULL;

        qla2x00_init_response_q_entries(rsp);
        if (qpair->hw->wq)
                INIT_WORK(&qpair->q_work, qla_do_work);
        return rsp->id;

que_failed:
        qla25xx_free_rsp_que(base_vha, rsp);
failed:
        return 0;
}

static void qla_ctrlvp_sp_done(srb_t *sp, int res)
{
        if (sp->comp)
                complete(sp->comp);
        /* don't free sp here. Let the caller do the free */
}

/**
 * qla24xx_control_vp() - Enable a virtual port for given host
 * @vha:	adapter block pointer
 * @cmd:	command type to be sent for enable virtual port
 *
 * Return:	qla2xxx local function return status code.
 */
int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
{
        int rval = QLA_MEMORY_ALLOC_FAILED;
        struct qla_hw_data *ha = vha->hw;
        int vp_index = vha->vp_idx;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        DECLARE_COMPLETION_ONSTACK(comp);
        srb_t *sp;

        ql_dbg(ql_dbg_vport, vha, 0x10c1,
            "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);

        if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
                return QLA_PARAMETER_ERROR;

        /* ref: INIT */
        sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
        if (!sp)
                return rval;

        sp->type = SRB_CTRL_VP;
        sp->name = "ctrl_vp";
        sp->comp = &comp;
        qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
            qla_ctrlvp_sp_done);
        sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
        sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_async, vha, 0xffff,
                    "%s: %s Failed submission. %x.\n",
                    __func__, sp->name, rval);
                goto done;
        }

        ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
            sp->name, sp->handle);

        wait_for_completion(&comp);
        sp->comp = NULL;

        rval = sp->rc;
        switch (rval) {
        case QLA_FUNCTION_TIMEOUT:
                ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
                    __func__, sp->name, rval);
                break;
        case QLA_SUCCESS:
                ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
                    __func__, sp->name);
                break;
        default:
                ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
                    __func__, sp->name, rval);
                break;
        }

done:
        /* ref: INIT */
        kref_put(&sp->cmd_kref, qla2x00_sp_release);
        return rval;
}