0001
0002
0003
0004
0005
0006
0007 #include "qla_target.h"
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017 static inline uint16_t
0018 qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
0019 {
0020 uint16_t iocbs;
0021
0022 iocbs = 1;
0023 if (dsds > 1) {
0024 iocbs += (dsds - 1) / 5;
0025 if ((dsds - 1) % 5)
0026 iocbs++;
0027 }
0028 return iocbs;
0029 }
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041 static __inline__ uint16_t
0042 qla2x00_debounce_register(volatile __le16 __iomem *addr)
0043 {
0044 volatile uint16_t first;
0045 volatile uint16_t second;
0046
0047 do {
0048 first = rd_reg_word(addr);
0049 barrier();
0050 cpu_relax();
0051 second = rd_reg_word(addr);
0052 } while (first != second);
0053
0054 return (first);
0055 }
0056
0057 static inline void
0058 qla2x00_poll(struct rsp_que *rsp)
0059 {
0060 struct qla_hw_data *ha = rsp->hw;
0061
0062 if (IS_P3P_TYPE(ha))
0063 qla82xx_poll(0, rsp);
0064 else
0065 ha->isp_ops->intr_handler(0, rsp);
0066 }
0067
0068 static inline uint8_t *
0069 host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
0070 {
0071 uint32_t *ifcp = (uint32_t *) fcp;
0072 uint32_t *ofcp = (uint32_t *) fcp;
0073 uint32_t iter = bsize >> 2;
0074
0075 for (; iter ; iter--)
0076 *ofcp++ = swab32(*ifcp++);
0077
0078 return fcp;
0079 }
0080
0081 static inline void
0082 host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
0083 {
0084 uint32_t *isrc = (uint32_t *) src;
0085 __le32 *odest = (__le32 *) dst;
0086 uint32_t iter = bsize >> 2;
0087
0088 for ( ; iter--; isrc++)
0089 *odest++ = cpu_to_le32(*isrc);
0090 }
0091
/*
 * Free every DSD entry accumulated on @ctx->dsd_list back to the
 * driver's DMA pool, then reinitialize the list head so the context
 * can be reused.
 */
static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
{
	struct dsd_dma *dsd, *tdsd;

	/* Safe iteration: each entry is unlinked and kfree'd in the loop. */
	list_for_each_entry_safe(dsd, tdsd, &ctx->dsd_list, list) {
		dma_pool_free(ha->dl_dma_pool, dsd->dsd_addr,
		    dsd->dsd_list_dma);
		list_del(&dsd->list);
		kfree(dsd);
	}
	INIT_LIST_HEAD(&ctx->dsd_list);
}
0106
/*
 * Record a discovery-state transition for @fcport.  The plain
 * disc_state field holds the current state; shadow_disc_state keeps a
 * small history by shifting each new state into the low 4 bits with a
 * lock-free cmpxchg loop, and the transition is traced via ql_dbg.
 */
static inline void
qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state)
{
	int old_val;
	uint8_t shiftbits, mask;

	/* 4 bits per state; this must grow if the number of states exceeds 16. */
	shiftbits = 4;
	mask = (1 << shiftbits) - 1;

	fcport->disc_state = state;
	while (1) {
		old_val = atomic_read(&fcport->shadow_disc_state);
		/* Retry until the cmpxchg installs our shifted-in state
		 * without a concurrent updater racing us. */
		if (old_val == atomic_cmpxchg(&fcport->shadow_disc_state,
		    old_val, (old_val << shiftbits) | state)) {
			ql_dbg(ql_dbg_disc, fcport->vha, 0x2134,
			    "FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n",
			    fcport->port_name, port_dstate_str[old_val & mask],
			    port_dstate_str[state], fcport->d_id.b24);
			return;
		}
	}
}
0130
/*
 * Decide whether HBA protection-information error checking should be
 * enabled for this command, based on the command's protection operation
 * and the ql2xenablehba_err_chk module parameter:
 *
 *   READ_STRIP / WRITE_INSERT  - checked when ql2xenablehba_err_chk >= 1
 *   READ_PASS  / WRITE_PASS    - checked when ql2xenablehba_err_chk >= 2
 *   READ_INSERT / WRITE_STRIP  - always checked
 *
 * Returns 1 to enable HBA error checking, 0 otherwise.
 */
static inline int
qla2x00_hba_err_chk_enabled(srb_t *sp)
{
	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		return 1;
	}
	return 0;
}
0158
/*
 * Return nonzero while an ISP abort/reset is pending, active, or being
 * retried on either the physical (base) host or this vha.  Both are
 * checked because dpc_flags on the base host cover chip-wide resets
 * that affect every virtual port.
 */
static inline int
qla2x00_reset_active(scsi_qla_host_t *vha)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}
0171
0172 static inline int
0173 qla2x00_chip_is_down(scsi_qla_host_t *vha)
0174 {
0175 return (qla2x00_reset_active(vha) || !vha->hw->flags.fw_started);
0176 }
0177
0178 static void qla2xxx_init_sp(srb_t *sp, scsi_qla_host_t *vha,
0179 struct qla_qpair *qpair, fc_port_t *fcport)
0180 {
0181 memset(sp, 0, sizeof(*sp));
0182 sp->fcport = fcport;
0183 sp->iocbs = 1;
0184 sp->vha = vha;
0185 sp->qpair = qpair;
0186 sp->cmd_type = TYPE_SRB;
0187
0188 kref_init(&sp->cmd_kref);
0189 INIT_LIST_HEAD(&sp->elem);
0190 }
0191
/*
 * Allocate and initialize an SRB from @qpair's mempool, taking a busy
 * reference on the qpair for its lifetime.  Returns NULL if the qpair
 * refuses new work or the allocation fails.
 */
static inline srb_t *
qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
    fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	uint8_t bail;

	/* NOTE(review): bail is presumably set when the qpair is being torn
	 * down and must not accept new commands - confirm against the
	 * QLA_QPAIR_MARK_BUSY definition. */
	QLA_QPAIR_MARK_BUSY(qpair, bail);
	if (unlikely(bail))
		return NULL;

	sp = mempool_alloc(qpair->srb_mempool, flag);
	if (sp)
		qla2xxx_init_sp(sp, vha, qpair, fcport);
	else
		/* Allocation failed: drop the busy reference we just took. */
		QLA_QPAIR_MARK_NOT_BUSY(qpair);
	return sp;
}
0210
/* Poison callbacks installed by qla2xxx_rel_qpair_sp() on a freed SRB so
 * that any use of sp->done/sp->free after release is caught loudly. */
void qla2xxx_rel_done_warning(srb_t *sp, int res);
void qla2xxx_rel_free_warning(srb_t *sp);
0213
/*
 * Return an SRB to its qpair's mempool and drop the qpair busy
 * reference taken at allocation.  The done/free hooks are pointed at
 * warning stubs first, so a stale caller invoking them after release
 * is detected rather than silently re-running real completion code.
 */
static inline void
qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
{
	sp->qpair = NULL;
	sp->done = qla2xxx_rel_done_warning;
	sp->free = qla2xxx_rel_free_warning;
	mempool_free(sp, qpair->srb_mempool);
	QLA_QPAIR_MARK_NOT_BUSY(qpair);
}
0223
0224 static inline srb_t *
0225 qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
0226 {
0227 srb_t *sp = NULL;
0228 uint8_t bail;
0229 struct qla_qpair *qpair;
0230
0231 QLA_VHA_MARK_BUSY(vha, bail);
0232 if (unlikely(bail))
0233 return NULL;
0234
0235 qpair = vha->hw->base_qpair;
0236 sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, flag);
0237 if (!sp)
0238 goto done;
0239
0240 sp->vha = vha;
0241 done:
0242 if (!sp)
0243 QLA_VHA_MARK_NOT_BUSY(vha);
0244 return sp;
0245 }
0246
/*
 * Release an SRB obtained via qla2x00_get_sp(): drop the host busy
 * reference, then hand the SRB back to its qpair's mempool.
 */
static inline void
qla2x00_rel_sp(srb_t *sp)
{
	QLA_VHA_MARK_NOT_BUSY(sp->vha);
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
0253
0254 static inline int
0255 qla2x00_gid_list_size(struct qla_hw_data *ha)
0256 {
0257 if (IS_QLAFX00(ha))
0258 return sizeof(uint32_t) * 32;
0259 else
0260 return sizeof(struct gid_list_info) * ha->max_fibre_devices;
0261 }
0262
/*
 * If a mailbox command is waiting on interrupt-driven completion and
 * @status carries the MBX_INTERRUPT bit, flip the flag bits from
 * "waiting" to "completed" and wake the waiter via mbx_intr_comp.
 */
static inline void
qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
{
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
}
0273
/*
 * Process the FCP status qualifier of a completed command and, when it
 * requests I/O throttling, arm fcport->retry_delay_timestamp so new
 * I/Os to this port are held off until that jiffies value passes.
 */
static inline void
qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t sts_qual)
{
	u8 scope;
	u16 qual;
#define SQ_SCOPE_MASK		0xc000 /* SAM-6 rsp pri and sensitive */
#define SQ_SCOPE_SHIFT		14
#define SQ_QUAL_MASK		0x3fff

#define SQ_MAX_WAIT_SEC		60 /* Max I/O hold off time in seconds. */
#define SQ_MAX_WAIT_TIME	(SQ_MAX_WAIT_SEC * 10) /* in 100ms. */

	if (!sts_qual) /* Common case. */
		return;

	scope = (sts_qual & SQ_SCOPE_MASK) >> SQ_SCOPE_SHIFT;
	/* Handle only scope 1 or scope 2, which is for I-T nexus. */
	if (scope != 1 && scope != 2)
		return;

	/* Skip processing, if retry delay timer is already in effect. */
	if (fcport->retry_delay_timestamp &&
	    time_before(jiffies, fcport->retry_delay_timestamp))
		return;

	qual = sts_qual & SQ_QUAL_MASK;
	if (qual < 1 || qual > 0x3fef)
		return;
	qual = min(qual, (u16)SQ_MAX_WAIT_TIME);

	/* qual is expressed in 100ms increments. */
	fcport->retry_delay_timestamp = jiffies + (qual * HZ / 10);

	ql_log(ql_log_warn, fcport->vha, 0x5101,
	    "%8phC: I/O throttling requested (status qualifier = %04xh), holding off I/Os for %ums.\n",
	    fcport->port_name, sts_qual, qual * 100);
}
0311
0312 static inline bool
0313 qla_is_exch_offld_enabled(struct scsi_qla_host *vha)
0314 {
0315 if (qla_ini_mode_enabled(vha) &&
0316 (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
0317 return true;
0318 else if (qla_tgt_mode_enabled(vha) &&
0319 (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
0320 return true;
0321 else if (qla_dual_mode_enabled(vha) &&
0322 ((vha->ql2xiniexchg + vha->ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
0323 return true;
0324 else
0325 return false;
0326 }
0327
0328 static inline void
0329 qla_cpu_update(struct qla_qpair *qpair, uint16_t cpuid)
0330 {
0331 qpair->cpuid = cpuid;
0332
0333 if (!list_empty(&qpair->hints_list)) {
0334 struct qla_qpair_hint *h;
0335
0336 list_for_each_entry(h, &qpair->hints_list, hint_elem)
0337 h->cpuid = qpair->cpuid;
0338 }
0339 }
0340
0341 static inline struct qla_qpair_hint *
0342 qla_qpair_to_hint(struct qla_tgt *tgt, struct qla_qpair *qpair)
0343 {
0344 struct qla_qpair_hint *h;
0345 u16 i;
0346
0347 for (i = 0; i < tgt->ha->max_qpairs + 1; i++) {
0348 h = &tgt->qphints[i];
0349 if (h->qpair == qpair)
0350 return h;
0351 }
0352
0353 return NULL;
0354 }
0355
0356 static inline void
0357 qla_83xx_start_iocbs(struct qla_qpair *qpair)
0358 {
0359 struct req_que *req = qpair->req;
0360
0361 req->ring_index++;
0362 if (req->ring_index == req->length) {
0363 req->ring_index = 0;
0364 req->ring_ptr = req->ring;
0365 } else
0366 req->ring_ptr++;
0367
0368 wrt_reg_dword(req->req_q_in, req->ring_index);
0369 }
0370
0371 static inline int
0372 qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)
0373 {
0374 uint32_t data;
0375
0376 data =
0377 ((uint8_t *)vha->hw->nvram)[NVRAM_DUAL_FCP_NVME_FLAG_OFFSET];
0378
0379
0380 return (data >> 6) & BIT_0 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME;
0381 }
0382
/* IOCB resource-accounting types recorded in struct iocb_resource and
 * consumed by qla_get_iocbs()/qla_put_iocbs(). */
enum {
	RESOURCE_NONE,
	RESOURCE_INI,
};
0387
/*
 * Reserve @iores->iocb_cnt IOCBs against the firmware IOCB budget.
 * Fast path: accept if this qpair is under its per-queue limit.  Slow
 * path: sum usage across all qpairs and accept if the adapter-wide
 * limit still has room.  Returns 0 on success, -ENOSPC when the
 * reservation would exceed the global limit (res_type is then cleared
 * so qla_put_iocbs() will not release anything).
 *
 * NOTE(review): on the success paths res_type is left untouched - the
 * caller appears to preset it (e.g. to RESOURCE_INI) before calling;
 * confirm at the call sites.
 */
static inline int
qla_get_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
{
	u16 iocbs_used, i;
	struct qla_hw_data *ha = qp->vha->hw;

	/* Accounting disabled: accept everything, mark nothing reserved. */
	if (!ql2xenforce_iocb_limit) {
		iores->res_type = RESOURCE_NONE;
		return 0;
	}

	if ((iores->iocb_cnt + qp->fwres.iocbs_used) < qp->fwres.iocbs_qp_limit) {
		qp->fwres.iocbs_used += iores->iocb_cnt;
		return 0;
	} else {
		/* No need to acquire qpair lock. It's just rough accounting. */
		iocbs_used = ha->base_qpair->fwres.iocbs_used;
		for (i = 0; i < ha->max_qpairs; i++) {
			if (ha->queue_pair_map[i])
				iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
		}

		if ((iores->iocb_cnt + iocbs_used) < qp->fwres.iocbs_limit) {
			qp->fwres.iocbs_used += iores->iocb_cnt;
			return 0;
		} else {
			iores->res_type = RESOURCE_NONE;
			return -ENOSPC;
		}
	}
}
0419
0420 static inline void
0421 qla_put_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
0422 {
0423 switch (iores->res_type) {
0424 case RESOURCE_NONE:
0425 break;
0426 default:
0427 if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
0428 qp->fwres.iocbs_used -= iores->iocb_cnt;
0429 } else {
0430
0431 qp->fwres.iocbs_used = 0;
0432 }
0433 break;
0434 }
0435 iores->res_type = RESOURCE_NONE;
0436 }
0437
0438 #define ISP_REG_DISCONNECT 0xffffffffU
0439
0440
0441
0442
0443
0444
0445
0446
0447
0448
0449
0450
0451
0452
0453
0454 static inline
0455 uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
0456 {
0457 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
0458 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
0459
0460 if (IS_P3P_TYPE(ha))
0461 return ((rd_reg_dword(®82->host_int)) == ISP_REG_DISCONNECT);
0462 else
0463 return ((rd_reg_dword(®->host_status)) ==
0464 ISP_REG_DISCONNECT);
0465 }
0466
0467 static inline
0468 bool qla_pci_disconnected(struct scsi_qla_host *vha,
0469 struct device_reg_24xx __iomem *reg)
0470 {
0471 uint32_t stat;
0472 bool ret = false;
0473
0474 stat = rd_reg_dword(®->host_status);
0475 if (stat == 0xffffffff) {
0476 ql_log(ql_log_info, vha, 0x8041,
0477 "detected PCI disconnect.\n");
0478 qla_schedule_eeh_work(vha);
0479 ret = true;
0480 }
0481 return ret;
0482 }
0483
0484 static inline bool
0485 fcport_is_smaller(fc_port_t *fcport)
0486 {
0487 if (wwn_to_u64(fcport->port_name) <
0488 wwn_to_u64(fcport->vha->port_name))
0489 return true;
0490 else
0491 return false;
0492 }
0493
static inline bool
fcport_is_bigger(fc_port_t *fcport)
{
	/* Logical complement of fcport_is_smaller(); note this is also
	 * true when the two WWPNs compare equal. */
	return !fcport_is_smaller(fcport);
}