/*
 * This is the Fusion MPT base driver providing common API layer interface
 * for access to MPT (Message Passing Technology) firmware.
 *
 * Copyright (C) 2012-2014  LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/kthread.h>
#include <asm/page.h>	/* To get host page size per arch */
#include <linux/aer.h>

#include "mpt3sas_base.h"

static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];

#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */

#define MAX_HBA_QUEUE_DEPTH	30000
#define MAX_CHAIN_DEPTH		100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0444);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");

static int max_sgl_entries = -1;
module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");

static int msix_disable = -1;
module_param(msix_disable, int, 0444);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0444);
MODULE_PARM_DESC(max_msix_vectors,
	" max msix vectors");

static int irqpoll_weight = -1;
module_param(irqpoll_weight, int, 0444);
MODULE_PARM_DESC(irqpoll_weight,
	"irq poll weight (default= one fourth of HBA queue depth)");

static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
	" enable detection of firmware fault and halt firmware - (default=0)");

static int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode,
	"Performance mode (only for Aero/Sea Generation), options:\n\t\t"
	"0 - balanced: high iops mode is enabled &\n\t\t"
	"interrupt coalescing is enabled only on high iops queues,\n\t\t"
	"1 - iops: high iops mode is disabled &\n\t\t"
	"interrupt coalescing is enabled on all queues,\n\t\t"
	"2 - latency: high iops mode is disabled &\n\t\t"
	"interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
	"\t\tdefault - default perf_mode is 'balanced'"
	);

static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues to be used for io_uring poll mode.\n\t\t"
	"This parameter is effective only if host_tagset_enable=1;\n\t\t"
	"when poll_queues are enabled, perf_mode is set to latency mode.\n\t\t"
	);

enum mpt3sas_perf_mode {
	MPT_PERF_MODE_DEFAULT	= -1,
	MPT_PERF_MODE_BALANCED	= 0,
	MPT_PERF_MODE_IOPS	= 1,
	MPT_PERF_MODE_LATENCY	= 2,
};

static int
_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
	u32 ioc_state, int timeout);
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
static void
_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
u8
mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
	u8 status, void *mpi_request, int sz)
{
	u8 issue_reset = 0;

	if (!(status & MPT3_CMD_RESET))
		issue_reset = 1;

	ioc_err(ioc, "Command %s\n",
		issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
	_debug_dump_mf(mpi_request, sz);

	return issue_reset;
}
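
/**
 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug
 * @val: new module parameter value
 * @kp: kernel parameter descriptor
 *
 * Propagates the mpt3sas_fwfault_debug module parameter to every
 * adapter registered on mpt3sas_ioc_list.
 */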
static int
_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
{
	int ret = param_set_int(val, kp);
	struct MPT3SAS_ADAPTER *ioc;

	if (ret)
		return ret;

	/* global ioc spinlock to protect controller list on list operations */
	pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
		ioc->fwfault_debug = mpt3sas_fwfault_debug;
	spin_unlock(&gioc_lock);
	return 0;
}
module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
	param_get_int, &mpt3sas_fwfault_debug, 0644);
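
/**
 * _base_readl_aero - retry readl for max three times
 * @addr: MPT Fusion system interface register address
 *
 * Retry the readl() up to three times while it returns zero; on
 * Aero/Sea generation controllers a register read can transiently
 * return an invalid zero value.
 */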
static inline u32
_base_readl_aero(const volatile void __iomem *addr)
{
	u32 i = 0, ret_val;

	do {
		ret_val = readl(addr);
		i++;
	} while (ret_val == 0 && i < 3);

	return ret_val;
}

static inline u32
_base_readl(const volatile void __iomem *addr)
{
	return readl(addr);
}
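
/**
 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
 *				  in BAR0 space.
 * @ioc: per adapter object
 * @reply: reply message frame (lower 32bit addr)
 * @index: System request message index
 */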
static void
_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
	u32 index)
{
	/*
	 * 256 is offset within sys register.
	 * 256 offset MPI frame starts. Max MPI frame supported is 32.
	 * 32 * 128 = 4K. From here, Clone of reply free for mcpu starts.
	 */
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
			MPI_FRAME_START_OFFSET +
			(cmd_credit * ioc->request_sz) + (index * sizeof(u32));

	writel(reply, reply_free_iomem);
}
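
/**
 * _base_clone_mpi_to_sys_mem - writes/copies MPI frames
 *				to system/BAR0 region
 * @dst_iomem: pointer to the destination location in BAR0 space
 * @src: pointer to the source data
 * @size: size of data to be copied
 */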
static void
_base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
{
	int i;
	u32 *src_virt_mem = (u32 *)src;

	for (i = 0; i < size/4; i++)
		writel((u32)src_virt_mem[i],
		    (void __iomem *)dst_iomem + (i * 4));
}
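
/**
 * _base_clone_to_sys_mem - writes/copies data to system/BAR0 region
 * @dst_iomem: pointer to the destination location in BAR0 space
 * @src: pointer to the source data
 * @size: size of data to be copied
 */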
static void
_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
{
	int i;
	u32 *src_virt_mem = (u32 *)(src);

	for (i = 0; i < size/4; i++)
		writel((u32)src_virt_mem[i],
			(void __iomem *)dst_iomem + (i * 4));
}
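
/**
 * _base_get_chain - calculates and returns virtual chain address
 *			for the provided smid in BAR0 space
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: scatter gather chain count
 *
 * Return: the chain address.
 */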
static inline void __iomem*
_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u8 sge_chain_count)
{
	void __iomem *base_chain, *chain_virt;
	u16 cmd_credit = ioc->facts.RequestCredit + 1;

	base_chain = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
		(cmd_credit * ioc->request_sz) +
		REPLY_FREE_POOL_SIZE;
	chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
			ioc->request_sz) + (sge_chain_count * ioc->request_sz);
	return chain_virt;
}
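
/**
 * _base_get_chain_phys - calculates and returns physical chain
 *			address for the provided smid in BAR0 space
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: scatter gather chain count
 *
 * Return: physical chain address.
 */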
static inline phys_addr_t
_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u8 sge_chain_count)
{
	phys_addr_t base_chain_phys, chain_phys;
	u16 cmd_credit = ioc->facts.RequestCredit + 1;

	base_chain_phys = ioc->chip_phys + MPI_FRAME_START_OFFSET +
		(cmd_credit * ioc->request_sz) +
		REPLY_FREE_POOL_SIZE;
	chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
			ioc->request_sz) + (sge_chain_count * ioc->request_sz);
	return chain_phys;
}
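
/**
 * _base_get_buffer_bar0 - calculates and returns the BAR0-mapped host
 *			buffer address for the provided smid
 *			(each smid gets a 64K region after the chain area)
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: the host buffer address.
 */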
static void __iomem *
_base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	void __iomem *chain_end = _base_get_chain(ioc,
			cmd_credit + 1,
			ioc->facts.MaxChainDepth);
	return chain_end + (smid * 64 * 1024);
}
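
/**
 * _base_get_buffer_phys_bar0 - calculates and returns the BAR0-mapped
 *			host buffer physical address for the provided smid
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: the host buffer physical address.
 */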
static phys_addr_t
_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
			cmd_credit + 1,
			ioc->facts.MaxChainDepth);
	return chain_end_phys + (smid * 64 * 1024);
}
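
/**
 * _base_get_chain_buffer_dma_to_chain_buffer - iterates the chain
 *			lookup list and returns the chain_buffer virtual
 *			address matching the given dma address
 * @ioc: per adapter object
 * @chain_buffer_dma: chain buffer dma address
 *
 * Return: pointer to the chain buffer, or NULL if not found.
 */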
static void *
_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
	dma_addr_t chain_buffer_dma)
{
	u16 index, j;
	struct chain_tracker *ct;

	for (index = 0; index < ioc->scsiio_depth; index++) {
		for (j = 0; j < ioc->chains_needed_per_io; j++) {
			ct = &ioc->chain_lookup[index].chains_per_smid[j];
			if (ct && ct->chain_buffer_dma == chain_buffer_dma)
				return ct->chain_buffer;
		}
	}
	ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
	return NULL;
}
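
/**
 * _clone_sg_entries - copies the SGEs and their data to the BAR0-mapped
 *			system memory (mCPU/MPI endpoint use case)
 * @ioc: per adapter object
 * @mpi_request: mf request pointer
 * @smid: system request message index
 */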
static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
	void *mpi_request, u16 smid)
{
	Mpi2SGESimple32_t *sgel, *sgel_next;
	u32  sgl_flags, sge_chain_count = 0;
	bool is_write = false;
	u16 i = 0;
	void __iomem *buffer_iomem;
	phys_addr_t buffer_iomem_phys;
	void __iomem *buff_ptr;
	phys_addr_t buff_ptr_phys;
	void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
	void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
	phys_addr_t dst_addr_phys;
	MPI2RequestHeader_t *request_hdr;
	struct scsi_cmnd *scmd;
	struct scatterlist *sg_scmd = NULL;
	int is_scsiio_req = 0;

	request_hdr = (MPI2RequestHeader_t *) mpi_request;

	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
		Mpi25SCSIIORequest_t *scsiio_request =
			(Mpi25SCSIIORequest_t *)mpi_request;
		sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
		is_scsiio_req = 1;
	} else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
		Mpi2ConfigRequest_t  *config_req =
			(Mpi2ConfigRequest_t *)mpi_request;
		sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
	} else
		return;

	/*
	 * From smid we can get scsi_cmd, once we have sg_scmd,
	 * we just need to get sg_virt and sg_next to get virtual
	 * address associated with sgel->Address.
	 */

	if (is_scsiio_req) {
		/* Get scsi_cmd using smid */
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (scmd == NULL) {
			ioc_err(ioc, "scmd is NULL\n");
			return;
		}

		/* Get sg_scmd from scmd provided */
		sg_scmd = scsi_sglist(scmd);
	}

	/*
	 * BAR0 layout for the mCPU use case:
	 * 0 - 255	System register
	 * 256 - 4352	MPI Frame (based on maxCredit 32)
	 * 4352 - 4864	Reply_free pool
	 * 4864 - 17152	SGE chain elements
	 * 17152 - x	Host buffer mapped with smid
	 *		(each smid can have 64K max IO)
	 * BAR0 + last 1K: MSIX Addr and Data
	 */

	buffer_iomem = _base_get_buffer_bar0(ioc, smid);
	buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);

	buff_ptr = buffer_iomem;
	buff_ptr_phys = buffer_iomem_phys;
	WARN_ON(buff_ptr_phys > U32_MAX);

	if (le32_to_cpu(sgel->FlagsLength) &
			(MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
		is_write = true;

	for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {

		sgl_flags =
		    (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);

		switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
		case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
			/*
			 * Helper function which, on passing
			 * chain_buffer_dma, returns chain_buffer. Get
			 * the virtual address for sgel->Address.
			 */
			sgel_next =
				_base_get_chain_buffer_dma_to_chain_buffer(ioc,
						le32_to_cpu(sgel->Address));
			if (sgel_next == NULL)
				return;
			/*
			 * This is copying a 128 byte chain
			 * frame (not a host buffer).
			 */
			dst_chain_addr[sge_chain_count] =
				_base_get_chain(ioc,
					smid, sge_chain_count);
			src_chain_addr[sge_chain_count] =
						(void *) sgel_next;
			dst_addr_phys = _base_get_chain_phys(ioc,
						smid, sge_chain_count);
			WARN_ON(dst_addr_phys > U32_MAX);
			sgel->Address =
				cpu_to_le32(lower_32_bits(dst_addr_phys));
			sgel = sgel_next;
			sge_chain_count++;
			break;
		case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
			if (is_write) {
				if (is_scsiio_req) {
					_base_clone_to_sys_mem(buff_ptr,
					    sg_virt(sg_scmd),
					    (le32_to_cpu(sgel->FlagsLength) &
					    0x00ffffff));
					/*
					 * The buffer must be 32-bit
					 * addressable (see the WARN_ON on
					 * buff_ptr_phys above).
					 */
					sgel->Address =
					    cpu_to_le32((u32)buff_ptr_phys);
				} else {
					_base_clone_to_sys_mem(buff_ptr,
					    ioc->config_vaddr,
					    (le32_to_cpu(sgel->FlagsLength) &
					    0x00ffffff));
					sgel->Address =
					    cpu_to_le32((u32)buff_ptr_phys);
				}
			}
			buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
			    0x00ffffff);
			buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
			    0x00ffffff);
			if ((le32_to_cpu(sgel->FlagsLength) &
			    (MPI2_SGE_FLAGS_END_OF_BUFFER
					<< MPI2_SGE_FLAGS_SHIFT)))
				goto eob_clone_chain;
			else {
				/*
				 * Every single element in MPT will have
				 * an associated sg_next. Better to sanity
				 * check that sg_next is not NULL, but it
				 * will be a bug if it is null.
				 */
				if (is_scsiio_req) {
					sg_scmd = sg_next(sg_scmd);
					if (sg_scmd)
						sgel++;
					else
						goto eob_clone_chain;
				}
			}
			break;
		}
	}

eob_clone_chain:
	for (i = 0; i < sge_chain_count; i++) {
		if (is_scsiio_req)
			_base_clone_to_sys_mem(dst_chain_addr[i],
				src_chain_addr[i], ioc->request_sz);
	}
}
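
/**
 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
 * @arg: input argument, used to derive ioc
 *
 * Return:
 * 0 if controller is removed from pci subsystem.
 * -1 for other case.
 */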
static int mpt3sas_remove_dead_ioc_func(void *arg)
{
	struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
	struct pci_dev *pdev;

	if (!ioc)
		return -1;

	pdev = ioc->pdev;
	if (!pdev)
		return -1;
	pci_stop_and_remove_bus_device_locked(pdev);
	return 0;
}
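
/**
 * _base_sync_drv_fw_timestamp - sync driver-firmware timestamp
 * @ioc: per adapter object
 *
 * Sends the current host timestamp to the firmware through an
 * IO_UNIT_CONTROL request.
 */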
static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26IoUnitControlRequest_t *mpi_request;
	Mpi26IoUnitControlReply_t *mpi_reply;
	u16 smid;
	ktime_t current_time;
	u64 TimeStamp = 0;
	u8 issue_reset = 0;

	mutex_lock(&ioc->scsih_cmds.mutex);
	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "scsih_cmd in use %s\n", __func__);
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "Failed obtaining a smid %s\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI26_CTRL_OP_SET_IOC_PARAMETER;
	mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP;
	current_time = ktime_get_real();
	TimeStamp = ktime_to_ms(current_time);
	mpi_request->Reserved7 = cpu_to_le32(TimeStamp >> 32);
	mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp & 0xFFFFFFFF);
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	dinitprintk(ioc, ioc_info(ioc,
	    "Io Unit Control Sync TimeStamp (sending), @time %lld ms\n",
	    TimeStamp));
	wait_for_completion_timeout(&ioc->scsih_cmds.done,
		MPT3SAS_TIMESYNC_TIMEOUT_SECONDS*HZ);
	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->scsih_cmds.status, mpi_request,
		    sizeof(Mpi2SasIoUnitControlRequest_t)/4, issue_reset);
		goto issue_host_reset;
	}
	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		dinitprintk(ioc, ioc_info(ioc,
		    "Io Unit Control sync timestamp (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
		    le16_to_cpu(mpi_reply->IOCStatus),
		    le32_to_cpu(mpi_reply->IOCLogInfo)));
	}
issue_host_reset:
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
out:
	mutex_unlock(&ioc->scsih_cmds.mutex);
}
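
/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 *
 * Context: sleep.
 */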
static void
_base_fault_reset_work(struct work_struct *work)
{
	struct MPT3SAS_ADAPTER *ioc =
	    container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
	unsigned long flags;
	u32 doorbell;
	int rc;
	struct task_struct *p;

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) ||
	    ioc->pci_error_recovery)
		goto rearm_timer;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	doorbell = mpt3sas_base_get_iocstate(ioc, 0);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
		ioc_err(ioc, "SAS host is non-operational !!!!\n");

		/*
		 * Tolerate a few polling intervals of non-operational
		 * state before declaring the IOC dead and removing it.
		 */
		if (ioc->non_operational_loop++ < 5) {
			spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
			    flags);
			goto rearm_timer;
		}

		/*
		 * Call _scsih_flush_pending_cmds callback so that we flush all
		 * pending commands back to OS. This call is required to avoid
		 * deadlock at block layer. Dead IOC will fail to do diag reset,
		 * and this call is safe since dead ioc will never return any
		 * command back from HW.
		 */
		mpt3sas_base_pause_mq_polling(ioc);
		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
		/*
		 * Set remove_host flag early since kernel thread will
		 * take some time to execute.
		 */
		ioc->remove_host = 1;
		/* Remove the dead ioc in a separate kernel thread */
		p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
		    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
		if (IS_ERR(p))
			ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
				__func__);
		else
			ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
				__func__);
		return; /* don't rearm timer */
	}

	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
		u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
		    ioc->manu_pg11.CoreDumpTOSec :
		    MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;

		timeout /= (FAULT_POLLING_INTERVAL/1000);

		if (ioc->ioc_coredump_loop == 0) {
			mpt3sas_print_coredump_info(ioc,
			    doorbell & MPI2_DOORBELL_DATA_MASK);
			/* do not accept any IOs and disable the interrupts */
			spin_lock_irqsave(
			    &ioc->ioc_reset_in_progress_lock, flags);
			ioc->shost_recovery = 1;
			spin_unlock_irqrestore(
			    &ioc->ioc_reset_in_progress_lock, flags);
			mpt3sas_base_mask_interrupts(ioc);
			mpt3sas_base_pause_mq_polling(ioc);
			_base_clear_outstanding_commands(ioc);
		}

		ioc_info(ioc, "%s: CoreDump loop %d.",
		    __func__, ioc->ioc_coredump_loop);

		/* Wait until CoreDump completes or times out */
		if (ioc->ioc_coredump_loop++ < timeout) {
			spin_lock_irqsave(
			    &ioc->ioc_reset_in_progress_lock, flags);
			goto rearm_timer;
		}
	}

	if (ioc->ioc_coredump_loop) {
		if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP)
			ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d",
			    __func__, ioc->ioc_coredump_loop);
		else
			ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d",
			    __func__, ioc->ioc_coredump_loop);
		ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE;
	}
	ioc->non_operational_loop = 0;
	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		ioc_warn(ioc, "%s: hard reset: %s\n",
			 __func__, rc == 0 ? "success" : "failed");
		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
			mpt3sas_print_fault_code(ioc, doorbell &
			    MPI2_DOORBELL_DATA_MASK);
		} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
		    MPI2_IOC_STATE_COREDUMP)
			mpt3sas_print_coredump_info(ioc, doorbell &
			    MPI2_DOORBELL_DATA_MASK);
		if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL)
			return; /* don't rearm timer */
	}
	ioc->ioc_coredump_loop = 0;
	if (ioc->time_sync_interval &&
	    ++ioc->timestamp_update_count >= ioc->time_sync_interval) {
		ioc->timestamp_update_count = 0;
		_base_sync_drv_fw_timestamp(ioc);
	}
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
rearm_timer:
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
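
/**
 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */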
void
mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;

	if (ioc->fault_reset_work_q)
		return;

	ioc->timestamp_update_count = 0;

	/* initialize fault polling */
	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
	snprintf(ioc->fault_reset_work_q_name,
	    sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
	    ioc->driver_name, ioc->id);
	ioc->fault_reset_work_q =
		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
	if (!ioc->fault_reset_work_q) {
		ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
		return;
	}
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
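
/**
 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */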
void
mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;
	struct workqueue_struct *wq;

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	wq = ioc->fault_reset_work_q;
	ioc->fault_reset_work_q = NULL;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	if (wq) {
		if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
			flush_workqueue(wq);
		destroy_workqueue(wq);
	}
}
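
/**
 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
 * @ioc: per adapter object
 * @fault_code: fault code
 */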
void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
	ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
}
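
/**
 * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state
 * @ioc: per adapter object
 * @fault_code: fault code
 *
 * Return: nothing.
 */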
void
mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
	ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code);
}
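
/**
 * mpt3sas_base_wait_for_coredump_completion - wait until coredump
 *	completes or times out
 * @ioc: per adapter object
 * @caller: caller function name
 *
 * Return: 0 for success, non-zero for failure.
 */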
int
mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
		const char *caller)
{
	u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
			ioc->manu_pg11.CoreDumpTOSec :
			MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;

	int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
					timeout);

	if (ioc_state)
		ioc_err(ioc,
		    "%s: CoreDump timed out. (ioc_state=0x%x)\n",
		    caller, ioc_state);
	else
		ioc_info(ioc,
		    "%s: CoreDump completed. (ioc_state=0x%x)\n",
		    caller, ioc_state);

	return ioc_state;
}
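
/**
 * mpt3sas_halt_firmware - halt the controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues. Writing 0xC0FFEE00 to the
 * doorbell register halts the controller firmware, so that both
 * driver and firmware state can be captured.
 */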
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
	u32 doorbell;

	if (!ioc->fwfault_debug)
		return;

	dump_stack();

	doorbell = ioc->base_readl(&ioc->chip->Doorbell);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, doorbell &
		    MPI2_DOORBELL_DATA_MASK);
	} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		mpt3sas_print_coredump_info(ioc, doorbell &
		    MPI2_DOORBELL_DATA_MASK);
	} else {
		writel(0xC0FFEE00, &ioc->chip->Doorbell);
		ioc_err(ioc, "Firmware is halted due to command timeout\n");
	}

	if (ioc->fwfault_debug == 2)
		for (;;)
			;
	else
		panic("panic in %s\n", __func__);
}
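
/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 */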
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
	MPI2RequestHeader_t *request_hdr)
{
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	char *desc = NULL;
	u16 frame_sz;
	char *func_str = NULL;

	/* SCSI_IO, RAID_PASS and event replies are handled elsewhere */
	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
		return;

	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return;

	/*
	 * Older firmware versions don't support driver trigger pages,
	 * so skip displaying the 'config invalid type' kind of error
	 * message for those requests.
	 */
	if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
		Mpi2ConfigRequest_t *rqst = (Mpi2ConfigRequest_t *)request_hdr;

		if ((rqst->ExtPageType ==
		    MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER) &&
		    !(ioc->logging_level & MPT_DEBUG_CONFIG)) {
			return;
		}
	}

	switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc = "invalid function";
		break;
	case MPI2_IOCSTATUS_BUSY:
		desc = "busy";
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		desc = "invalid sgl";
		break;
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
		desc = "internal error";
		break;
	case MPI2_IOCSTATUS_INVALID_VPID:
		desc = "invalid vpid";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		desc = "insufficient resources";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc = "insufficient power";
		break;
	case MPI2_IOCSTATUS_INVALID_FIELD:
		desc = "invalid field";
		break;
	case MPI2_IOCSTATUS_INVALID_STATE:
		desc = "invalid state";
		break;
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
		desc = "op state not supported";
		break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
		desc = "config invalid action";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
		desc = "config invalid type";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
		desc = "config invalid page";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
		desc = "config invalid data";
		break;
	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
		desc = "config no defaults";
		break;
	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
		desc = "config can't commit";
		break;

/****************************************************************************
*  SCSI IO Reply
****************************************************************************/

	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc = "eedp app tag error";
		break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
		desc = "target invalid io index";
		break;
	case MPI2_IOCSTATUS_TARGET_ABORTED:
		desc = "target aborted";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
		desc = "target no conn retryable";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
		desc = "target no connection";
		break;
	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
		desc = "target xfer count mismatch";
		break;
	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
		desc = "target data offset error";
		break;
	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
		desc = "target too much write data";
		break;
	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
		desc = "target iu too short";
		break;
	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
		desc = "target ack nak timeout";
		break;
	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
		desc = "target nak received";
		break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
		desc = "smp request failed";
		break;
	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
		desc = "smp data overrun";
		break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
		desc = "diagnostic released";
		break;
	default:
		break;
	}

	if (!desc)
		return;

	switch (request_hdr->Function) {
	case MPI2_FUNCTION_CONFIG:
		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
		func_str = "config_page";
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
		func_str = "task_mgmt";
		break;
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
		func_str = "sas_iounit_ctl";
		break;
	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
		frame_sz = sizeof(Mpi2SepRequest_t);
		func_str = "enclosure";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		frame_sz = sizeof(Mpi2IOCInitRequest_t);
		func_str = "ioc_init";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		frame_sz = sizeof(Mpi2PortEnableRequest_t);
		func_str = "port_enable";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
		func_str = "smp_passthru";
		break;
	case MPI2_FUNCTION_NVME_ENCAPSULATED:
		frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
		    ioc->sge_size;
		func_str = "nvme_encapsulated";
		break;
	default:
		frame_sz = 32;
		func_str = "unknown";
		break;
	}

	ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
		 desc, ioc_status, request_hdr, func_str);

	_debug_dump_mf(request_hdr, frame_sz/4);
}
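
/**
 * _base_display_event_data - verbose translation of firmware asyn events
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 */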
static void
_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventNotificationReply_t *mpi_reply)
{
	char *desc = NULL;
	u16 event;

	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
		return;

	event = le16_to_cpu(mpi_reply->Event);

	switch (event) {
	case MPI2_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI2_EVENT_STATE_CHANGE:
		desc = "Status Change";
		break;
	case MPI2_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI2_EVENT_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		desc = "Device Status Change";
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
		if (!ioc->hide_ir_msg)
			desc = "IR Operation Status";
		break;
	case MPI2_EVENT_SAS_DISCOVERY:
	{
		Mpi2EventDataSasDiscovery_t *event_data =
		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
		ioc_info(ioc, "Discovery: (%s)",
			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
			 "start" : "stop");
		if (event_data->DiscoveryStatus)
			pr_cont(" discovery_status(0x%08x)",
				le32_to_cpu(event_data->DiscoveryStatus));
		pr_cont("\n");
		return;
	}
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		desc = "SAS Enclosure Device Status Change";
		break;
	case MPI2_EVENT_IR_VOLUME:
		if (!ioc->hide_ir_msg)
			desc = "IR Volume";
		break;
	case MPI2_EVENT_IR_PHYSICAL_DISK:
		if (!ioc->hide_ir_msg)
			desc = "IR Physical Disk";
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		if (!ioc->hide_ir_msg)
			desc = "IR Configuration Change List";
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
		if (!ioc->hide_ir_msg)
			desc = "Log Entry Added";
		break;
	case MPI2_EVENT_TEMP_THRESHOLD:
		desc = "Temperature Threshold";
		break;
	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
		desc = "Cable Event";
		break;
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		desc = "PCIE Device Status Change";
		break;
	case MPI2_EVENT_PCIE_ENUMERATION:
	{
		Mpi26EventDataPCIeEnumeration_t *event_data =
			(Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
		ioc_info(ioc, "PCIE Enumeration: (%s)",
			 event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
			 "start" : "stop");
		if (event_data->EnumerationStatus)
			pr_cont("enumeration_status(0x%08x)",
				le32_to_cpu(event_data->EnumerationStatus));
		pr_cont("\n");
		return;
	}
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	}

	if (!desc)
		return;

	ioc_info(ioc, "%s\n", desc);
}
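
/**
 * _base_sas_log_info - verbose translation of firmware log info
 * @ioc: per adapter object
 * @log_info: log info
 */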
static void
_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
{
	union loginfo_type {
		u32	loginfo;
		struct {
			u32	subcode:16;
			u32	code:8;
			u32	originator:4;
			u32	bus_type:4;
		} dw;
	};
	union loginfo_type sas_loginfo;
	char *originator_str = NULL;

	sas_loginfo.loginfo = log_info;
	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
		return;

	/* each nexus loss loginfo */
	if (log_info == 0x31170000)
		return;

	/* eat the loginfos associated with task aborts */
	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
	    0x31140000 || log_info == 0x31130000))
		return;

	switch (sas_loginfo.dw.originator) {
	case 0:
		originator_str = "IOP";
		break;
	case 1:
		originator_str = "PL";
		break;
	case 2:
		if (!ioc->hide_ir_msg)
			originator_str = "IR";
		else
			originator_str = "WarpDrive";
		break;
	}

	ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
		 log_info,
		 originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
}
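
/**
 * _base_display_reply_info - handle reply descriptors depending on IOC Status
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 */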
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	u16 ioc_status;
	u32 loginfo = 0;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
		_base_sas_ioc_info(ioc, mpi_reply,
		    mpt3sas_base_get_msg_frame(ioc, smid));
	}

	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
		_base_sas_log_info(ioc, loginfo);
	}

	if (ioc_status || loginfo) {
		ioc_status &= MPI2_IOCSTATUS_MASK;
		mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
	}
}
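
/**
 * mpt3sas_base_done - base internal command completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 *
 * Return:
 * 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */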
u8
mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
		return mpt3sas_check_for_pending_internal_cmds(ioc, smid);

	if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
		return 1;

	ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
	if (mpi_reply) {
		ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
	}
	ioc->base_cmds.status &= ~MPT3_CMD_PENDING;

	complete(&ioc->base_cmds.done);
	return 1;
}
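
/**
 * _base_async_event - main callback handler for firmware asyn events
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 *
 * Return:
 * 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */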
static u8
_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
{
	Mpi2EventNotificationReply_t *mpi_reply;
	Mpi2EventAckRequest_t *ack_request;
	u16 smid;
	struct _event_ack_list *delayed_event_ack;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (!mpi_reply)
		return 1;
	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
		return 1;

	_base_display_event_data(ioc, mpi_reply);

	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
		goto out;
	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
					    GFP_ATOMIC);
		if (!delayed_event_ack)
			goto out;
		INIT_LIST_HEAD(&delayed_event_ack->list);
		delayed_event_ack->Event = mpi_reply->Event;
		delayed_event_ack->EventContext = mpi_reply->EventContext;
		list_add_tail(&delayed_event_ack->list,
				&ioc->delayed_event_ack_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
				    le16_to_cpu(mpi_reply->Event)));
		goto out;
	}

	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
	ack_request->Event = mpi_reply->Event;
	ack_request->EventContext = mpi_reply->EventContext;
	ack_request->VF_ID = 0;  /* TODO */
	ack_request->VP_ID = 0;
	ioc->put_smid_default(ioc, smid);

out:

	/* scsih callback handler */
	mpt3sas_scsih_event_callback(ioc, msix_index, reply);

	/* ctl callback handler */
	mpt3sas_ctl_event_callback(ioc, msix_index, reply);

	return 1;
}

static struct scsiio_tracker *
_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct scsi_cmnd *cmd;

	if (WARN_ON(!smid) ||
	    WARN_ON(smid >= ioc->hi_priority_smid))
		return NULL;

	cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
	if (cmd)
		return scsi_cmd_priv(cmd);

	return NULL;
}
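
/**
 * _base_get_cb_idx - obtain the callback index
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: callback index.
 */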
static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	int i;
	u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
	u8 cb_idx = 0xFF;

	if (smid < ioc->hi_priority_smid) {
		struct scsiio_tracker *st;

		if (smid < ctl_smid) {
			st = _get_st_from_smid(ioc, smid);
			if (st)
				cb_idx = st->cb_idx;
		} else if (smid == ctl_smid)
			cb_idx = ioc->ctl_cb_idx;
	} else if (smid < ioc->internal_smid) {
		i = smid - ioc->hi_priority_smid;
		cb_idx = ioc->hpr_lookup[i].cb_idx;
	} else if (smid <= ioc->hba_queue_depth) {
		i = smid - ioc->internal_smid;
		cb_idx = ioc->internal_lookup[i].cb_idx;
	}
	return cb_idx;
}
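
/**
 * mpt3sas_base_pause_mq_polling - pause polling on the mq poll queues
 *				when driver is flushing out the IOs
 * @ioc: per adapter object
 *
 * Pause polling on the mq poll (io uring) queues when the driver is
 * flushing out the IOs. Also wait for any pending poll handlers to
 * complete.
 */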
void
mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc)
{
	int iopoll_q_count =
	    ioc->reply_queue_count - ioc->iopoll_q_start_index;
	int qid;

	for (qid = 0; qid < iopoll_q_count; qid++)
		atomic_set(&ioc->io_uring_poll_queues[qid].pause, 1);

	/*
	 * wait for current poll to complete.
	 */
	for (qid = 0; qid < iopoll_q_count; qid++) {
		while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) {
			cpu_relax();
			udelay(500);
		}
	}
}
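
/**
 * mpt3sas_base_resume_mq_polling - resume polling on mq poll queues
 * @ioc: per adapter object
 */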
void
mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc)
{
	int iopoll_q_count =
	    ioc->reply_queue_count - ioc->iopoll_q_start_index;
	int qid;

	for (qid = 0; qid < iopoll_q_count; qid++)
		atomic_set(&ioc->io_uring_poll_queues[qid].pause, 0);
}
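
/**
 * mpt3sas_base_mask_interrupts - disable interrupts
 * @ioc: per adapter object
 *
 * Disabling ResetIRQ, Reply and Doorbell Interrupts
 */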
void
mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
	u32 him_register;

	ioc->mask_interrupts = 1;
	him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
	him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
	writel(him_register, &ioc->chip->HostInterruptMask);
	ioc->base_readl(&ioc->chip->HostInterruptMask);
}
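
/**
 * mpt3sas_base_unmask_interrupts - enable interrupts
 * @ioc: per adapter object
 *
 * Enabling only Reply Interrupts
 */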
void
mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
	u32 him_register;

	him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
	him_register &= ~MPI2_HIM_RIM;
	writel(him_register, &ioc->chip->HostInterruptMask);
	ioc->mask_interrupts = 0;
}

union reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
};

/* 64-bit modulo helper built on do_div() */
static u32 base_mod64(u64 dividend, u32 divisor)
{
	u32 remainder;

	if (!divisor)
		pr_err("mpt3sas: DIVISOR is zero, in div fn\n");
	remainder = do_div(dividend, divisor);
	return remainder;
}
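
/**
 * _base_process_reply_queue - process reply descriptors from the reply
 *		descriptor post queue
 * @reply_q: per IRQ's reply queue object
 *
 * Return: number of reply descriptors processed from the reply
 * descriptor post queue.
 */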
static int
_base_process_reply_queue(struct adapter_reply_queue *reply_q)
{
	union reply_descriptor rd;
	u64 completed_cmds;
	u8 request_descript_type;
	u16 smid;
	u8 cb_idx;
	u32 reply;
	u8 msix_index = reply_q->msix_index;
	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
	Mpi2ReplyDescriptorsUnion_t *rpf;
	u8 rc;

	completed_cmds = 0;
	if (!atomic_add_unless(&reply_q->busy, 1, 1))
		return completed_cmds;

	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
	request_descript_type = rpf->Default.ReplyFlags
	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
		atomic_dec(&reply_q->busy);
		return completed_cmds;
	}

	cb_idx = 0xFF;
	do {
		rd.word = le64_to_cpu(rpf->Words);
		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
			goto out;
		reply = 0;
		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
		if (request_descript_type ==
		    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
		    request_descript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
		    request_descript_type ==
		    MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
			cb_idx = _base_get_cb_idx(ioc, smid);
			if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
			    (likely(mpt_callbacks[cb_idx] != NULL))) {
				rc = mpt_callbacks[cb_idx](ioc, smid,
				    msix_index, 0);
				if (rc)
					mpt3sas_base_free_smid(ioc, smid);
			}
		} else if (request_descript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			reply = le32_to_cpu(
			    rpf->AddressReply.ReplyFrameAddress);
			if (reply > ioc->reply_dma_max_address ||
			    reply < ioc->reply_dma_min_address)
				reply = 0;
			if (smid) {
				cb_idx = _base_get_cb_idx(ioc, smid);
				if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
				    (likely(mpt_callbacks[cb_idx] != NULL))) {
					rc = mpt_callbacks[cb_idx](ioc, smid,
					    msix_index, reply);
					if (reply)
						_base_display_reply_info(ioc,
						    smid, msix_index, reply);
					if (rc)
						mpt3sas_base_free_smid(ioc,
						    smid);
				}
			} else {
				_base_async_event(ioc, msix_index, reply);
			}

			/* reply free queue handling */
			if (reply) {
				ioc->reply_free_host_index =
				    (ioc->reply_free_host_index ==
				    (ioc->reply_free_queue_depth - 1)) ?
				    0 : ioc->reply_free_host_index + 1;
				ioc->reply_free[ioc->reply_free_host_index] =
				    cpu_to_le32(reply);
				if (ioc->is_mcpu_endpoint)
					_base_clone_reply_to_sys_mem(ioc,
					    reply,
					    ioc->reply_free_host_index);
				writel(ioc->reply_free_host_index,
				    &ioc->chip->ReplyFreeHostIndex);
			}
		}

		rpf->Words = cpu_to_le64(ULLONG_MAX);
		reply_q->reply_post_host_index =
		    (reply_q->reply_post_host_index ==
		    (ioc->reply_post_queue_depth - 1)) ? 0 :
		    reply_q->reply_post_host_index + 1;
		request_descript_type =
		    reply_q->reply_post_free[reply_q->reply_post_host_index].
		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		completed_cmds++;
		/* Update the reply post host index after continuously
		 * processing the threshold number of Reply Descriptors.
		 * So that FW can find enough entries to post the Reply
		 * Descriptors in the reply descriptor post queue.
		 */
		if (completed_cmds >= ioc->thresh_hold) {
			if (ioc->combined_reply_queue) {
				writel(reply_q->reply_post_host_index |
						((msix_index  & 7) <<
						 MPI2_RPHI_MSIX_INDEX_SHIFT),
				    ioc->replyPostRegisterIndex[msix_index/8]);
			} else {
				writel(reply_q->reply_post_host_index |
						(msix_index <<
						 MPI2_RPHI_MSIX_INDEX_SHIFT),
						&ioc->chip->ReplyPostHostIndex);
			}
			if (!reply_q->is_iouring_poll_q &&
			    !reply_q->irq_poll_scheduled) {
				reply_q->irq_poll_scheduled = true;
				irq_poll_sched(&reply_q->irqpoll);
			}
			atomic_dec(&reply_q->busy);
			return completed_cmds;
		}
		if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			goto out;
		if (!reply_q->reply_post_host_index)
			rpf = reply_q->reply_post_free;
		else
			rpf++;
	} while (1);

out:

	if (!completed_cmds) {
		atomic_dec(&reply_q->busy);
		return completed_cmds;
	}

	if (ioc->is_warpdrive) {
		writel(reply_q->reply_post_host_index,
		    ioc->reply_post_host_index[msix_index]);
		atomic_dec(&reply_q->busy);
		return completed_cmds;
	}

	/* Update Reply Post Host Index.
	 * For those HBA's which support combined reply queue feature
	 * 1. Get the correct Supplemental Reply Post Host Index Register.
	 *    i.e. (msix_index / 8)th entry from Supplemental Reply Post Host
	 *    Index Register address bank i.e replyPostRegisterIndex[],
	 * 2. Then update this register with new reply host index value
	 *    in ReplyPostIndex field and the MSIxIndex field with
	 *    msix_index value reduced to a value between 0 and 7,
	 *    using a modulo 8 operation. Since each Supplemental Reply Post
	 *    Host Index Register supports 8 MSI-X vectors.
	 *
	 * For other HBA's just update the Reply Post Host Index register with
	 * new reply host index value in ReplyPostIndex Field and msix_index
	 * value in MSIxIndex field.
	 */
	if (ioc->combined_reply_queue)
		writel(reply_q->reply_post_host_index | ((msix_index  & 7) <<
			MPI2_RPHI_MSIX_INDEX_SHIFT),
			ioc->replyPostRegisterIndex[msix_index/8]);
	else
		writel(reply_q->reply_post_host_index | (msix_index <<
			MPI2_RPHI_MSIX_INDEX_SHIFT),
			&ioc->chip->ReplyPostHostIndex);
	atomic_dec(&reply_q->busy);
	return completed_cmds;
}
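
/**
 * mpt3sas_blk_mq_poll - poll the blk mq poll queue
 * @shost: Scsi_Host object
 * @queue_num: hw ctx queue number
 *
 * Return: number of entries that have been processed from the poll queue.
 */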
int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	struct MPT3SAS_ADAPTER *ioc =
	    (struct MPT3SAS_ADAPTER *)shost->hostdata;
	struct adapter_reply_queue *reply_q;
	int num_entries = 0;
	int qid = queue_num - ioc->iopoll_q_start_index;

	if (atomic_read(&ioc->io_uring_poll_queues[qid].pause) ||
	    !atomic_add_unless(&ioc->io_uring_poll_queues[qid].busy, 1, 1))
		return 0;

	reply_q = ioc->io_uring_poll_queues[qid].reply_q;

	num_entries = _base_process_reply_queue(reply_q);
	atomic_dec(&ioc->io_uring_poll_queues[qid].busy);

	return num_entries;
}
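
/**
 * _base_interrupt - MPT adapter (IOC) specific interrupt handler
 * @irq: irq number (not used)
 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
 *
 * Return: IRQ_HANDLED if processed, else IRQ_NONE.
 */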
static irqreturn_t
_base_interrupt(int irq, void *bus_id)
{
	struct adapter_reply_queue *reply_q = bus_id;
	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;

	if (ioc->mask_interrupts)
		return IRQ_NONE;
	if (reply_q->irq_poll_scheduled)
		return IRQ_HANDLED;
	return ((_base_process_reply_queue(reply_q) > 0) ?
			IRQ_HANDLED : IRQ_NONE);
}
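
/**
 * _base_irqpoll - IRQ poll callback handler
 * @irqpoll: irq_poll object
 * @budget: irq poll weight
 *
 * Return: number of reply descriptors processed.
 */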
static int
_base_irqpoll(struct irq_poll *irqpoll, int budget)
{
	struct adapter_reply_queue *reply_q;
	int num_entries = 0;

	reply_q = container_of(irqpoll, struct adapter_reply_queue,
			irqpoll);
	if (reply_q->irq_line_enable) {
		disable_irq_nosync(reply_q->os_irq);
		reply_q->irq_line_enable = false;
	}
	num_entries = _base_process_reply_queue(reply_q);
	if (num_entries < budget) {
		irq_poll_complete(irqpoll);
		reply_q->irq_poll_scheduled = false;
		reply_q->irq_line_enable = true;
		enable_irq(reply_q->os_irq);
		/*
		 * Go for one more round of processing the
		 * reply descriptor post queue in case the HBA
		 * Firmware has posted some reply descriptors
		 * while reenabling the IRQ.
		 */
		_base_process_reply_queue(reply_q);
	}

	return num_entries;
}
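
/**
 * _base_init_irqpolls - initialize IRQ polls
 * @ioc: per adapter object
 */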
static void
_base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
{
	struct adapter_reply_queue *reply_q, *next;

	if (list_empty(&ioc->reply_queue_list))
		return;

	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
		if (reply_q->is_iouring_poll_q)
			continue;
		irq_poll_init(&reply_q->irqpoll,
			ioc->hba_queue_depth/4, _base_irqpoll);
		reply_q->irq_poll_scheduled = false;
		reply_q->irq_line_enable = true;
		reply_q->os_irq = pci_irq_vector(ioc->pdev,
		    reply_q->msix_index);
	}
}
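
/**
 * _base_is_controller_msix_enabled - is controller using multi-reply queues
 * @ioc: per adapter object
 *
 * Return: 1 if the controller has MSI-X enabled, else 0.
 */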
static inline int
_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
{
	return (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
}
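
/**
 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
 * @ioc: per adapter object
 * @poll: poll over reply descriptor pools in case the interrupt for a
 *	timed-out SCSI command got delayed
 * Context: non-ISR context
 *
 * Called when a Task Management request has completed.
 */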
void
mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
{
	struct adapter_reply_queue *reply_q;

	/* If MSIX capability is turned off
	 * then multi-queues are not enabled
	 */
	if (!_base_is_controller_msix_enabled(ioc))
		return;

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->shost_recovery || ioc->remove_host ||
		    ioc->pci_error_recovery)
			return;
		/* TMs are on msix_index == 0 */
		if (reply_q->msix_index == 0)
			continue;

		if (reply_q->is_iouring_poll_q) {
			_base_process_reply_queue(reply_q);
			continue;
		}

		synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
		if (reply_q->irq_poll_scheduled) {
			/* Calling irq_poll_disable will wait for any pending
			 * callbacks to have completed.
			 */
			irq_poll_disable(&reply_q->irqpoll);
			irq_poll_enable(&reply_q->irqpoll);
			/* check how the scheduled poll has ended,
			 * clean up only if necessary
			 */
			if (reply_q->irq_poll_scheduled) {
				reply_q->irq_poll_scheduled = false;
				reply_q->irq_line_enable = true;
				enable_irq(reply_q->os_irq);
			}
		}

		if (poll)
			_base_process_reply_queue(reply_q);
	}
}
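
/**
 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
 * @cb_idx: callback index
 */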
void
mpt3sas_base_release_callback_handler(u8 cb_idx)
{
	mpt_callbacks[cb_idx] = NULL;
}
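
/**
 * mpt3sas_base_register_callback_handler - obtain index for the interrupt
 *	callback handler
 * @cb_func: callback function
 *
 * Return: index of @cb_func.
 */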
u8
mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
{
	u8 cb_idx;

	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
		if (mpt_callbacks[cb_idx] == NULL)
			break;

	mpt_callbacks[cb_idx] = cb_func;
	return cb_idx;
}
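
/**
 * mpt3sas_base_initialize_callback_handler - initialize the interrupt
 *	callback handler table
 */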
void
mpt3sas_base_initialize_callback_handler(void)
{
	u8 cb_idx;

	for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
		mpt3sas_base_release_callback_handler(cb_idx);
}
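
/**
 * _base_build_zero_len_sge - build zero length sg entry
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to insure the IOCs hardware has
 * something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 */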
static void
_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
{
	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
	    MPI2_SGE_FLAGS_SHIFT);
	ioc->base_add_sg_single(paddr, flags_length, -1);
}
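
/**
 * _base_add_sg_single_32 - add sg element for 32 bit addressing
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: physical address
 */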
static void
_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
	Mpi2SGESimple32_t *sgel = paddr;

	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
	sgel->FlagsLength = cpu_to_le32(flags_length);
	sgel->Address = cpu_to_le32(dma_addr);
}
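
/**
 * _base_add_sg_single_64 - add sg element for 64 bit addressing
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: physical address
 */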
static void
_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
	Mpi2SGESimple64_t *sgel = paddr;

	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
	sgel->FlagsLength = cpu_to_le32(flags_length);
	sgel->Address = cpu_to_le64(dma_addr);
}
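
/**
 * _base_get_chain_buffer_tracker - obtain chain tracker
 * @ioc: per adapter object
 * @scmd: SCSI command of the IO request
 *
 * Return: chain tracker from chain_lookup table using key as
 * smid and smid's chain_offset.
 */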
static struct chain_tracker *
_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
			       struct scsi_cmnd *scmd)
{
	struct chain_tracker *chain_req;
	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
	u16 smid = st->smid;
	u8 chain_offset =
	   atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);

	if (chain_offset == ioc->chains_needed_per_io)
		return NULL;

	chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
	atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
	return chain_req;
}
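
/**
 * _base_build_sg - build generic sg
 * @ioc: per adapter object
 * @psge: virtual address for SGE
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 */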
static void
_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
	size_t data_in_sz)
{
	u32 sgl_flags;

	if (!data_out_sz && !data_in_sz) {
		_base_build_zero_len_sge(ioc, psge);
		return;
	}

	if (data_out_sz && data_in_sz) {
		/* WRITE sgel first */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_out_sz, data_out_dma);

		/* incr sgel */
		psge += ioc->sge_size;

		/* READ sgel last */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_in_sz, data_in_dma);
	} else if (data_out_sz) /* WRITE */ {
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_out_sz, data_out_dma);
	} else if (data_in_sz) /* READ */ {
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_in_sz, data_in_dma);
	}
}
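
/* IEEE format sgls */

/**
 * _base_build_nvme_prp - build a native NVMe PRP for an NVMe device
 * @ioc: per adapter object
 * @smid: system request message index for getting the PRP buffer
 * @nvme_encap_request: the NVMe encapsulated request message frame
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 *
 * The native SGL is built starting in the first PRP entry of the NVMe
 * command (PRP1). If the data buffer is small enough to be described
 * entirely with PRP1 and PRP2, both are used; otherwise PRP2 points to
 * a PRP list built in the contiguous PCIe SGL buffer allocated for
 * this smid. Each PRP entry covers one firmware page (ioc->page_size),
 * and the last entry of each physical page of the PRP list is a pointer
 * to the next page of the list.
 */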
static void
_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
	size_t data_in_sz)
{
	int		prp_size = NVME_PRP_SIZE;
	__le64		*prp_entry, *prp1_entry, *prp2_entry;
	__le64		*prp_page;
	dma_addr_t	prp_entry_dma, prp_page_dma, dma_addr;
	u32		offset, entry_len;
	u32		page_mask_result, page_mask;
	size_t		length;
	struct mpt3sas_nvme_cmd *nvme_cmd =
		(void *)nvme_encap_request->NVMe_Command;

	/*
	 * Not all commands require a data transfer. If no data, just return
	 * without constructing any PRP.
	 */
	if (!data_in_sz && !data_out_sz)
		return;
	prp1_entry = &nvme_cmd->prp1;
	prp2_entry = &nvme_cmd->prp2;
	prp_entry = prp1_entry;
	/*
	 * For the PRP entries, use the specially allocated buffer of
	 * contiguous memory.
	 */
	prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
	prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);

	/*
	 * Check if we are within 1 entry of a page boundary we don't
	 * want our first entry to be a PRP List entry.
	 */
	page_mask = ioc->page_size - 1;
	page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
	if (!page_mask_result) {
		/* Bump up to next page boundary. */
		prp_page = (__le64 *)((u8 *)prp_page + prp_size);
		prp_page_dma = prp_page_dma + prp_size;
	}

	/*
	 * Set PRP physical pointer, which initially points to the current PRP
	 * DMA memory page.
	 */
	prp_entry_dma = prp_page_dma;

	/* Get physical address and length of the data buffer. */
	if (data_in_sz) {
		dma_addr = data_in_dma;
		length = data_in_sz;
	} else {
		dma_addr = data_out_dma;
		length = data_out_sz;
	}

	/* Loop while the length is not zero. */
	while (length) {
		/*
		 * Check if we need to put a list pointer here if we are at
		 * page boundary - prp_size (8 bytes).
		 */
		page_mask_result = (prp_entry_dma + prp_size) & page_mask;
		if (!page_mask_result) {
			/*
			 * This would be the last entry in this physical page
			 * of the PRP list, so instead put a PRP list pointer
			 * here that chains to the next page of the
			 * contiguous PRP buffer.
			 */
			prp_entry_dma++;
			*prp_entry = cpu_to_le64(prp_entry_dma);
			prp_entry++;
		}

		/* Need to handle if entry will be part of a page. */
		offset = dma_addr & page_mask;
		entry_len = ioc->page_size - offset;

		if (prp_entry == prp1_entry) {
			/*
			 * Must fill in the first PRP pointer (PRP1) before
			 * moving on.
			 */
			*prp1_entry = cpu_to_le64(dma_addr);

			/*
			 * Now point to the second PRP entry within the
			 * command (PRP2).
			 */
			prp_entry = prp2_entry;
		} else if (prp_entry == prp2_entry) {
			/*
			 * Should the PRP2 entry be a PRP List pointer or just
			 * a regular PRP pointer?  If there is more than one
			 * more page of data, must use a PRP List pointer.
			 */
			if (length > ioc->page_size) {
				/*
				 * PRP2 will contain a PRP List pointer because
				 * more PRP's are needed with this command. The
				 * list will start at the beginning of the
				 * contiguous buffer.
				 */
				*prp2_entry = cpu_to_le64(prp_entry_dma);

				/*
				 * The next PRP Entry will be the start of the
				 * first PRP List.
				 */
				prp_entry = prp_page;
			} else {
				/*
				 * After this, the PRP Entries are complete.
				 * This command uses 2 PRP's and no PRP list.
				 */
				*prp2_entry = cpu_to_le64(dma_addr);
			}
		} else {
			/*
			 * Put entry in list and bump the addresses.
			 *
			 * After PRP1 and PRP2 are filled in, this will fill in
			 * all remaining PRP entries in a PRP List, one per
			 * each time through the loop.
			 */
			*prp_entry = cpu_to_le64(dma_addr);
			prp_entry++;
			prp_entry_dma++;
		}

		/*
		 * Bump the phys address of the command's data buffer by the
		 * entry_len.
		 */
		dma_addr += entry_len;

		/* decrement length accounting for last partial page. */
		if (entry_len > length)
			length = 0;
		else
			length -= entry_len;
	}
}
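
/**
 * base_make_prp_nvme - prepare PRPs (Physical Region Pages) -
 *			SGLs specific to NVMe drives only
 * @ioc: per adapter object
 * @scmd: SCSI command from the mid-layer
 * @mpi_request: mpi request
 * @smid: msg Index
 * @sge_count: scatter gather element count
 */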
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423 static void
2424 base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
2425 struct scsi_cmnd *scmd,
2426 Mpi25SCSIIORequest_t *mpi_request,
2427 u16 smid, int sge_count)
2428 {
2429 int sge_len, num_prp_in_chain = 0;
2430 Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
2431 __le64 *curr_buff;
2432 dma_addr_t msg_dma, sge_addr, offset;
2433 u32 page_mask, page_mask_result;
2434 struct scatterlist *sg_scmd;
2435 u32 first_prp_len;
2436 int data_len = scsi_bufflen(scmd);
2437 u32 nvme_pg_size;
2438
2439 nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452 page_mask = nvme_pg_size - 1;
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464 main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2465
2466
2467
2468
2469 main_chain_element = (Mpi25IeeeSgeChain64_t *)
2470 ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
2471
2472
2473
2474
2475
2476
2477
2478 curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
2479 msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2480
2481 main_chain_element->Address = cpu_to_le64(msg_dma);
2482 main_chain_element->NextChainOffset = 0;
2483 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2484 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2485 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
2486
/* Build the first PRP; the SGE need not be page aligned. */
2488 ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2489 sg_scmd = scsi_sglist(scmd);
2490 sge_addr = sg_dma_address(sg_scmd);
2491 sge_len = sg_dma_len(sg_scmd);
2492
2493 offset = sge_addr & page_mask;
2494 first_prp_len = nvme_pg_size - offset;
2495
2496 ptr_first_sgl->Address = cpu_to_le64(sge_addr);
2497 ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
2498
2499 data_len -= first_prp_len;
2500
2501 if (sge_len > first_prp_len) {
2502 sge_addr += first_prp_len;
2503 sge_len -= first_prp_len;
2504 } else if (data_len && (sge_len == first_prp_len)) {
2505 sg_scmd = sg_next(sg_scmd);
2506 sge_addr = sg_dma_address(sg_scmd);
2507 sge_len = sg_dma_len(sg_scmd);
2508 }
2509
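/*
 * Fill the PRP list: each loop iteration emits one PRP entry covering
 * one NVMe page of the transfer. When the next entry would land on the
 * last slot of a PRP page, a PRP list pointer to the following page is
 * emitted first, per the NVMe PRP chaining rules.
 */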
2510 for (;;) {
2511 offset = sge_addr & page_mask;
2512
/* Put a PRP list pointer when crossing a page boundary. */
2514 page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
2515 if (unlikely(!page_mask_result)) {
2516 scmd_printk(KERN_NOTICE,
2517 scmd, "page boundary curr_buff: 0x%p\n",
2518 curr_buff);
2519 msg_dma += 8;
2520 *curr_buff = cpu_to_le64(msg_dma);
2521 curr_buff++;
2522 num_prp_in_chain++;
2523 }
2524
2525 *curr_buff = cpu_to_le64(sge_addr);
2526 curr_buff++;
2527 msg_dma += 8;
2528 num_prp_in_chain++;
2529
2530 sge_addr += nvme_pg_size;
2531 sge_len -= nvme_pg_size;
2532 data_len -= nvme_pg_size;
2533
2534 if (data_len <= 0)
2535 break;
2536
2537 if (sge_len > 0)
2538 continue;
2539
2540 sg_scmd = sg_next(sg_scmd);
2541 sge_addr = sg_dma_address(sg_scmd);
2542 sge_len = sg_dma_len(sg_scmd);
2543 }
2544
2545 main_chain_element->Length =
2546 cpu_to_le32(num_prp_in_chain * sizeof(u64));
2548 }
2549
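/**
 * base_is_prp_possible - check if PRPs can be built or not
 * @ioc: per adapter object
 * @pcie_device: points to the PCIe device's info
 * @scmd: scsi command
 * @sge_count: scatter gather element count
 *
 * Return: true if PRPs should be built, false if IEEE SGLs should be built
 */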
2550 static bool
2551 base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
2552 struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
2553 {
2554 u32 data_length = 0;
2555 bool build_prp = true;
2556
2557 data_length = scsi_bufflen(scmd);
2558 if (pcie_device &&
2559 (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) {
2560 build_prp = false;
2561 return build_prp;
2562 }
2563
/* If the data length is <= NVME_PRP_PAGE_SIZE*4 and no more than
 * 2 SGEs are needed, build an IEEE SGL instead of PRPs.
 */
2567 if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
2568 build_prp = false;
2569
2570 return build_prp;
2571 }
2572
/**
 * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
 * determine if the driver needs to build a native SGL. If so, that native SGL
 * is built in the special contiguous buffers allocated especially for PCIe SGL
 * creation. If the driver will not build a native SGL, return 1 and a normal
 * IEEE SGL will be built. Currently this routine supports NVMe.
 * @ioc: per adapter object
 * @mpi_request: mf request pointer
 * @smid: system request message index
 * @scmd: scsi command
 * @pcie_device: points to the PCIe device's info
 *
 * Return: 0 if native SGL was built, 1 if no SGL was built
 */
2588 static int
2589 _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
2590 Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
2591 struct _pcie_device *pcie_device)
2592 {
2593 int sges_left;
2594
2595
2596 sges_left = scsi_dma_map(scmd);
2597 if (sges_left < 0)
2598 return 1;
2599
/* Check if we need to build a native SG list. */
2601 if (!base_is_prp_possible(ioc, pcie_device,
2602 scmd, sges_left)) {
/* We built a native SG list, just return. */
2604 goto out;
2605 }
2606
/*
 * Build native NVMe PRP.
 */
2610 base_make_prp_nvme(ioc, scmd, mpi_request,
2611 smid, sges_left);
2612
2613 return 0;
2614 out:
2615 scsi_dma_unmap(scmd);
2616 return 1;
2617 }
2618
/**
 * _base_add_sg_single_ieee - add sg element for IEEE format
 * @paddr: virtual address for SGE
 * @flags: SGE flags
 * @chain_offset: number of 128 byte elements from start of segment
 * @length: DMA length
 * @dma_addr: Physical address
 */
2627 static void
2628 _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
2629 dma_addr_t dma_addr)
2630 {
2631 Mpi25IeeeSgeChain64_t *sgel = paddr;
2632
2633 sgel->Flags = flags;
2634 sgel->NextChainOffset = chain_offset;
2635 sgel->Length = cpu_to_le32(length);
2636 sgel->Address = cpu_to_le64(dma_addr);
2637 }
2638
/**
 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to ensure the IOC's hardware has
 * something to use if the target device goes brain dead and tries to send
 * data even when none is asked for.
 */
2648 static void
2649 _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
2650 {
2651 u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2652 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2653 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
2654
2655 _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
2656 }
2657
/**
 * _base_build_sg_scmd - main sg creation routine
 * @ioc: per adapter object
 * @scmd: scsi command
 * @smid: system request message index
 * @unused: unused pcie_device pointer
 * Context: none.
 *
 * The main routine that builds a scatter gather table from a given
 * scsi request sent via the .queuecommand main handler.
 *
 * Return: 0 success, anything else error
 */
2672 static int
2673 _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
2674 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
2675 {
2676 Mpi2SCSIIORequest_t *mpi_request;
2677 dma_addr_t chain_dma;
2678 struct scatterlist *sg_scmd;
2679 void *sg_local, *chain;
2680 u32 chain_offset;
2681 u32 chain_length;
2682 u32 chain_flags;
2683 int sges_left;
2684 u32 sges_in_segment;
2685 u32 sgl_flags;
2686 u32 sgl_flags_last_element;
2687 u32 sgl_flags_end_buffer;
2688 struct chain_tracker *chain_req;
2689
2690 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2691
2692
2693 sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
2694 if (scmd->sc_data_direction == DMA_TO_DEVICE)
2695 sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
2696 sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
2697 << MPI2_SGE_FLAGS_SHIFT;
2698 sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
2699 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
2700 << MPI2_SGE_FLAGS_SHIFT;
2701 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2702
2703 sg_scmd = scsi_sglist(scmd);
2704 sges_left = scsi_dma_map(scmd);
2705 if (sges_left < 0)
2706 return -ENOMEM;
2707
2708 sg_local = &mpi_request->SGL;
2709 sges_in_segment = ioc->max_sges_in_main_message;
2710 if (sges_left <= sges_in_segment)
2711 goto fill_in_last_segment;
2712
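/*
 * The MPI2 ChainOffset field is expressed in 32-bit words, hence the
 * division by 4 below.
 */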
2713 mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
2714 (sges_in_segment * ioc->sge_size))/4;
2715
2716
2717 while (sges_in_segment) {
2718 if (sges_in_segment == 1)
2719 ioc->base_add_sg_single(sg_local,
2720 sgl_flags_last_element | sg_dma_len(sg_scmd),
2721 sg_dma_address(sg_scmd));
2722 else
2723 ioc->base_add_sg_single(sg_local, sgl_flags |
2724 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2725 sg_scmd = sg_next(sg_scmd);
2726 sg_local += ioc->sge_size;
2727 sges_left--;
2728 sges_in_segment--;
2729 }
2730
2731
2732 chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
2733 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2734 if (!chain_req)
2735 return -1;
2736 chain = chain_req->chain_buffer;
2737 chain_dma = chain_req->chain_buffer_dma;
2738 do {
2739 sges_in_segment = (sges_left <=
2740 ioc->max_sges_in_chain_message) ? sges_left :
2741 ioc->max_sges_in_chain_message;
2742 chain_offset = (sges_left == sges_in_segment) ?
2743 0 : (sges_in_segment * ioc->sge_size)/4;
2744 chain_length = sges_in_segment * ioc->sge_size;
2745 if (chain_offset) {
2746 chain_offset = chain_offset <<
2747 MPI2_SGE_CHAIN_OFFSET_SHIFT;
2748 chain_length += ioc->sge_size;
2749 }
2750 ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
2751 chain_length, chain_dma);
2752 sg_local = chain;
2753 if (!chain_offset)
2754 goto fill_in_last_segment;
2755
2756
2757 while (sges_in_segment) {
2758 if (sges_in_segment == 1)
2759 ioc->base_add_sg_single(sg_local,
2760 sgl_flags_last_element |
2761 sg_dma_len(sg_scmd),
2762 sg_dma_address(sg_scmd));
2763 else
2764 ioc->base_add_sg_single(sg_local, sgl_flags |
2765 sg_dma_len(sg_scmd),
2766 sg_dma_address(sg_scmd));
2767 sg_scmd = sg_next(sg_scmd);
2768 sg_local += ioc->sge_size;
2769 sges_left--;
2770 sges_in_segment--;
2771 }
2772
2773 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2774 if (!chain_req)
2775 return -1;
2776 chain = chain_req->chain_buffer;
2777 chain_dma = chain_req->chain_buffer_dma;
2778 } while (1);
2779
2780
2781 fill_in_last_segment:
2782
2783
2784 while (sges_left) {
2785 if (sges_left == 1)
2786 ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
2787 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2788 else
2789 ioc->base_add_sg_single(sg_local, sgl_flags |
2790 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2791 sg_scmd = sg_next(sg_scmd);
2792 sg_local += ioc->sge_size;
2793 sges_left--;
2794 }
2795
2796 return 0;
2797 }
2798
/**
 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
 * @ioc: per adapter object
 * @scmd: scsi command
 * @smid: system request message index
 * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
 * built when possible.
 * Context: none.
 *
 * The main routine that builds a scatter gather table from a given
 * scsi request sent via the .queuecommand main handler.
 *
 * Return: 0 success, anything else error
 */
2813 static int
2814 _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
2815 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
2816 {
2817 Mpi25SCSIIORequest_t *mpi_request;
2818 dma_addr_t chain_dma;
2819 struct scatterlist *sg_scmd;
2820 void *sg_local, *chain;
2821 u32 chain_offset;
2822 u32 chain_length;
2823 int sges_left;
2824 u32 sges_in_segment;
2825 u8 simple_sgl_flags;
2826 u8 simple_sgl_flags_last;
2827 u8 chain_sgl_flags;
2828 struct chain_tracker *chain_req;
2829
2830 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2831
2832
2833 simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2834 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2835 simple_sgl_flags_last = simple_sgl_flags |
2836 MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2837 chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2838 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2839
/* Check if we need to build a native SG list. */
2841 if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
2842 smid, scmd, pcie_device) == 0)) {
/* We built a native SG list, just return. */
2844 return 0;
2845 }
2846
2847 sg_scmd = scsi_sglist(scmd);
2848 sges_left = scsi_dma_map(scmd);
2849 if (sges_left < 0)
2850 return -ENOMEM;
2851
2852 sg_local = &mpi_request->SGL;
2853 sges_in_segment = (ioc->request_sz -
2854 offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
2855 if (sges_left <= sges_in_segment)
2856 goto fill_in_last_segment;
2857
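/*
 * For IEEE SGLs the ChainOffset is expressed in units of SGE elements;
 * the chain element occupies the last slot of the main message segment.
 */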
mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
2859 (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
2860
2861
2862 while (sges_in_segment > 1) {
2863 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2864 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2865 sg_scmd = sg_next(sg_scmd);
2866 sg_local += ioc->sge_size_ieee;
2867 sges_left--;
2868 sges_in_segment--;
2869 }
2870
2871
2872 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2873 if (!chain_req)
2874 return -1;
2875 chain = chain_req->chain_buffer;
2876 chain_dma = chain_req->chain_buffer_dma;
2877 do {
2878 sges_in_segment = (sges_left <=
2879 ioc->max_sges_in_chain_message) ? sges_left :
2880 ioc->max_sges_in_chain_message;
2881 chain_offset = (sges_left == sges_in_segment) ?
2882 0 : sges_in_segment;
2883 chain_length = sges_in_segment * ioc->sge_size_ieee;
2884 if (chain_offset)
2885 chain_length += ioc->sge_size_ieee;
2886 _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
2887 chain_offset, chain_length, chain_dma);
2888
2889 sg_local = chain;
2890 if (!chain_offset)
2891 goto fill_in_last_segment;
2892
2893
2894 while (sges_in_segment) {
2895 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2896 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2897 sg_scmd = sg_next(sg_scmd);
2898 sg_local += ioc->sge_size_ieee;
2899 sges_left--;
2900 sges_in_segment--;
2901 }
2902
2903 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2904 if (!chain_req)
2905 return -1;
2906 chain = chain_req->chain_buffer;
2907 chain_dma = chain_req->chain_buffer_dma;
2908 } while (1);
2909
2910
2911 fill_in_last_segment:
2912
2913
2914 while (sges_left > 0) {
2915 if (sges_left == 1)
2916 _base_add_sg_single_ieee(sg_local,
2917 simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
2918 sg_dma_address(sg_scmd));
2919 else
2920 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2921 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2922 sg_scmd = sg_next(sg_scmd);
2923 sg_local += ioc->sge_size_ieee;
2924 sges_left--;
2925 }
2926
2927 return 0;
2928 }
2929
/**
 * _base_build_sg_ieee - build generic sg for IEEE format
 * @ioc: per adapter object
 * @psge: virtual address for SGE
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 */
2939 static void
2940 _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
2941 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2942 size_t data_in_sz)
2943 {
2944 u8 sgl_flags;
2945
2946 if (!data_out_sz && !data_in_sz) {
2947 _base_build_zero_len_sge_ieee(ioc, psge);
2948 return;
2949 }
2950
2951 if (data_out_sz && data_in_sz) {
2952
2953 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2954 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2955 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2956 data_out_dma);
2957
2958
2959 psge += ioc->sge_size_ieee;
2960
2961
2962 sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2963 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2964 data_in_dma);
2965 } else if (data_out_sz) {
2966 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2967 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2968 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2969 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2970 data_out_dma);
2971 } else if (data_in_sz) {
2972 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2973 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2974 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2975 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2976 data_in_dma);
2977 }
2978 }
2979
2980 #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
2981
/**
 * _base_config_dma_addressing - set dma addressing
 * @ioc: per adapter object
 * @pdev: PCI device struct
 *
 * Return: 0 for success, non-zero for failure.
 */
2989 static int
2990 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
2991 {
2992 struct sysinfo s;
2993
2994 if (ioc->is_mcpu_endpoint ||
2995 sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
2996 dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32))
2997 ioc->dma_mask = 32;
/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
2999 else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
3000 ioc->dma_mask = 63;
3001 else
3002 ioc->dma_mask = 64;
3003
3004 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)) ||
3005 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)))
3006 return -ENODEV;
3007
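/*
 * Pick the SGE format matching the DMA mask: 64-bit SGEs whenever
 * addressing beyond 32 bits is possible.
 */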
3008 if (ioc->dma_mask > 32) {
3009 ioc->base_add_sg_single = &_base_add_sg_single_64;
3010 ioc->sge_size = sizeof(Mpi2SGESimple64_t);
3011 } else {
3012 ioc->base_add_sg_single = &_base_add_sg_single_32;
3013 ioc->sge_size = sizeof(Mpi2SGESimple32_t);
3014 }
3015
3016 si_meminfo(&s);
3017 ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
3018 ioc->dma_mask, convert_to_kb(s.totalram));
3019
3020 return 0;
3021 }
3022
/**
 * _base_check_enable_msix - checks whether MSI-X is capable
 * @ioc: per adapter object
 *
 * Check to see if the card is capable of MSI-X, and if so,
 * get the msix vector count.
 */
3030 static int
3031 _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
3032 {
3033 int base;
3034 u16 message_control;
3035
/* Check whether the controller is a SAS2008 B0 controller;
 * if it is, use IO-APIC instead of MSIX.
 */
3039 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
3040 ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
3041 return -EINVAL;
3042 }
3043
3044 base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
3045 if (!base) {
3046 dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
3047 return -EINVAL;
3048 }
3049
/* get msix vector count */
/* NUMA_IO not supported for older controllers */
3052 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
3053 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
3054 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
3055 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
3056 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
3057 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
3058 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
3059 ioc->msix_vector_count = 1;
3060 else {
3061 pci_read_config_word(ioc->pdev, base + 2, &message_control);
3062 ioc->msix_vector_count = (message_control & 0x3FF) + 1;
3063 }
3064 dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
3065 ioc->msix_vector_count));
3066 return 0;
3067 }
3068
/**
 * mpt3sas_base_free_irq - free irq
 * @ioc: per adapter object
 *
 * Freeing respective reply_queue from the list.
 */
3075 void
3076 mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
3077 {
3078 unsigned int irq;
3079 struct adapter_reply_queue *reply_q, *next;
3080
3081 if (list_empty(&ioc->reply_queue_list))
3082 return;
3083
3084 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
3085 list_del(&reply_q->list);
3086 if (reply_q->is_iouring_poll_q) {
3087 kfree(reply_q);
3088 continue;
3089 }
3090
3091 if (ioc->smp_affinity_enable) {
3092 irq = pci_irq_vector(ioc->pdev, reply_q->msix_index);
3093 irq_update_affinity_hint(irq, NULL);
3094 }
3095 free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
3096 reply_q);
3097 kfree(reply_q);
3098 }
3099 }
3100
/**
 * _base_request_irq - request irq
 * @ioc: per adapter object
 * @index: msix index into vector table
 *
 * Inserting respective reply_queue into the list.
 */
3108 static int
3109 _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
3110 {
3111 struct pci_dev *pdev = ioc->pdev;
3112 struct adapter_reply_queue *reply_q;
3113 int r, qid;
3114
3115 reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
3116 if (!reply_q) {
3117 ioc_err(ioc, "unable to allocate memory %zu!\n",
3118 sizeof(struct adapter_reply_queue));
3119 return -ENOMEM;
3120 }
3121 reply_q->ioc = ioc;
3122 reply_q->msix_index = index;
3123
3124 atomic_set(&reply_q->busy, 0);
3125
3126 if (index >= ioc->iopoll_q_start_index) {
3127 qid = index - ioc->iopoll_q_start_index;
3128 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-mq-poll%d",
3129 ioc->driver_name, ioc->id, qid);
3130 reply_q->is_iouring_poll_q = 1;
3131 ioc->io_uring_poll_queues[qid].reply_q = reply_q;
3132 goto out;
3133 }
3134
3135
3136 if (ioc->msix_enable)
3137 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
3138 ioc->driver_name, ioc->id, index);
3139 else
3140 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
3141 ioc->driver_name, ioc->id);
3142 r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
3143 IRQF_SHARED, reply_q->name, reply_q);
3144 if (r) {
3145 pr_err("%s: unable to allocate interrupt %d!\n",
3146 reply_q->name, pci_irq_vector(pdev, index));
3147 kfree(reply_q);
3148 return -EBUSY;
3149 }
3150 out:
3151 INIT_LIST_HEAD(&reply_q->list);
3152 list_add_tail(&reply_q->list, &ioc->reply_queue_list);
3153 return 0;
3154 }
3155
/**
 * _base_assign_reply_queues - assigning msix index for each cpu
 * @ioc: per adapter object
 *
 * The enduser would need to set the affinity via /proc/irq/#/smp_affinity
 */
3162 static void
3163 _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
3164 {
3165 unsigned int cpu, nr_cpus, nr_msix, index = 0, irq;
3166 struct adapter_reply_queue *reply_q;
3167 int iopoll_q_count = ioc->reply_queue_count -
3168 ioc->iopoll_q_start_index;
3169 const struct cpumask *mask;
3170
3171 if (!_base_is_controller_msix_enabled(ioc))
3172 return;
3173
3174 if (ioc->msix_load_balance)
3175 return;
3176
3177 memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
3178
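/*
 * Build the per-CPU table that maps each online CPU to the MSI-X
 * reply queue whose affinity mask covers it.
 */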
3179 nr_cpus = num_online_cpus();
3180 nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
3181 ioc->facts.MaxMSIxVectors);
3182 if (!nr_msix)
3183 return;
3184
3185 if (ioc->smp_affinity_enable) {
3186
/*
 * Set irq affinity to the local NUMA node for those irqs
 * corresponding to high iops queues.
 */
3191 if (ioc->high_iops_queues) {
3192 mask = cpumask_of_node(dev_to_node(&ioc->pdev->dev));
3193 for (index = 0; index < ioc->high_iops_queues;
3194 index++) {
3195 irq = pci_irq_vector(ioc->pdev, index);
3196 irq_set_affinity_and_hint(irq, mask);
3197 }
3198 }
3199
3200 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3201 const cpumask_t *mask;
3202
3203 if (reply_q->msix_index < ioc->high_iops_queues ||
3204 reply_q->msix_index >= ioc->iopoll_q_start_index)
3205 continue;
3206
3207 mask = pci_irq_get_affinity(ioc->pdev,
3208 reply_q->msix_index);
3209 if (!mask) {
3210 ioc_warn(ioc, "no affinity for msi %x\n",
3211 reply_q->msix_index);
3212 goto fall_back;
3213 }
3214
3215 for_each_cpu_and(cpu, mask, cpu_online_mask) {
3216 if (cpu >= ioc->cpu_msix_table_sz)
3217 break;
3218 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
3219 }
3220 }
3221 return;
3222 }
3223
3224 fall_back:
3225 cpu = cpumask_first(cpu_online_mask);
3226 nr_msix -= (ioc->high_iops_queues - iopoll_q_count);
3227 index = 0;
3228
3229 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3230 unsigned int i, group = nr_cpus / nr_msix;
3231
3232 if (reply_q->msix_index < ioc->high_iops_queues ||
3233 reply_q->msix_index >= ioc->iopoll_q_start_index)
3234 continue;
3235
3236 if (cpu >= nr_cpus)
3237 break;
3238
3239 if (index < nr_cpus % nr_msix)
3240 group++;
3241
3242 for (i = 0 ; i < group ; i++) {
3243 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
3244 cpu = cpumask_next(cpu, cpu_online_mask);
3245 }
3246 index++;
3247 }
3248 }
3249
/**
 * _base_check_and_enable_high_iops_queues - enable high iops mode
 * @ioc: per adapter object
 * @hba_msix_vector_count: msix vectors supported by HBA
 *
 * Enable high iops queues only if
 *  - HBA is a SEA/AERO controller and
 *  - the MSI-X vector count supported by the HBA equals
 *    MPT3SAS_GEN35_MAX_MSIX_QUEUES and
 *  - the online CPU count is at least MPT3SAS_HIGH_IOPS_REPLY_QUEUES and
 *  - the driver was loaded with the default max_msix_vectors parameter and
 *  - the system was not booted in kdump mode (reset_devices).
 *
 * Return: nothing.
 */
3264 static void
3265 _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
3266 int hba_msix_vector_count)
3267 {
3268 u16 lnksta, speed;
3269
/*
 * Disable high iops queues if io uring poll queues are enabled.
 */
3273 if (perf_mode == MPT_PERF_MODE_IOPS ||
3274 perf_mode == MPT_PERF_MODE_LATENCY ||
3275 ioc->io_uring_poll_queues) {
3276 ioc->high_iops_queues = 0;
3277 return;
3278 }
3279
3280 if (perf_mode == MPT_PERF_MODE_DEFAULT) {
3281
3282 pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta);
3283 speed = lnksta & PCI_EXP_LNKSTA_CLS;
3284
3285 if (speed < 0x4) {
3286 ioc->high_iops_queues = 0;
3287 return;
3288 }
3289 }
3290
3291 if (!reset_devices && ioc->is_aero_ioc &&
3292 hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES &&
3293 num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES &&
3294 max_msix_vectors == -1)
3295 ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES;
3296 else
3297 ioc->high_iops_queues = 0;
3298 }
3299
/**
 * mpt3sas_base_disable_msix - disables msix
 * @ioc: per adapter object
 */
3305 void
3306 mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
3307 {
3308 if (!ioc->msix_enable)
3309 return;
3310 pci_free_irq_vectors(ioc->pdev);
3311 ioc->msix_enable = 0;
3312 kfree(ioc->io_uring_poll_queues);
3313 }
3314
/**
 * _base_alloc_irq_vectors - allocate msix vectors
 * @ioc: per adapter object
 */
3320 static int
3321 _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
3322 {
3323 int i, irq_flags = PCI_IRQ_MSIX;
3324 struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
3325 struct irq_affinity *descp = &desc;
/*
 * Don't allocate msix vectors for poll_queues.
 * msix_vectors is always within a range of FW supported reply queues.
 */
3330 int nr_msix_vectors = ioc->iopoll_q_start_index;
3331
3332
3333 if (ioc->smp_affinity_enable)
3334 irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
3335 else
3336 descp = NULL;
3337
ioc_info(ioc, "high iops queues: %d, reply queue count: %d, msix vectors to allocate: %d\n",
ioc->high_iops_queues, ioc->reply_queue_count, nr_msix_vectors);
3340
3341 i = pci_alloc_irq_vectors_affinity(ioc->pdev,
3342 ioc->high_iops_queues,
3343 nr_msix_vectors, irq_flags, descp);
3344
3345 return i;
3346 }
3347
/**
 * _base_enable_msix - enables msix, failback to io_apic
 * @ioc: per adapter object
 */
3353 static int
3354 _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
3355 {
3356 int r;
3357 int i, local_max_msix_vectors;
3358 u8 try_msix = 0;
3359 int iopoll_q_count = 0;
3360
3361 ioc->msix_load_balance = false;
3362
3363 if (msix_disable == -1 || msix_disable == 0)
3364 try_msix = 1;
3365
3366 if (!try_msix)
3367 goto try_ioapic;
3368
3369 if (_base_check_enable_msix(ioc) != 0)
3370 goto try_ioapic;
3371
3372 ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
3373 pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
3374 ioc->cpu_count, max_msix_vectors);
3375
3376 ioc->reply_queue_count =
3377 min_t(int, ioc->cpu_count, ioc->msix_vector_count);
3378
3379 if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
3380 local_max_msix_vectors = (reset_devices) ? 1 : 8;
3381 else
3382 local_max_msix_vectors = max_msix_vectors;
3383
3384 if (local_max_msix_vectors == 0)
3385 goto try_ioapic;
3386
/*
 * Enable msix_load_balance only if combined reply queue mode is
 * disabled on SAS3 & above generation HBA devices.
 */
3391 if (!ioc->combined_reply_queue &&
3392 ioc->hba_mpi_version_belonged != MPI2_VERSION) {
3393 ioc_info(ioc,
3394 "combined ReplyQueue is off, Enabling msix load balance\n");
3395 ioc->msix_load_balance = true;
3396 }
3397
/*
 * The smp affinity setting is not needed when msix load balance
 * is enabled.
 */
3402 if (ioc->msix_load_balance)
3403 ioc->smp_affinity_enable = 0;
3404
3405 if (!ioc->smp_affinity_enable || ioc->reply_queue_count <= 1)
3406 ioc->shost->host_tagset = 0;
3407
/*
 * Enable io uring poll queues only if host_tagset is enabled.
 */
3411 if (ioc->shost->host_tagset)
3412 iopoll_q_count = poll_queues;
3413
3414 if (iopoll_q_count) {
3415 ioc->io_uring_poll_queues = kcalloc(iopoll_q_count,
3416 sizeof(struct io_uring_poll_queue), GFP_KERNEL);
3417 if (!ioc->io_uring_poll_queues)
3418 iopoll_q_count = 0;
3419 }
3420
3421 if (ioc->is_aero_ioc)
3422 _base_check_and_enable_high_iops_queues(ioc,
3423 ioc->msix_vector_count);
3424
/*
 * Add the high iops queues count to the reply queue count if high
 * iops queues are enabled.
 */
3429 ioc->reply_queue_count = min_t(int,
3430 ioc->reply_queue_count + ioc->high_iops_queues,
3431 ioc->msix_vector_count);
3432
/*
 * Adjust the reply queue count in case it exceeds the user provided
 * MSI-X vectors count.
 */
3437 if (local_max_msix_vectors > 0)
3438 ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
3439 ioc->reply_queue_count);
3440
/*
 * Add the io uring poll queues count to the reply queue count if io
 * uring poll mode is enabled.
 */
3444 if (iopoll_q_count) {
3445 if (ioc->reply_queue_count < (iopoll_q_count + MPT3_MIN_IRQS))
3446 iopoll_q_count = 0;
3447 ioc->reply_queue_count = min_t(int,
3448 ioc->reply_queue_count + iopoll_q_count,
3449 ioc->msix_vector_count);
3450 }
3451
/*
 * Starting index of io uring poll queues in the reply queue list.
 */
3455 ioc->iopoll_q_start_index =
3456 ioc->reply_queue_count - iopoll_q_count;
3457
3458 r = _base_alloc_irq_vectors(ioc);
3459 if (r < 0) {
3460 ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
3461 goto try_ioapic;
3462 }
3463
/*
 * If fewer MSI-X vectors were allocated than requested, shrink the
 * reply queue count and recompute the poll queue start index so the
 * io_uring poll queues stay at the tail of the vector range.
 */
3469 if (r < ioc->iopoll_q_start_index) {
3470 ioc->reply_queue_count = r + iopoll_q_count;
3471 ioc->iopoll_q_start_index =
3472 ioc->reply_queue_count - iopoll_q_count;
3473 }
3474
3475 ioc->msix_enable = 1;
3476 for (i = 0; i < ioc->reply_queue_count; i++) {
3477 r = _base_request_irq(ioc, i);
3478 if (r) {
3479 mpt3sas_base_free_irq(ioc);
3480 mpt3sas_base_disable_msix(ioc);
3481 goto try_ioapic;
3482 }
3483 }
3484
3485 ioc_info(ioc, "High IOPs queues : %s\n",
3486 ioc->high_iops_queues ? "enabled" : "disabled");
3487
3488 return 0;
3489
3490
3491 try_ioapic:
3492 ioc->high_iops_queues = 0;
3493 ioc_info(ioc, "High IOPs queues : disabled\n");
3494 ioc->reply_queue_count = 1;
ioc->iopoll_q_start_index = ioc->reply_queue_count; /* no io_uring poll queues in legacy mode */
3496 r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
3497 if (r < 0) {
3498 dfailprintk(ioc,
3499 ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
3500 r));
3501 } else
3502 r = _base_request_irq(ioc, 0);
3503
3504 return r;
3505 }
3506
/**
 * mpt3sas_base_unmap_resources - free controller resources
 * @ioc: per adapter object
 */
3511 static void
3512 mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
3513 {
3514 struct pci_dev *pdev = ioc->pdev;
3515
3516 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3517
3518 mpt3sas_base_free_irq(ioc);
3519 mpt3sas_base_disable_msix(ioc);
3520
3521 kfree(ioc->replyPostRegisterIndex);
3522 ioc->replyPostRegisterIndex = NULL;
3523
3524
3525 if (ioc->chip_phys) {
3526 iounmap(ioc->chip);
3527 ioc->chip_phys = 0;
3528 }
3529
3530 if (pci_is_enabled(pdev)) {
3531 pci_release_selected_regions(ioc->pdev, ioc->bars);
3532 pci_disable_pcie_error_reporting(pdev);
3533 pci_disable_device(pdev);
3534 }
3535 }
3536
3537 static int
3538 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
3539
/**
 * mpt3sas_base_check_for_fault_and_issue_reset - check if IOC is in fault
 *     state and if it is, issue a diag reset.
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
3547 int
3548 mpt3sas_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
3549 {
3550 u32 ioc_state;
3551 int rc = -EFAULT;
3552
3553 dinitprintk(ioc, pr_info("%s\n", __func__));
3554 if (ioc->pci_error_recovery)
3555 return 0;
3556 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3557 dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));
3558
3559 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3560 mpt3sas_print_fault_code(ioc, ioc_state &
3561 MPI2_DOORBELL_DATA_MASK);
3562 mpt3sas_base_mask_interrupts(ioc);
3563 rc = _base_diag_reset(ioc);
3564 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
3565 MPI2_IOC_STATE_COREDUMP) {
3566 mpt3sas_print_coredump_info(ioc, ioc_state &
3567 MPI2_DOORBELL_DATA_MASK);
3568 mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
3569 mpt3sas_base_mask_interrupts(ioc);
3570 rc = _base_diag_reset(ioc);
3571 }
3572
3573 return rc;
3574 }
3575
/**
 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
3582 int
3583 mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
3584 {
3585 struct pci_dev *pdev = ioc->pdev;
3586 u32 memap_sz;
3587 u32 pio_sz;
3588 int i, r = 0, rc;
3589 u64 pio_chip = 0;
3590 phys_addr_t chip_phys = 0;
3591 struct adapter_reply_queue *reply_q;
3592 int iopoll_q_count = 0;
3593
3594 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3595
3596 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
3597 if (pci_enable_device_mem(pdev)) {
3598 ioc_warn(ioc, "pci_enable_device_mem: failed\n");
3599 ioc->bars = 0;
3600 return -ENODEV;
3601 }
3602
3603
3604 if (pci_request_selected_regions(pdev, ioc->bars,
3605 ioc->driver_name)) {
3606 ioc_warn(ioc, "pci_request_selected_regions: failed\n");
3607 ioc->bars = 0;
3608 r = -ENODEV;
3609 goto out_fail;
3610 }
3611
3612
3613 pci_enable_pcie_error_reporting(pdev);
3614
3615 pci_set_master(pdev);
3616
3617
3618 if (_base_config_dma_addressing(ioc, pdev) != 0) {
3619 ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
3620 r = -ENODEV;
3621 goto out_fail;
3622 }
3623
3624 for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
3625 (!memap_sz || !pio_sz); i++) {
3626 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
3627 if (pio_sz)
3628 continue;
3629 pio_chip = (u64)pci_resource_start(pdev, i);
3630 pio_sz = pci_resource_len(pdev, i);
3631 } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3632 if (memap_sz)
3633 continue;
3634 ioc->chip_phys = pci_resource_start(pdev, i);
3635 chip_phys = ioc->chip_phys;
3636 memap_sz = pci_resource_len(pdev, i);
3637 ioc->chip = ioremap(ioc->chip_phys, memap_sz);
3638 }
3639 }
3640
3641 if (ioc->chip == NULL) {
3642 ioc_err(ioc,
3643 "unable to map adapter memory! or resource not found\n");
3644 r = -EINVAL;
3645 goto out_fail;
3646 }
3647
3648 mpt3sas_base_mask_interrupts(ioc);
3649
3650 r = _base_get_ioc_facts(ioc);
3651 if (r) {
3652 rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
3653 if (rc || (_base_get_ioc_facts(ioc)))
3654 goto out_fail;
3655 }
3656
3657 if (!ioc->rdpq_array_enable_assigned) {
3658 ioc->rdpq_array_enable = ioc->rdpq_array_capable;
3659 ioc->rdpq_array_enable_assigned = 1;
3660 }
3661
3662 r = _base_enable_msix(ioc);
3663 if (r)
3664 goto out_fail;
3665
3666 iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index;
3667 for (i = 0; i < iopoll_q_count; i++) {
3668 atomic_set(&ioc->io_uring_poll_queues[i].busy, 0);
3669 atomic_set(&ioc->io_uring_poll_queues[i].pause, 0);
3670 }
3671
3672 if (!ioc->is_driver_loading)
3673 _base_init_irqpolls(ioc);
3674
/* If the combined reply queue feature is enabled, set up the
 * supplemental reply post host index registers. */
3677 if (ioc->combined_reply_queue) {
/*
 * Allocate an array to hold the iomem addresses of the supplemental
 * ReplyPostHostIndex registers; each entry is computed from the
 * Doorbell base in the loop below.
 */
3684 ioc->replyPostRegisterIndex = kcalloc(
3685 ioc->combined_reply_index_count,
3686 sizeof(resource_size_t *), GFP_KERNEL);
3687 if (!ioc->replyPostRegisterIndex) {
3688 ioc_err(ioc,
3689 "allocation for replyPostRegisterIndex failed!\n");
3690 r = -ENOMEM;
3691 goto out_fail;
3692 }
3693
3694 for (i = 0; i < ioc->combined_reply_index_count; i++) {
3695 ioc->replyPostRegisterIndex[i] =
3696 (resource_size_t __iomem *)
3697 ((u8 __force *)&ioc->chip->Doorbell +
3698 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
3699 (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
3700 }
3701 }
3702
3703 if (ioc->is_warpdrive) {
3704 ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
3705 &ioc->chip->ReplyPostHostIndex;
3706
3707 for (i = 1; i < ioc->cpu_msix_table_sz; i++)
3708 ioc->reply_post_host_index[i] =
3709 (resource_size_t __iomem *)
3710 ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
3711 * 4)));
3712 }
3713
3714 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3715 if (reply_q->msix_index >= ioc->iopoll_q_start_index) {
3716 pr_info("%s: enabled: index: %d\n",
3717 reply_q->name, reply_q->msix_index);
3718 continue;
3719 }
3720
3721 pr_info("%s: %s enabled: IRQ %d\n",
3722 reply_q->name,
3723 ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
3724 pci_irq_vector(ioc->pdev, reply_q->msix_index));
3725 }
3726
3727 ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
3728 &chip_phys, ioc->chip, memap_sz);
3729 ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
3730 (unsigned long long)pio_chip, pio_sz);
3731
3732
3733 pci_save_state(pdev);
3734 return 0;
3735
3736 out_fail:
3737 mpt3sas_base_unmap_resources(ioc);
3738 return r;
3739 }
3740
/**
 * mpt3sas_base_get_msg_frame - obtain request mf pointer
 * @ioc: per adapter object
 * @smid: system request message index (a smid of zero is invalid)
 *
 * Return: virt pointer to message frame.
 */
3748 void *
3749 mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3750 {
3751 return (void *)(ioc->request + (smid * ioc->request_sz));
3752 }
3753
/**
 * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: virt pointer to sense buffer.
 */
3761 void *
3762 mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3763 {
3764 return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
3765 }
3766
/**
 * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: phys pointer to the low 32bit address of the sense buffer.
 */
3774 __le32
3775 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3776 {
3777 return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
3778 SCSI_SENSE_BUFFERSIZE));
3779 }
3780
/**
 * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: virt pointer to a PCIe SGL.
 */
3788 void *
3789 mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3790 {
3791 return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
3792 }
3793
/**
 * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: phys pointer to the PCIe SGL buffer.
 */
3801 dma_addr_t
3802 mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3803 {
3804 return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
3805 }
3806
/**
 * mpt3sas_base_get_reply_virt_addr - obtain reply frame virt address
 * @ioc: per adapter object
 * @phys_addr: lower 32 bit physical addr of the reply
 *
 * Converts the 32bit lower physical addr into a virt address.
 */
3814 void *
3815 mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
3816 {
3817 if (!phys_addr)
3818 return NULL;
3819 return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
3820 }
3821
/**
 * _base_get_msix_index - get the msix index
 * @ioc: per adapter object
 * @scmd: scsi_cmnd object
 *
 * Return: msix index of general reply queues, i.e. the reply queue on
 * which the IO request's reply should be posted by the HBA firmware.
 */
3831 static inline u8
3832 _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
3833 struct scsi_cmnd *scmd)
3834 {
/* Enables reply_queue load balancing */
3836 if (ioc->msix_load_balance)
3837 return ioc->reply_queue_count ?
3838 base_mod64(atomic64_add_return(1,
3839 &ioc->total_io_cnt), ioc->reply_queue_count) : 0;
3840
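/*
 * When the host uses multiple hardware queues (host_tagset), derive
 * the reply queue directly from the block layer's hw queue for this
 * request, offset past the reserved high iops queues.
 */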
3841 if (scmd && ioc->shost->nr_hw_queues > 1) {
3842 u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
3843
3844 return blk_mq_unique_tag_to_hwq(tag) +
3845 ioc->high_iops_queues;
3846 }
3847
3848 return ioc->cpu_msix_table[raw_smp_processor_id()];
3849 }
3850
/**
 * _base_get_high_iops_msix_index - get the msix index of high iops queues
 * @ioc: per adapter object
 * @scmd: scsi_cmnd object
 *
 * Return: msix index of the high iops reply queues, i.e. the high iops
 * reply queue on which the IO request's reply should be posted by the
 * HBA firmware.
 */
3861 static inline u8
3862 _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
3863 struct scsi_cmnd *scmd)
3864 {
/*
 * Round robin the IO interrupts among the high iops reply queues in
 * batches of MPT3SAS_HIGH_IOPS_BATCH_COUNT when the outstanding IO
 * count on the target device exceeds MPT3SAS_DEVICE_HIGH_IOPS_DEPTH.
 */
3871 if (scsi_device_busy(scmd->device) > MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
3872 return base_mod64((
3873 atomic64_add_return(1, &ioc->high_iops_outstanding) /
3874 MPT3SAS_HIGH_IOPS_BATCH_COUNT),
3875 MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
3876
3877 return _base_get_msix_index(ioc, scmd);
3878 }
3879
/**
 * mpt3sas_base_get_smid - obtain a free smid from internal queue
 * @ioc: per adapter object
 * @cb_idx: callback index
 *
 * Return: smid (zero is invalid)
 */
3887 u16
3888 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3889 {
3890 unsigned long flags;
3891 struct request_tracker *request;
3892 u16 smid;
3893
3894 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3895 if (list_empty(&ioc->internal_free_list)) {
3896 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3897 ioc_err(ioc, "%s: smid not available\n", __func__);
3898 return 0;
3899 }
3900
3901 request = list_entry(ioc->internal_free_list.next,
3902 struct request_tracker, tracker_list);
3903 request->cb_idx = cb_idx;
3904 smid = request->smid;
3905 list_del(&request->tracker_list);
3906 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3907 return smid;
3908 }
3909
/**
 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
 * @ioc: per adapter object
 * @cb_idx: callback index
 * @scmd: pointer to scsi command object
 *
 * Return: smid (zero is invalid)
 */
3918 u16
3919 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
3920 struct scsi_cmnd *scmd)
3921 {
3922 struct scsiio_tracker *request = scsi_cmd_priv(scmd);
3923 u16 smid;
3924 u32 tag, unique_tag;
3925
3926 unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
3927 tag = blk_mq_unique_tag_to_tag(unique_tag);
/*
 * Store the hw queue number corresponding to the tag. This hw queue
 * number is used later to determine the unique_tag using the logic
 * below. This unique_tag is used to retrieve the scmd pointer
 * corresponding to the tag using the scsi_host_find_tag() API.
 *
 * tag = smid - 1;
 * unique_tag = ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
 */
3939 ioc->io_queue_num[tag] = blk_mq_unique_tag_to_hwq(unique_tag);
3940
3941 smid = tag + 1;
3942 request->cb_idx = cb_idx;
3943 request->smid = smid;
3944 request->scmd = scmd;
3945 INIT_LIST_HEAD(&request->chain_list);
3946 return smid;
3947 }
3948
/**
 * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
 * @ioc: per adapter object
 * @cb_idx: callback index
 *
 * Return: smid (zero is invalid)
 */
3956 u16
3957 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3958 {
3959 unsigned long flags;
3960 struct request_tracker *request;
3961 u16 smid;
3962
3963 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3964 if (list_empty(&ioc->hpr_free_list)) {
3965 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3966 return 0;
3967 }
3968
3969 request = list_entry(ioc->hpr_free_list.next,
3970 struct request_tracker, tracker_list);
3971 request->cb_idx = cb_idx;
3972 smid = request->smid;
3973 list_del(&request->tracker_list);
3974 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3975 return smid;
3976 }
3977
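/**
 * _base_recovery_check - during host reset, wake the reset thread when
 *	no IO remains pending
 * @ioc: per adapter object
 */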
3978 static void
3979 _base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
3980 {
/*
 * See the mpt3sas_wait_for_commands_to_complete() call with regards
 * to this code.
 */
3984 if (ioc->shost_recovery && ioc->pending_io_count) {
3985 ioc->pending_io_count = scsi_host_busy(ioc->shost);
3986 if (ioc->pending_io_count == 0)
3987 wake_up(&ioc->reset_wq);
3988 }
3989 }
3990
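/**
 * mpt3sas_base_clear_st - clear the scsiio tracker of a given command
 * @ioc: per adapter object
 * @st: scsiio tracker to be cleared
 */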
3991 void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
3992 struct scsiio_tracker *st)
3993 {
3994 if (WARN_ON(st->smid == 0))
3995 return;
3996 st->cb_idx = 0xFF;
3997 st->direct_io = 0;
3998 st->scmd = NULL;
3999 atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
4000 st->smid = 0;
4001 }
4002
/**
 * mpt3sas_base_free_smid - put smid back on free_list
 * @ioc: per adapter object
 * @smid: system request message index
 */
4008 void
4009 mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4010 {
4011 unsigned long flags;
4012 int i;
4013
4014 if (smid < ioc->hi_priority_smid) {
4015 struct scsiio_tracker *st;
4016 void *request;
4017
4018 st = _get_st_from_smid(ioc, smid);
4019 if (!st) {
4020 _base_recovery_check(ioc);
4021 return;
4022 }
4023
/* Clear the MPI request frame */
4025 request = mpt3sas_base_get_msg_frame(ioc, smid);
4026 memset(request, 0, ioc->request_sz);
4027
4028 mpt3sas_base_clear_st(ioc, st);
4029 _base_recovery_check(ioc);
4030 ioc->io_queue_num[smid - 1] = 0;
4031 return;
4032 }
4033
4034 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4035 if (smid < ioc->internal_smid) {
/* hi-priority */
4037 i = smid - ioc->hi_priority_smid;
4038 ioc->hpr_lookup[i].cb_idx = 0xFF;
4039 list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
4040 } else if (smid <= ioc->hba_queue_depth) {
/* internal queue */
4042 i = smid - ioc->internal_smid;
4043 ioc->internal_lookup[i].cb_idx = 0xFF;
4044 list_add(&ioc->internal_lookup[i].tracker_list,
4045 &ioc->internal_free_list);
4046 }
4047 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4048 }
4049
/**
 * _base_mpi_ep_writeq - 32 bit write to MMIO
 * @b: data payload
 * @addr: address in MMIO space
 * @writeq_lock: spin lock
 *
 * This special handling for MPI EP takes care of 32 bit environments
 * where it is not guaranteed that the entire 64 bit word is sent in
 * one transfer.
 */
4060 static inline void
4061 _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
4062 spinlock_t *writeq_lock)
4063 {
4064 unsigned long flags;
4065
4066 spin_lock_irqsave(writeq_lock, flags);
4067 __raw_writel((u32)(b), addr);
4068 __raw_writel((u32)(b >> 32), (addr + 4));
4069 spin_unlock_irqrestore(writeq_lock, flags);
4070 }
4071
/**
 * _base_writeq - 64 bit write to MMIO
 * @b: data payload
 * @addr: address in MMIO space
 * @writeq_lock: spin lock
 *
 * Glue for handling an atomic 64 bit word to MMIO. This special handling
 * takes care of 32 bit environments where it is not guaranteed that the
 * entire 64 bit word is sent in one transfer.
 */
4082 #if defined(writeq) && defined(CONFIG_64BIT)
4083 static inline void
4084 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
4085 {
4086 wmb();
4087 __raw_writeq(b, addr);
4088 barrier();
4089 }
4090 #else
4091 static inline void
4092 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
4093 {
4094 _base_mpi_ep_writeq(b, addr, writeq_lock);
4095 }
4096 #endif
4097
/**
 * _base_set_and_get_msix_index - get the msix index and assign it to the
 *                                msix_io variable of the scsi tracker
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: msix index.
 */
4106 static u8
4107 _base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4108 {
4109 struct scsiio_tracker *st = NULL;
4110
4111 if (smid < ioc->hi_priority_smid)
4112 st = _get_st_from_smid(ioc, smid);
4113
4114 if (st == NULL)
4115 return _base_get_msix_index(ioc, NULL);
4116
4117 st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd);
4118 return st->msix_io;
4119 }
4120
/**
 * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 */
4127 static void
4128 _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc,
4129 u16 smid, u16 handle)
4130 {
4131 Mpi2RequestDescriptorUnion_t descriptor;
4132 u64 *request = (u64 *)&descriptor;
4133 void *mpi_req_iomem;
4134 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
4135
4136 _clone_sg_entries(ioc, (void *) mfp, smid);
4137 mpi_req_iomem = (void __force *)ioc->chip +
4138 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
4139 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
4140 ioc->request_sz);
4141 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
4142 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4143 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
4144 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
4145 descriptor.SCSIIO.LMID = 0;
4146 _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4147 &ioc->scsi_lookup_lock);
4148 }
4149
/**
 * _base_put_smid_scsi_io - send SCSI_IO request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 */
4156 static void
4157 _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
4158 {
4159 Mpi2RequestDescriptorUnion_t descriptor;
4160 u64 *request = (u64 *)&descriptor;
4161
4162
4163 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
4164 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4165 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
4166 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
4167 descriptor.SCSIIO.LMID = 0;
4168 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4169 &ioc->scsi_lookup_lock);
4170 }
4171
/**
 * _base_put_smid_fast_path - send fast path request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 */
4178 static void
4179 _base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4180 u16 handle)
4181 {
4182 Mpi2RequestDescriptorUnion_t descriptor;
4183 u64 *request = (u64 *)&descriptor;
4184
4185 descriptor.SCSIIO.RequestFlags =
4186 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
4187 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4188 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
4189 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
4190 descriptor.SCSIIO.LMID = 0;
4191 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4192 &ioc->scsi_lookup_lock);
4193 }
4194
/**
 * _base_put_smid_hi_priority - send Task Management request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_task: msix_task will be same as msix of IO in case of task abort else 0
 */
4201 static void
4202 _base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4203 u16 msix_task)
4204 {
4205 Mpi2RequestDescriptorUnion_t descriptor;
4206 void *mpi_req_iomem;
4207 u64 *request;
4208
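/*
 * MPI endpoint (mCPU) controllers fetch the request frame from host
 * MMIO space, so the frame must be copied there before posting the
 * descriptor.
 */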
4209 if (ioc->is_mcpu_endpoint) {
4210 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
4211
4212
4213 mpi_req_iomem = (void __force *)ioc->chip
4214 + MPI_FRAME_START_OFFSET
4215 + (smid * ioc->request_sz);
4216 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
4217 ioc->request_sz);
4218 }
4219
4220 request = (u64 *)&descriptor;
4221
4222 descriptor.HighPriority.RequestFlags =
4223 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
4224 descriptor.HighPriority.MSIxIndex = msix_task;
4225 descriptor.HighPriority.SMID = cpu_to_le16(smid);
4226 descriptor.HighPriority.LMID = 0;
4227 descriptor.HighPriority.Reserved1 = 0;
4228 if (ioc->is_mcpu_endpoint)
4229 _base_mpi_ep_writeq(*request,
4230 &ioc->chip->RequestDescriptorPostLow,
4231 &ioc->scsi_lookup_lock);
4232 else
4233 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4234 &ioc->scsi_lookup_lock);
4235 }
4236
/**
 * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 */
4243 void
4244 mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4245 {
4246 Mpi2RequestDescriptorUnion_t descriptor;
4247 u64 *request = (u64 *)&descriptor;
4248
4249 descriptor.Default.RequestFlags =
4250 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
4251 descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4252 descriptor.Default.SMID = cpu_to_le16(smid);
4253 descriptor.Default.LMID = 0;
4254 descriptor.Default.DescriptorTypeDependent = 0;
4255 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4256 &ioc->scsi_lookup_lock);
4257 }
4258
/**
 * _base_put_smid_default - Default, primarily used for config pages
 * @ioc: per adapter object
 * @smid: system request message index
 */
4264 static void
4265 _base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4266 {
4267 Mpi2RequestDescriptorUnion_t descriptor;
4268 void *mpi_req_iomem;
4269 u64 *request;
4270
4271 if (ioc->is_mcpu_endpoint) {
4272 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
4273
4274 _clone_sg_entries(ioc, (void *) mfp, smid);
4275
4276 mpi_req_iomem = (void __force *)ioc->chip +
4277 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
4278 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
4279 ioc->request_sz);
4280 }
4281 request = (u64 *)&descriptor;
4282 descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
4283 descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4284 descriptor.Default.SMID = cpu_to_le16(smid);
4285 descriptor.Default.LMID = 0;
4286 descriptor.Default.DescriptorTypeDependent = 0;
4287 if (ioc->is_mcpu_endpoint)
4288 _base_mpi_ep_writeq(*request,
4289 &ioc->chip->RequestDescriptorPostLow,
4290 &ioc->scsi_lookup_lock);
4291 else
4292 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4293 &ioc->scsi_lookup_lock);
4294 }
4295
/**
 * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
 *   Atomic Request Descriptor
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle, unused in this function, for function type match
 *
 * Return: nothing.
 */
4305 static void
4306 _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4307 u16 handle)
4308 {
4309 Mpi26AtomicRequestDescriptor_t descriptor;
4310 u32 *request = (u32 *)&descriptor;
4311
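/*
 * Atomic request descriptors are 32 bits wide, so a single writel()
 * posts the request; no 64-bit write or writeq lock is needed.
 */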
4312 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
4313 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4314 descriptor.SMID = cpu_to_le16(smid);
4315
4316 writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
4317 }
4318
/**
 * _base_put_smid_fast_path_atomic - send fast path request to firmware
 * using Atomic Request Descriptor
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle, unused in this function, for function type match
 * Return: nothing
 */
4327 static void
4328 _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4329 u16 handle)
4330 {
4331 Mpi26AtomicRequestDescriptor_t descriptor;
4332 u32 *request = (u32 *)&descriptor;
4333
4334 descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
4335 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4336 descriptor.SMID = cpu_to_le16(smid);
4337
4338 writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
4339 }
4340
/**
 * _base_put_smid_hi_priority_atomic - send Task Management request to
 * firmware using Atomic Request Descriptor
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_task: msix_task will be same as msix of IO in case of task abort else 0
 *
 * Return: nothing.
 */
4350 static void
4351 _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4352 u16 msix_task)
4353 {
4354 Mpi26AtomicRequestDescriptor_t descriptor;
4355 u32 *request = (u32 *)&descriptor;
4356
4357 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
4358 descriptor.MSIxIndex = msix_task;
4359 descriptor.SMID = cpu_to_le16(smid);
4360
4361 writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
4362 }
4363
/**
 * _base_put_smid_default_atomic - Default, primarily used for config pages
 * use Atomic Request Descriptor
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: nothing.
 */
4372 static void
4373 _base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4374 {
4375 Mpi26AtomicRequestDescriptor_t descriptor;
4376 u32 *request = (u32 *)&descriptor;
4377
4378 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
4379 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4380 descriptor.SMID = cpu_to_le16(smid);
4381
4382 writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
4383 }
4384
/**
 * _base_display_OEMs_branding - Display branding string
 * @ioc: per adapter object
 */
4389 static void
4390 _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
4391 {
4394
4395 switch (ioc->pdev->subsystem_vendor) {
4396 case PCI_VENDOR_ID_INTEL:
4397 switch (ioc->pdev->device) {
4398 case MPI2_MFGPAGE_DEVID_SAS2008:
4399 switch (ioc->pdev->subsystem_device) {
4400 case MPT2SAS_INTEL_RMS2LL080_SSDID:
4401 ioc_info(ioc, "%s\n",
4402 MPT2SAS_INTEL_RMS2LL080_BRANDING);
4403 break;
4404 case MPT2SAS_INTEL_RMS2LL040_SSDID:
4405 ioc_info(ioc, "%s\n",
4406 MPT2SAS_INTEL_RMS2LL040_BRANDING);
4407 break;
4408 case MPT2SAS_INTEL_SSD910_SSDID:
4409 ioc_info(ioc, "%s\n",
4410 MPT2SAS_INTEL_SSD910_BRANDING);
4411 break;
4412 default:
4413 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4414 ioc->pdev->subsystem_device);
4415 break;
4416 }
4417 break;
4418 case MPI2_MFGPAGE_DEVID_SAS2308_2:
4419 switch (ioc->pdev->subsystem_device) {
4420 case MPT2SAS_INTEL_RS25GB008_SSDID:
4421 ioc_info(ioc, "%s\n",
4422 MPT2SAS_INTEL_RS25GB008_BRANDING);
4423 break;
4424 case MPT2SAS_INTEL_RMS25JB080_SSDID:
4425 ioc_info(ioc, "%s\n",
4426 MPT2SAS_INTEL_RMS25JB080_BRANDING);
4427 break;
4428 case MPT2SAS_INTEL_RMS25JB040_SSDID:
4429 ioc_info(ioc, "%s\n",
4430 MPT2SAS_INTEL_RMS25JB040_BRANDING);
4431 break;
4432 case MPT2SAS_INTEL_RMS25KB080_SSDID:
4433 ioc_info(ioc, "%s\n",
4434 MPT2SAS_INTEL_RMS25KB080_BRANDING);
4435 break;
4436 case MPT2SAS_INTEL_RMS25KB040_SSDID:
4437 ioc_info(ioc, "%s\n",
4438 MPT2SAS_INTEL_RMS25KB040_BRANDING);
4439 break;
4440 case MPT2SAS_INTEL_RMS25LB040_SSDID:
4441 ioc_info(ioc, "%s\n",
4442 MPT2SAS_INTEL_RMS25LB040_BRANDING);
4443 break;
4444 case MPT2SAS_INTEL_RMS25LB080_SSDID:
4445 ioc_info(ioc, "%s\n",
4446 MPT2SAS_INTEL_RMS25LB080_BRANDING);
4447 break;
4448 default:
4449 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4450 ioc->pdev->subsystem_device);
4451 break;
4452 }
4453 break;
4454 case MPI25_MFGPAGE_DEVID_SAS3008:
4455 switch (ioc->pdev->subsystem_device) {
4456 case MPT3SAS_INTEL_RMS3JC080_SSDID:
4457 ioc_info(ioc, "%s\n",
4458 MPT3SAS_INTEL_RMS3JC080_BRANDING);
4459 break;
4460
4461 case MPT3SAS_INTEL_RS3GC008_SSDID:
4462 ioc_info(ioc, "%s\n",
4463 MPT3SAS_INTEL_RS3GC008_BRANDING);
4464 break;
4465 case MPT3SAS_INTEL_RS3FC044_SSDID:
4466 ioc_info(ioc, "%s\n",
4467 MPT3SAS_INTEL_RS3FC044_BRANDING);
4468 break;
4469 case MPT3SAS_INTEL_RS3UC080_SSDID:
4470 ioc_info(ioc, "%s\n",
4471 MPT3SAS_INTEL_RS3UC080_BRANDING);
4472 break;
4473 default:
4474 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4475 ioc->pdev->subsystem_device);
4476 break;
4477 }
4478 break;
4479 default:
4480 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4481 ioc->pdev->subsystem_device);
4482 break;
4483 }
4484 break;
4485 case PCI_VENDOR_ID_DELL:
4486 switch (ioc->pdev->device) {
4487 case MPI2_MFGPAGE_DEVID_SAS2008:
4488 switch (ioc->pdev->subsystem_device) {
4489 case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
4490 ioc_info(ioc, "%s\n",
4491 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
4492 break;
4493 case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
4494 ioc_info(ioc, "%s\n",
4495 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
4496 break;
4497 case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
4498 ioc_info(ioc, "%s\n",
4499 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
4500 break;
4501 case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
4502 ioc_info(ioc, "%s\n",
4503 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
4504 break;
4505 case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
4506 ioc_info(ioc, "%s\n",
4507 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
4508 break;
4509 case MPT2SAS_DELL_PERC_H200_SSDID:
4510 ioc_info(ioc, "%s\n",
4511 MPT2SAS_DELL_PERC_H200_BRANDING);
4512 break;
4513 case MPT2SAS_DELL_6GBPS_SAS_SSDID:
4514 ioc_info(ioc, "%s\n",
4515 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
4516 break;
4517 default:
4518 ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
4519 ioc->pdev->subsystem_device);
4520 break;
4521 }
4522 break;
4523 case MPI25_MFGPAGE_DEVID_SAS3008:
4524 switch (ioc->pdev->subsystem_device) {
4525 case MPT3SAS_DELL_12G_HBA_SSDID:
4526 ioc_info(ioc, "%s\n",
4527 MPT3SAS_DELL_12G_HBA_BRANDING);
4528 break;
4529 default:
4530 ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
4531 ioc->pdev->subsystem_device);
4532 break;
4533 }
4534 break;
4535 default:
4536 ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
4537 ioc->pdev->subsystem_device);
4538 break;
4539 }
4540 break;
4541 case PCI_VENDOR_ID_CISCO:
4542 switch (ioc->pdev->device) {
4543 case MPI25_MFGPAGE_DEVID_SAS3008:
4544 switch (ioc->pdev->subsystem_device) {
4545 case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
4546 ioc_info(ioc, "%s\n",
4547 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
4548 break;
4549 case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
4550 ioc_info(ioc, "%s\n",
4551 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
4552 break;
4553 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4554 ioc_info(ioc, "%s\n",
4555 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4556 break;
4557 default:
4558 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4559 ioc->pdev->subsystem_device);
4560 break;
4561 }
4562 break;
4563 case MPI25_MFGPAGE_DEVID_SAS3108_1:
4564 switch (ioc->pdev->subsystem_device) {
4565 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4566 ioc_info(ioc, "%s\n",
4567 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4568 break;
4569 case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
4570 ioc_info(ioc, "%s\n",
4571 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
4572 break;
4573 default:
4574 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4575 ioc->pdev->subsystem_device);
4576 break;
4577 }
4578 break;
4579 default:
4580 ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
4581 ioc->pdev->subsystem_device);
4582 break;
4583 }
4584 break;
4585 case MPT2SAS_HP_3PAR_SSVID:
4586 switch (ioc->pdev->device) {
4587 case MPI2_MFGPAGE_DEVID_SAS2004:
4588 switch (ioc->pdev->subsystem_device) {
4589 case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
4590 ioc_info(ioc, "%s\n",
4591 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
4592 break;
4593 default:
4594 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4595 ioc->pdev->subsystem_device);
4596 break;
4597 }
4598 break;
4599 case MPI2_MFGPAGE_DEVID_SAS2308_2:
4600 switch (ioc->pdev->subsystem_device) {
4601 case MPT2SAS_HP_2_4_INTERNAL_SSDID:
4602 ioc_info(ioc, "%s\n",
4603 MPT2SAS_HP_2_4_INTERNAL_BRANDING);
4604 break;
4605 case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
4606 ioc_info(ioc, "%s\n",
4607 MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
4608 break;
4609 case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
4610 ioc_info(ioc, "%s\n",
4611 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
4612 break;
4613 case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
4614 ioc_info(ioc, "%s\n",
4615 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
4616 break;
4617 default:
4618 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4619 ioc->pdev->subsystem_device);
4620 break;
4621 }
4622 break;
4623 default:
4624 ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
4625 ioc->pdev->subsystem_device);
4626 break;
4627 }
4628 break;
4629 default:
4630 break;
4631 }
4632 }
4633
/**
 * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
 *				version from the FW Image Header.
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
4641 static int
4642 _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
4643 {
4644 Mpi2FWImageHeader_t *fw_img_hdr;
4645 Mpi26ComponentImageHeader_t *cmp_img_hdr;
4646 Mpi25FWUploadRequest_t *mpi_request;
4647 Mpi2FWUploadReply_t mpi_reply;
4648 int r = 0, issue_diag_reset = 0;
4649 u32 package_version = 0;
4650 void *fwpkg_data = NULL;
4651 dma_addr_t fwpkg_data_dma;
4652 u16 smid, ioc_status;
4653 size_t data_length;
4654
4655 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4656
4657 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
4658 ioc_err(ioc, "%s: internal command already in use\n", __func__);
4659 return -EAGAIN;
4660 }
4661
4662 data_length = sizeof(Mpi2FWImageHeader_t);
4663 fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
4664 &fwpkg_data_dma, GFP_KERNEL);
4665 if (!fwpkg_data) {
4666 ioc_err(ioc,
4667 "Memory allocation for fwpkg data failed at %s:%d/%s()!\n",
4668 __FILE__, __LINE__, __func__);
4669 return -ENOMEM;
4670 }
4671
4672 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4673 if (!smid) {
4674 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
4675 r = -EAGAIN;
4676 goto out;
4677 }
4678
4679 ioc->base_cmds.status = MPT3_CMD_PENDING;
4680 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4681 ioc->base_cmds.smid = smid;
4682 memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
4683 mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
4684 mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
4685 mpi_request->ImageSize = cpu_to_le32(data_length);
4686 ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
4687 data_length);
4688 init_completion(&ioc->base_cmds.done);
4689 ioc->put_smid_default(ioc, smid);
/* Wait up to FW_IMG_HDR_READ_TIMEOUT seconds for the upload to complete */
4691 wait_for_completion_timeout(&ioc->base_cmds.done,
4692 FW_IMG_HDR_READ_TIMEOUT*HZ);
4693 ioc_info(ioc, "%s: complete\n", __func__);
4694 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4695 ioc_err(ioc, "%s: timeout\n", __func__);
4696 _debug_dump_mf(mpi_request,
4697 sizeof(Mpi25FWUploadRequest_t)/4);
4698 issue_diag_reset = 1;
4699 } else {
4700 memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
4701 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
4702 memcpy(&mpi_reply, ioc->base_cmds.reply,
4703 sizeof(Mpi2FWUploadReply_t));
4704 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4705 MPI2_IOCSTATUS_MASK;
4706 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
4707 fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data;
4708 if (le32_to_cpu(fw_img_hdr->Signature) ==
4709 MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) {
4710 cmp_img_hdr =
4711 (Mpi26ComponentImageHeader_t *)
4712 (fwpkg_data);
4713 package_version =
4714 le32_to_cpu(
4715 cmp_img_hdr->ApplicationSpecific);
4716 } else
4717 package_version =
4718 le32_to_cpu(
4719 fw_img_hdr->PackageVersion.Word);
4720 if (package_version)
4721 ioc_info(ioc,
4722 "FW Package Ver(%02d.%02d.%02d.%02d)\n",
4723 ((package_version) & 0xFF000000) >> 24,
4724 ((package_version) & 0x00FF0000) >> 16,
4725 ((package_version) & 0x0000FF00) >> 8,
4726 (package_version) & 0x000000FF);
4727 } else {
4728 _debug_dump_mf(&mpi_reply,
4729 sizeof(Mpi2FWUploadReply_t)/4);
4730 }
4731 }
4732 }
4733 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4734 out:
4735 if (fwpkg_data)
4736 dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
4737 fwpkg_data_dma);
4738 if (issue_diag_reset) {
4739 if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
4740 return -EFAULT;
4741 if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
4742 return -EFAULT;
4743 r = -EAGAIN;
4744 }
4745 return r;
4746 }
4747
/**
 * _base_display_ioc_capabilities - Display IOC's capabilities.
 * @ioc: per adapter object
 */
4752 static void
4753 _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
4754 {
4755 int i = 0;
4756 char desc[17] = {0};
4757 u32 iounit_pg1_flags;
4758 u32 bios_version;
4759
4760 bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
4761 strncpy(desc, ioc->manu_pg0.ChipName, 16);
4762 ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
4763 desc,
4764 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
4765 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
4766 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
4767 ioc->facts.FWVersion.Word & 0x000000FF,
4768 ioc->pdev->revision,
4769 (bios_version & 0xFF000000) >> 24,
4770 (bios_version & 0x00FF0000) >> 16,
4771 (bios_version & 0x0000FF00) >> 8,
4772 bios_version & 0x000000FF);
4773
4774 _base_display_OEMs_branding(ioc);
4775
4776 ioc_info(ioc, "Protocol=(");
4777
4778 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
4779 pr_cont("Initiator");
4780 i++;
4781 }
4782
4783 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
4784 pr_cont("%sTarget", i ? "," : "");
4785 i++;
4786 }
4787
4788 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
4789 pr_cont("%sNVMe", i ? "," : "");
4790 i++;
4791 }
4792
4793 i = 0;
4794 pr_cont("), Capabilities=(");
4795
4796 if (!ioc->hide_ir_msg) {
4797 if (ioc->facts.IOCCapabilities &
4798 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
4799 pr_cont("Raid");
4800 i++;
4801 }
4802 }
4803
4804 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
4805 pr_cont("%sTLR", i ? "," : "");
4806 i++;
4807 }
4808
4809 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
4810 pr_cont("%sMulticast", i ? "," : "");
4811 i++;
4812 }
4813
4814 if (ioc->facts.IOCCapabilities &
4815 MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
4816 pr_cont("%sBIDI Target", i ? "," : "");
4817 i++;
4818 }
4819
4820 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
4821 pr_cont("%sEEDP", i ? "," : "");
4822 i++;
4823 }
4824
4825 if (ioc->facts.IOCCapabilities &
4826 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
4827 pr_cont("%sSnapshot Buffer", i ? "," : "");
4828 i++;
4829 }
4830
4831 if (ioc->facts.IOCCapabilities &
4832 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
4833 pr_cont("%sDiag Trace Buffer", i ? "," : "");
4834 i++;
4835 }
4836
4837 if (ioc->facts.IOCCapabilities &
4838 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
4839 pr_cont("%sDiag Extended Buffer", i ? "," : "");
4840 i++;
4841 }
4842
4843 if (ioc->facts.IOCCapabilities &
4844 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
4845 pr_cont("%sTask Set Full", i ? "," : "");
4846 i++;
4847 }
4848
4849 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4850 if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
4851 pr_cont("%sNCQ", i ? "," : "");
4852 i++;
4853 }
4854
4855 pr_cont(")\n");
4856 }
4857
4858
4859
4860
4861
4862
4863
4864
4865
4866
4867
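/**
 * mpt3sas_base_update_missing_delay - change the missing delay timers
 * @ioc: per adapter object
 * @device_missing_delay: amount of time till device is reported missing
 * @io_missing_delay: interval IO is returned when there is a missing device
 *
 * Passed on to firmware in SAS IO Unit Page 1; on success the new values
 * are also cached in the per adapter object.
 */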
4868 void
4869 mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
4870 u16 device_missing_delay, u8 io_missing_delay)
4871 {
4872 u16 dmd, dmd_new, dmd_original;
4873 u8 io_missing_delay_original;
4874 u16 sz;
4875 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
4876 Mpi2ConfigReply_t mpi_reply;
4877 u8 num_phys = 0;
4878 u16 ioc_status;
4879
4880 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
4881 if (!num_phys)
4882 return;
4883
4884 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
4885 sizeof(Mpi2SasIOUnit1PhyData_t));
4886 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
4887 if (!sas_iounit_pg1) {
4888 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4889 __FILE__, __LINE__, __func__);
4890 goto out;
4891 }
4892 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
4893 sas_iounit_pg1, sz))) {
4894 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4895 __FILE__, __LINE__, __func__);
4896 goto out;
4897 }
4898 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4899 MPI2_IOCSTATUS_MASK;
4900 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4901 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4902 __FILE__, __LINE__, __func__);
4903 goto out;
4904 }
4905
4906
4907 dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
4908 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4909 dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4910 else
4911 dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4912 dmd_original = dmd;
4913 if (device_missing_delay > 0x7F) {
4914 dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
4915 device_missing_delay;
4916 dmd = dmd / 16;
4917 dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
4918 } else
4919 dmd = device_missing_delay;
4920 sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
4921
4922
4923 io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
4924 sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
4925
4926 if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
4927 sz)) {
4928 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4929 dmd_new = (dmd &
4930 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4931 else
4932 dmd_new =
4933 dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4934 ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
4935 dmd_original, dmd_new);
4936 ioc_info(ioc, "io_missing_delay: old(%d), new(%d)\n",
4937 io_missing_delay_original,
4938 io_missing_delay);
4939 ioc->device_missing_delay = dmd_new;
4940 ioc->io_missing_delay = io_missing_delay;
4941 }
4942
4943 out:
4944 kfree(sas_iounit_pg1);
4945 }
4946
4947
4948
4949
4950
4951
4952
4953
4954
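/**
 * _base_update_ioc_page1_inlinewith_perf_mode - Update IOC Page1 fields
 *    according to performance mode.
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */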
4955 static int
4956 _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
4957 {
4958 Mpi2IOCPage1_t ioc_pg1;
4959 Mpi2ConfigReply_t mpi_reply;
4960 int rc;
4961
4962 rc = mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
4963 if (rc)
4964 return rc;
4965 memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));
4966
4967 switch (perf_mode) {
4968 case MPT_PERF_MODE_DEFAULT:
4969 case MPT_PERF_MODE_BALANCED:
4970 if (ioc->high_iops_queues) {
4971 ioc_info(ioc,
4972 "Enable interrupt coalescing only for first\t"
4973 "%d reply queues\n",
4974 MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
4975
4976
4977
4978
4979
4980
4981
4982
4983
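/*
 * If the 31st bit is zero then interrupt coalescing is
 * enabled for all reply descriptor post queues.
 * If the 31st bit is set to one then the user can
 * enable/disable interrupt coalescing on a per reply
 * descriptor post queue group(8) basis. So to enable
 * interrupt coalescing only on the first reply descriptor
 * post queue group, the 31st bit and the zeroth bit are set.
 */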
4984 ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
4985 ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
4986 rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4987 if (rc)
4988 return rc;
4989 ioc_info(ioc, "performance mode: balanced\n");
4990 return 0;
4991 }
4992 fallthrough;
4993 case MPT_PERF_MODE_LATENCY:
4994
4995
4996
4997
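/*
 * Enable interrupt coalescing on all reply queues
 * with timeout value 0xA.
 */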
4998 ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
4999 ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
5000 ioc_pg1.ProductSpecific = 0;
5001 rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
5002 if (rc)
5003 return rc;
5004 ioc_info(ioc, "performance mode: latency\n");
5005 break;
5006 case MPT_PERF_MODE_IOPS:
5007
5008
5009
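/* Enable interrupt coalescing on all reply queues. */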
5010 ioc_info(ioc,
5011 "performance mode: iops with coalescing timeout: 0x%x\n",
5012 le32_to_cpu(ioc_pg1.CoalescingTimeout));
5013 ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
5014 ioc_pg1.ProductSpecific = 0;
5015 rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
5016 if (rc)
5017 return rc;
5018 break;
5019 }
5020 return 0;
5021 }
5022
5023
5024
5025
5026
5027
5028
5029
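/**
 * _base_get_event_diag_triggers - get event diag trigger values from
 *				persistent pages
 * @ioc : per adapter object
 *
 * Return: 0 on success; non-zero status if reading trigger page 2 failed.
 */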
5030 static int
5031 _base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5032 {
5033 Mpi26DriverTriggerPage2_t trigger_pg2;
5034 struct SL_WH_EVENT_TRIGGER_T *event_tg;
5035 MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY *mpi_event_tg;
5036 Mpi2ConfigReply_t mpi_reply;
5037 int r = 0, i = 0;
5038 u16 count = 0;
5039 u16 ioc_status;
5040
5041 r = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply,
5042 &trigger_pg2);
5043 if (r)
5044 return r;
5045
5046 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5047 MPI2_IOCSTATUS_MASK;
5048 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5049 dinitprintk(ioc,
5050 ioc_err(ioc,
5051 "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n",
5052 __func__, ioc_status));
5053 return 0;
5054 }
5055
5056 if (le16_to_cpu(trigger_pg2.NumMPIEventTrigger)) {
5057 count = le16_to_cpu(trigger_pg2.NumMPIEventTrigger);
5058 count = min_t(u16, NUM_VALID_ENTRIES, count);
5059 ioc->diag_trigger_event.ValidEntries = count;
5060
5061 event_tg = &ioc->diag_trigger_event.EventTriggerEntry[0];
5062 mpi_event_tg = &trigger_pg2.MPIEventTriggers[0];
5063 for (i = 0; i < count; i++) {
5064 event_tg->EventValue = le16_to_cpu(
5065 mpi_event_tg->MPIEventCode);
5066 event_tg->LogEntryQualifier = le16_to_cpu(
5067 mpi_event_tg->MPIEventCodeSpecific);
5068 event_tg++;
5069 mpi_event_tg++;
5070 }
5071 }
5072 return 0;
5073 }
5074
5075
5076
5077
5078
5079
5080
5081
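/**
 * _base_get_scsi_diag_triggers - get scsi diag trigger values from
 *				persistent pages
 * @ioc : per adapter object
 *
 * Return: 0 on success; non-zero status if reading trigger page 3 failed.
 */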
5082 static int
5083 _base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5084 {
5085 Mpi26DriverTriggerPage3_t trigger_pg3;
5086 struct SL_WH_SCSI_TRIGGER_T *scsi_tg;
5087 MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY *mpi_scsi_tg;
5088 Mpi2ConfigReply_t mpi_reply;
5089 int r = 0, i = 0;
5090 u16 count = 0;
5091 u16 ioc_status;
5092
5093 r = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply,
5094 &trigger_pg3);
5095 if (r)
5096 return r;
5097
5098 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5099 MPI2_IOCSTATUS_MASK;
5100 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5101 dinitprintk(ioc,
5102 ioc_err(ioc,
5103 "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n",
5104 __func__, ioc_status));
5105 return 0;
5106 }
5107
5108 if (le16_to_cpu(trigger_pg3.NumSCSISenseTrigger)) {
5109 count = le16_to_cpu(trigger_pg3.NumSCSISenseTrigger);
5110 count = min_t(u16, NUM_VALID_ENTRIES, count);
5111 ioc->diag_trigger_scsi.ValidEntries = count;
5112
5113 scsi_tg = &ioc->diag_trigger_scsi.SCSITriggerEntry[0];
5114 mpi_scsi_tg = &trigger_pg3.SCSISenseTriggers[0];
5115 for (i = 0; i < count; i++) {
5116 scsi_tg->ASCQ = mpi_scsi_tg->ASCQ;
5117 scsi_tg->ASC = mpi_scsi_tg->ASC;
5118 scsi_tg->SenseKey = mpi_scsi_tg->SenseKey;
5119
5120 scsi_tg++;
5121 mpi_scsi_tg++;
5122 }
5123 }
5124 return 0;
5125 }
5126
5127
5128
5129
5130
5131
5132
5133
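/**
 * _base_get_mpi_diag_triggers - get IOC status & log info diag trigger
 *				values from persistent pages
 * @ioc : per adapter object
 *
 * Return: 0 on success; non-zero status if reading trigger page 4 failed.
 */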
5134 static int
5135 _base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5136 {
5137 Mpi26DriverTriggerPage4_t trigger_pg4;
5138 struct SL_WH_MPI_TRIGGER_T *status_tg;
5139 MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY *mpi_status_tg;
5140 Mpi2ConfigReply_t mpi_reply;
5141 int r = 0, i = 0;
5142 u16 count = 0;
5143 u16 ioc_status;
5144
5145 r = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply,
5146 &trigger_pg4);
5147 if (r)
5148 return r;
5149
5150 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5151 MPI2_IOCSTATUS_MASK;
5152 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5153 dinitprintk(ioc,
5154 ioc_err(ioc,
5155 "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n",
5156 __func__, ioc_status));
5157 return 0;
5158 }
5159
5160 if (le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger)) {
5161 count = le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger);
5162 count = min_t(u16, NUM_VALID_ENTRIES, count);
5163 ioc->diag_trigger_mpi.ValidEntries = count;
5164
5165 status_tg = &ioc->diag_trigger_mpi.MPITriggerEntry[0];
5166 mpi_status_tg = &trigger_pg4.IOCStatusLoginfoTriggers[0];
5167
5168 for (i = 0; i < count; i++) {
5169 status_tg->IOCStatus = le16_to_cpu(
5170 mpi_status_tg->IOCStatus);
5171 status_tg->IocLogInfo = le32_to_cpu(
5172 mpi_status_tg->LogInfo);
5173
5174 status_tg++;
5175 mpi_status_tg++;
5176 }
5177 }
5178 return 0;
5179 }
5180
5181
5182
5183
5184
5185
5186
5187
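/**
 * _base_get_master_diag_triggers - get master diag trigger values from
 *				persistent pages
 * @ioc : per adapter object
 *
 * Return: 0 on success; non-zero status if reading trigger page 1 failed.
 */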
5188 static int
5189 _base_get_master_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5190 {
5191 Mpi26DriverTriggerPage1_t trigger_pg1;
5192 Mpi2ConfigReply_t mpi_reply;
5193 int r;
5194 u16 ioc_status;
5195
5196 r = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply,
5197 &trigger_pg1);
5198 if (r)
5199 return r;
5200
5201 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5202 MPI2_IOCSTATUS_MASK;
5203 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5204 dinitprintk(ioc,
5205 ioc_err(ioc,
5206 "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n",
5207 __func__, ioc_status));
5208 return 0;
5209 }
5210
5211 if (le16_to_cpu(trigger_pg1.NumMasterTrigger))
5212 ioc->diag_trigger_master.MasterData |=
5213 le32_to_cpu(
5214 trigger_pg1.MasterTriggers[0].MasterTriggerFlags);
5215 return 0;
5216 }
5217
5218
5219
5220
5221
5222
5223
5224
5225
5226
5227
5228
5229
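/**
 * _base_check_for_trigger_pages_support - checks whether HBA FW supports
 *					driver trigger pages or not
 * @ioc : per adapter object
 * @trigger_flags : address where trigger page0's TriggerFlags value is copied
 *
 * Return: 0 when trigger pages are supported (TriggerFlags copied to
 * @trigger_flags); -EFAULT when they are not supported; any other non-zero
 * status when reading trigger page 0 itself failed.
 */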
5230 static int
5231 _base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc, u32 *trigger_flags)
5232 {
5233 Mpi26DriverTriggerPage0_t trigger_pg0;
5234 int r = 0;
5235 Mpi2ConfigReply_t mpi_reply;
5236 u16 ioc_status;
5237
5238 r = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply,
5239 &trigger_pg0);
5240 if (r)
5241 return r;
5242
5243 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5244 MPI2_IOCSTATUS_MASK;
5245 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5246 return -EFAULT;
5247
5248 *trigger_flags = le16_to_cpu(trigger_pg0.TriggerFlags);
5249 return 0;
5250 }
5251
5252
5253
5254
5255
5256
5257
5258
5259
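/**
 * _base_get_diag_triggers - Retrieve diag trigger values from
 *				persistent pages.
 * @ioc : per adapter object
 *
 * Return: 0 on success; non-zero on failure.
 */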
5260 static int
5261 _base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5262 {
5263 u32 trigger_flags;
5264 int r;
5265
5266
5267
5268
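/*
 * Default setting of master trigger.
 */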
5269 ioc->diag_trigger_master.MasterData =
5270 (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
5271
5272 r = _base_check_for_trigger_pages_support(ioc, &trigger_flags);
5273 if (r) {
5274 if (r == -EAGAIN)
5275 return r;
5276
5277
5278
5279
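/*
 * Don't go for error handling when FW doesn't support
 * driver trigger pages.
 */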
5280 return 0;
5281 }
5282
5283 ioc->supports_trigger_pages = 1;
5284
5285
5286
5287
5288
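/*
 * Retrieve master diag trigger values from driver trigger pg1
 * if master trigger bit enabled in TriggerFlags.
 */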
5289 if ((u16)trigger_flags &
5290 MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID) {
5291 r = _base_get_master_diag_triggers(ioc);
5292 if (r)
5293 return r;
5294 }
5295
5296
5297
5298
5299
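/*
 * Retrieve event diag trigger values from driver trigger pg2
 * if event trigger bit enabled in TriggerFlags.
 */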
5300 if ((u16)trigger_flags &
5301 MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID) {
5302 r = _base_get_event_diag_triggers(ioc);
5303 if (r)
5304 return r;
5305 }
5306
5307
5308
5309
5310
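/*
 * Retrieve scsi diag trigger values from driver trigger pg3
 * if scsi trigger bit enabled in TriggerFlags.
 */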
5311 if ((u16)trigger_flags &
5312 MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID) {
5313 r = _base_get_scsi_diag_triggers(ioc);
5314 if (r)
5315 return r;
5316 }
5317
5318
5319
5320
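/*
 * Retrieve IOC status & log info diag trigger values from
 * driver trigger pg4 if loginfo trigger bit enabled in TriggerFlags.
 */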
5321 if ((u16)trigger_flags &
5322 MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID) {
5323 r = _base_get_mpi_diag_triggers(ioc);
5324 if (r)
5325 return r;
5326 }
5327 return 0;
5328 }
5329
5330
5331
5332
5333
5334
5335
5336
5337
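/**
 * _base_update_diag_trigger_pages - Update driver trigger pages with
 *			the triggers cached in the per adapter object.
 * @ioc : per adapter object
 */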
5338 static void
5339 _base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc)
5340 {
5341
5342 if (ioc->diag_trigger_master.MasterData)
5343 mpt3sas_config_update_driver_trigger_pg1(ioc,
5344 &ioc->diag_trigger_master, 1);
5345
5346 if (ioc->diag_trigger_event.ValidEntries)
5347 mpt3sas_config_update_driver_trigger_pg2(ioc,
5348 &ioc->diag_trigger_event, 1);
5349
5350 if (ioc->diag_trigger_scsi.ValidEntries)
5351 mpt3sas_config_update_driver_trigger_pg3(ioc,
5352 &ioc->diag_trigger_scsi, 1);
5353
5354 if (ioc->diag_trigger_mpi.ValidEntries)
5355 mpt3sas_config_update_driver_trigger_pg4(ioc,
5356 &ioc->diag_trigger_mpi, 1);
5357 }
5358
5359
5360
5361
5362
5363
5364
5365
5366
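/**
 * _base_assign_fw_reported_qd - Get FW reported QD for SAS/SATA/NVMe devices.
 *				On failure, fall back to the default QD values.
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */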
5367 static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
5368 {
5369 Mpi2ConfigReply_t mpi_reply;
5370 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
5371 Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1;
5372 u16 depth;
5373 int sz;
5374 int rc = 0;
5375
5376 ioc->max_wideport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
5377 ioc->max_narrowport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
5378 ioc->max_sata_qd = MPT3SAS_SATA_QUEUE_DEPTH;
5379 ioc->max_nvme_qd = MPT3SAS_NVME_QUEUE_DEPTH;
5380 if (!ioc->is_gen35_ioc)
5381 goto out;
5382
5383 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData);
5384 sas_iounit_pg1 = kzalloc(sizeof(Mpi2SasIOUnitPage1_t), GFP_KERNEL);
5385 if (!sas_iounit_pg1) {
5386 pr_err("%s: failure at %s:%d/%s()!\n",
5387 ioc->name, __FILE__, __LINE__, __func__);
5388 return rc;
5389 }
5390 rc = mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
5391 sas_iounit_pg1, sz);
5392 if (rc) {
5393 pr_err("%s: failure at %s:%d/%s()!\n",
5394 ioc->name, __FILE__, __LINE__, __func__);
5395 goto out;
5396 }
5397
5398 depth = le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth);
5399 ioc->max_wideport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
5400
5401 depth = le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth);
5402 ioc->max_narrowport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
5403
5404 depth = sas_iounit_pg1->SATAMaxQDepth;
5405 ioc->max_sata_qd = (depth ? depth : MPT3SAS_SATA_QUEUE_DEPTH);
5406
5407
5408 rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply,
5409 &pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t));
5410 if (rc) {
5411 pr_err("%s: failure at %s:%d/%s()!\n",
5412 ioc->name, __FILE__, __LINE__, __func__);
5413 goto out;
5414 }
5415 ioc->max_nvme_qd = (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) ?
5416 (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) :
5417 MPT3SAS_NVME_QUEUE_DEPTH;
5418 out:
5419 dinitprintk(ioc, pr_err(
5420 "MaxWidePortQD: 0x%x MaxNarrowPortQD: 0x%x MaxSataQD: 0x%x MaxNvmeQD: 0x%x\n",
5421 ioc->max_wideport_qd, ioc->max_narrowport_qd,
5422 ioc->max_sata_qd, ioc->max_nvme_qd));
5423 kfree(sas_iounit_pg1);
5424 return rc;
5425 }
5426
5427
5428
5429
5430
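/**
 * _base_static_config_pages - static start of day config pages
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */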
5431 static int
5432 _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
5433 {
5434 Mpi2ConfigReply_t mpi_reply;
5435 u32 iounit_pg1_flags;
5436 u32 tg_flags = 0;
5437 int rc;
5438 ioc->nvme_abort_timeout = 30;
5439
5440 rc = mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply,
5441 &ioc->manu_pg0);
5442 if (rc)
5443 return rc;
5444 if (ioc->ir_firmware) {
5445 rc = mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
5446 &ioc->manu_pg10);
5447 if (rc)
5448 return rc;
5449 }
5450
5451
5452
5453
5454 rc = mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply,
5455 &ioc->manu_pg11);
5456 if (rc)
5457 return rc;
5458 if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
5459 pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
5460 ioc->name);
5461 ioc->manu_pg11.EEDPTagMode &= ~0x3;
5462 ioc->manu_pg11.EEDPTagMode |= 0x1;
5463 mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
5464 &ioc->manu_pg11);
5465 }
5466 if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
5467 ioc->tm_custom_handling = 1;
5468 else {
5469 ioc->tm_custom_handling = 0;
5470 if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
5471 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
5472 else if (ioc->manu_pg11.NVMeAbortTO >
5473 NVME_TASK_ABORT_MAX_TIMEOUT)
5474 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
5475 else
5476 ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
5477 }
5478 ioc->time_sync_interval =
5479 ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_MASK;
5480 if (ioc->time_sync_interval) {
5481 if (ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_UNIT_MASK)
5482 ioc->time_sync_interval =
5483 ioc->time_sync_interval * SECONDS_PER_HOUR;
5484 else
5485 ioc->time_sync_interval =
5486 ioc->time_sync_interval * SECONDS_PER_MIN;
5487 dinitprintk(ioc, ioc_info(ioc,
5488 "Driver-FW TimeSync interval is %d seconds. ManuPg11 TimeSync Unit is in %s\n",
5489 ioc->time_sync_interval, (ioc->manu_pg11.TimeSyncInterval &
5490 MPT3SAS_TIMESYNC_UNIT_MASK) ? "Hour" : "Minute"));
5491 } else {
5492 if (ioc->is_gen35_ioc)
5493 ioc_warn(ioc,
5494 "TimeSync Interval in Manuf page-11 is not enabled. Periodic Time-Sync will be disabled\n");
5495 }
5496 rc = _base_assign_fw_reported_qd(ioc);
5497 if (rc)
5498 return rc;
5499 rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
5500 if (rc)
5501 return rc;
5502 rc = mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
5503 if (rc)
5504 return rc;
5505 rc = mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
5506 if (rc)
5507 return rc;
5508 rc = mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
5509 if (rc)
5510 return rc;
5511 rc = mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
5512 if (rc)
5513 return rc;
5514 rc = mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
5515 if (rc)
5516 return rc;
5517 _base_display_ioc_capabilities(ioc);
5518
5519
5520
5521
5522
5523 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
5524 if ((ioc->facts.IOCCapabilities &
5525 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
5526 iounit_pg1_flags &=
5527 ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
5528 else
5529 iounit_pg1_flags |=
5530 MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
5531 ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
5532 rc = mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
5533 if (rc)
5534 return rc;
5535
5536 if (ioc->iounit_pg8.NumSensors)
5537 ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
5538 if (ioc->is_aero_ioc) {
5539 rc = _base_update_ioc_page1_inlinewith_perf_mode(ioc);
5540 if (rc)
5541 return rc;
5542 }
5543 if (ioc->is_gen35_ioc) {
5544 if (ioc->is_driver_loading) {
5545 rc = _base_get_diag_triggers(ioc);
5546 if (rc)
5547 return rc;
5548 } else {
5549
5550
5551
5552
5553
5554
5555
5556
5557
5558
5559
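/*
 * In case of an online firmware upgrade, re-check whether the
 * firmware supports the driver trigger pages or not. If the
 * previous firmware didn't support them and the new one does,
 * push the cached trigger values to the firmware. If the
 * previous firmware supported them and the new one doesn't,
 * clear the supports_trigger_pages flag.
 */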
5560 rc = _base_check_for_trigger_pages_support(ioc, &tg_flags);
5561 if (!ioc->supports_trigger_pages && !rc)
5562 _base_update_diag_trigger_pages(ioc);
5563 else if (ioc->supports_trigger_pages &&
5564 rc == -EFAULT)
5565 ioc->supports_trigger_pages = 0;
5566 }
5567 }
5568 return 0;
5569 }
5570
5571
5572
5573
5574
5575
5576
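/**
 * mpt3sas_free_enclosure_list - release memory
 * @ioc: per adapter object
 *
 * Free memory allocated during enclosure add.
 */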
5577 void
5578 mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
5579 {
5580 struct _enclosure_node *enclosure_dev, *enclosure_dev_next;
5581
5582
5583 list_for_each_entry_safe(enclosure_dev,
5584 enclosure_dev_next, &ioc->enclosure_list, list) {
5585 list_del(&enclosure_dev->list);
5586 kfree(enclosure_dev);
5587 }
5588 }
5589
5590
5591
5592
5593
5594
5595
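/**
 * _base_release_memory_pools - release memory
 * @ioc: per adapter object
 *
 * Free memory allocated from _base_allocate_memory_pools.
 */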
5596 static void
5597 _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
5598 {
5599 int i = 0;
5600 int j = 0;
5601 int dma_alloc_count = 0;
5602 struct chain_tracker *ct;
5603 int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
5604
5605 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5606
5607 if (ioc->request) {
5608 dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
5609 ioc->request, ioc->request_dma);
5610 dexitprintk(ioc,
5611 ioc_info(ioc, "request_pool(0x%p): free\n",
5612 ioc->request));
5613 ioc->request = NULL;
5614 }
5615
5616 if (ioc->sense) {
5617 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
5618 dma_pool_destroy(ioc->sense_dma_pool);
5619 dexitprintk(ioc,
5620 ioc_info(ioc, "sense_pool(0x%p): free\n",
5621 ioc->sense));
5622 ioc->sense = NULL;
5623 }
5624
5625 if (ioc->reply) {
5626 dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
5627 dma_pool_destroy(ioc->reply_dma_pool);
5628 dexitprintk(ioc,
5629 ioc_info(ioc, "reply_pool(0x%p): free\n",
5630 ioc->reply));
5631 ioc->reply = NULL;
5632 }
5633
5634 if (ioc->reply_free) {
5635 dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
5636 ioc->reply_free_dma);
5637 dma_pool_destroy(ioc->reply_free_dma_pool);
5638 dexitprintk(ioc,
5639 ioc_info(ioc, "reply_free_pool(0x%p): free\n",
5640 ioc->reply_free));
5641 ioc->reply_free = NULL;
5642 }
5643
5644 if (ioc->reply_post) {
5645 dma_alloc_count = DIV_ROUND_UP(count,
5646 RDPQ_MAX_INDEX_IN_ONE_CHUNK);
5647 for (i = 0; i < count; i++) {
5648 if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0
5649 && dma_alloc_count) {
5650 if (ioc->reply_post[i].reply_post_free) {
5651 dma_pool_free(
5652 ioc->reply_post_free_dma_pool,
5653 ioc->reply_post[i].reply_post_free,
5654 ioc->reply_post[i].reply_post_free_dma);
5655 dexitprintk(ioc, ioc_info(ioc,
5656 "reply_post_free_pool(0x%p): free\n",
5657 ioc->reply_post[i].reply_post_free));
5658 ioc->reply_post[i].reply_post_free =
5659 NULL;
5660 }
5661 --dma_alloc_count;
5662 }
5663 }
5664 dma_pool_destroy(ioc->reply_post_free_dma_pool);
5665 if (ioc->reply_post_free_array &&
5666 ioc->rdpq_array_enable) {
5667 dma_pool_free(ioc->reply_post_free_array_dma_pool,
5668 ioc->reply_post_free_array,
5669 ioc->reply_post_free_array_dma);
5670 ioc->reply_post_free_array = NULL;
5671 }
5672 dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
5673 kfree(ioc->reply_post);
5674 }
5675
5676 if (ioc->pcie_sgl_dma_pool) {
5677 for (i = 0; i < ioc->scsiio_depth; i++) {
5678 dma_pool_free(ioc->pcie_sgl_dma_pool,
5679 ioc->pcie_sg_lookup[i].pcie_sgl,
5680 ioc->pcie_sg_lookup[i].pcie_sgl_dma);
5681 ioc->pcie_sg_lookup[i].pcie_sgl = NULL;
5682 }
5683 dma_pool_destroy(ioc->pcie_sgl_dma_pool);
5684 }
5685 if (ioc->config_page) {
5686 dexitprintk(ioc,
5687 ioc_info(ioc, "config_page(0x%p): free\n",
5688 ioc->config_page));
5689 dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
5690 ioc->config_page, ioc->config_page_dma);
5691 }
5692
5693 kfree(ioc->hpr_lookup);
5694 ioc->hpr_lookup = NULL;
5695 kfree(ioc->internal_lookup);
5696 ioc->internal_lookup = NULL;
5697 if (ioc->chain_lookup) {
5698 for (i = 0; i < ioc->scsiio_depth; i++) {
5699 for (j = ioc->chains_per_prp_buffer;
5700 j < ioc->chains_needed_per_io; j++) {
5701 ct = &ioc->chain_lookup[i].chains_per_smid[j];
5702 if (ct && ct->chain_buffer)
5703 dma_pool_free(ioc->chain_dma_pool,
5704 ct->chain_buffer,
5705 ct->chain_buffer_dma);
5706 }
5707 kfree(ioc->chain_lookup[i].chains_per_smid);
5708 }
5709 dma_pool_destroy(ioc->chain_dma_pool);
5710 kfree(ioc->chain_lookup);
5711 ioc->chain_lookup = NULL;
5712 }
5713
5714 kfree(ioc->io_queue_num);
5715 ioc->io_queue_num = NULL;
5716 }
5717
5718
5719
5720
5721
5722
5723
5724
5725
5726
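/**
 * mpt3sas_check_same_4gb_region - checks whether the given DMA-able memory
 *	chunk lies within a 4GB boundary.
 * @start_address: DMA address of the start of the chunk
 * @pool_sz: size of the chunk in bytes
 *
 * Return: 1 if the chunk does not cross a 4GB boundary, else 0.
 */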
5727 static int
5728 mpt3sas_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz)
5729 {
5730 dma_addr_t end_address;
5731
5732 end_address = start_address + pool_sz - 1;
5733
5734 if (upper_32_bits(start_address) == upper_32_bits(end_address))
5735 return 1;
5736 else
5737 return 0;
5738 }
5739
5740
5741
5742
5743
5744
5745
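/**
 * _base_reduce_hba_queue_depth - Retry with a reduced queue depth.
 * @ioc: per adapter object
 *
 * Reduce the HBA queue depth by 64 so that the memory pool allocations
 * can be retried with a smaller footprint.
 *
 * Return: 0 for success, -ENOMEM if the depth cannot be reduced further.
 */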
5746 static inline int
5747 _base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc)
5748 {
5749 int reduce_sz = 64;
5750
5751 if ((ioc->hba_queue_depth - reduce_sz) >
5752 (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
5753 ioc->hba_queue_depth -= reduce_sz;
5754 return 0;
5755 } else
5756 return -ENOMEM;
5757 }
5758
5759
5760
5761
5762
5763
5764
5765
5766
5767
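/**
 * _base_allocate_pcie_sgl_pool - Allocate DMA'able memory for the
 *			PCIe SGL pool.
 * @ioc: per adapter object
 * @sz: DMA pool size
 *
 * Return: 0 for success, -ENOMEM or -EAGAIN for failure.
 */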
5768 static int
5769 _base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
5770 {
5771 int i = 0, j = 0;
5772 struct chain_tracker *ct;
5773
5774 ioc->pcie_sgl_dma_pool =
5775 dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz,
5776 ioc->page_size, 0);
5777 if (!ioc->pcie_sgl_dma_pool) {
5778 ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n");
5779 return -ENOMEM;
5780 }
5781
5782 ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
5783 ioc->chains_per_prp_buffer =
5784 min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io);
5785 for (i = 0; i < ioc->scsiio_depth; i++) {
5786 ioc->pcie_sg_lookup[i].pcie_sgl =
5787 dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL,
5788 &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
5789 if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
5790 ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
5791 return -EAGAIN;
5792 }
5793
5794 if (!mpt3sas_check_same_4gb_region(
5795 ioc->pcie_sg_lookup[i].pcie_sgl_dma, sz)) {
5796 ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n",
5797 ioc->pcie_sg_lookup[i].pcie_sgl,
5798 (unsigned long long)
5799 ioc->pcie_sg_lookup[i].pcie_sgl_dma);
5800 ioc->use_32bit_dma = true;
5801 return -EAGAIN;
5802 }
5803
5804 for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
5805 ct = &ioc->chain_lookup[i].chains_per_smid[j];
5806 ct->chain_buffer =
5807 ioc->pcie_sg_lookup[i].pcie_sgl +
5808 (j * ioc->chain_segment_sz);
5809 ct->chain_buffer_dma =
5810 ioc->pcie_sg_lookup[i].pcie_sgl_dma +
5811 (j * ioc->chain_segment_sz);
5812 }
5813 }
5814 dinitprintk(ioc, ioc_info(ioc,
5815 "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
5816 ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
5817 dinitprintk(ioc, ioc_info(ioc,
5818 "Number of chains can fit in a PRP page(%d)\n",
5819 ioc->chains_per_prp_buffer));
5820 return 0;
5821 }
5822
5823
5824
5825
5826
5827
5828
5829
5830
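/**
 * _base_allocate_chain_dma_pool - Allocate DMA'able memory for the
 *			chain pool.
 * @ioc: per adapter object
 * @sz: DMA pool size
 *
 * Return: 0 for success, -ENOMEM or -EAGAIN for failure.
 */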
5831 static int
5832 _base_allocate_chain_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
5833 {
5834 int i = 0, j = 0;
5835 struct chain_tracker *ctr;
5836
5837 ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
5838 ioc->chain_segment_sz, 16, 0);
5839 if (!ioc->chain_dma_pool)
5840 return -ENOMEM;
5841
5842 for (i = 0; i < ioc->scsiio_depth; i++) {
5843 for (j = ioc->chains_per_prp_buffer;
5844 j < ioc->chains_needed_per_io; j++) {
5845 ctr = &ioc->chain_lookup[i].chains_per_smid[j];
5846 ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool,
5847 GFP_KERNEL, &ctr->chain_buffer_dma);
5848 if (!ctr->chain_buffer)
5849 return -EAGAIN;
5850 if (!mpt3sas_check_same_4gb_region(
5851 ctr->chain_buffer_dma, ioc->chain_segment_sz)) {
5852 ioc_err(ioc,
5853 "Chain buffers are not in same 4G !!! Chain buff (0x%p) dma = (0x%llx)\n",
5854 ctr->chain_buffer,
5855 (unsigned long long)ctr->chain_buffer_dma);
5856 ioc->use_32bit_dma = true;
5857 return -EAGAIN;
5858 }
5859 }
5860 }
5861 dinitprintk(ioc, ioc_info(ioc,
5862 "chain_lookup depth (%d), frame_size(%d), pool_size(%d kB)\n",
5863 ioc->scsiio_depth, ioc->chain_segment_sz, ((ioc->scsiio_depth *
5864 (ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) *
5865 ioc->chain_segment_sz))/1024));
5866 return 0;
5867 }
5868
5869
5870
5871
5872
5873
5874
5875
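/**
 * _base_allocate_sense_dma_pool - Allocate DMA'able memory for the
 *			sense pool.
 * @ioc: per adapter object
 * @sz: DMA pool size
 *
 * Return: 0 for success, -ENOMEM or -EAGAIN for failure.
 */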
5876 static int
5877 _base_allocate_sense_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
5878 {
5879 ioc->sense_dma_pool =
5880 dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0);
5881 if (!ioc->sense_dma_pool)
5882 return -ENOMEM;
5883 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool,
5884 GFP_KERNEL, &ioc->sense_dma);
5885 if (!ioc->sense)
5886 return -EAGAIN;
5887 if (!mpt3sas_check_same_4gb_region(ioc->sense_dma, sz)) {
5888 dinitprintk(ioc, pr_err(
5889 "Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n",
5890 ioc->sense, (unsigned long long) ioc->sense_dma));
5891 ioc->use_32bit_dma = true;
5892 return -EAGAIN;
5893 }
5894 ioc_info(ioc,
5895 "sense pool(0x%p) - dma(0x%llx): depth(%d), element_size(%d), pool_size (%d kB)\n",
5896 ioc->sense, (unsigned long long)ioc->sense_dma,
5897 ioc->scsiio_depth, SCSI_SENSE_BUFFERSIZE, sz/1024);
5898 return 0;
5899 }
5900
5901
5902
5903
5904
5905
5906
5907
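/**
 * _base_allocate_reply_pool - Allocate DMA'able memory for the
 *			reply pool.
 * @ioc: per adapter object
 * @sz: DMA pool size
 *
 * Return: 0 for success, -ENOMEM or -EAGAIN for failure.
 */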
5908 static int
5909 _base_allocate_reply_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
5910 {
5911
5912 ioc->reply_dma_pool = dma_pool_create("reply pool",
5913 &ioc->pdev->dev, sz, 4, 0);
5914 if (!ioc->reply_dma_pool)
5915 return -ENOMEM;
5916 ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
5917 &ioc->reply_dma);
5918 if (!ioc->reply)
5919 return -EAGAIN;
5920 if (!mpt3sas_check_same_4gb_region(ioc->reply_dma, sz)) {
5921 dinitprintk(ioc, pr_err(
5922 "Bad Reply Pool! Reply (0x%p) Reply dma = (0x%llx)\n",
5923 ioc->reply, (unsigned long long) ioc->reply_dma));
5924 ioc->use_32bit_dma = true;
5925 return -EAGAIN;
5926 }
5927 ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
5928 ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
5929 ioc_info(ioc,
5930 "reply pool(0x%p) - dma(0x%llx): depth(%d), frame_size(%d), pool_size(%d kB)\n",
5931 ioc->reply, (unsigned long long)ioc->reply_dma,
5932 ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024);
5933 return 0;
5934 }
5935
5936
5937
5938
5939
5940
5941
5942
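/**
 * _base_allocate_reply_free_dma_pool - Allocate DMA'able memory for the
 *			reply free pool.
 * @ioc: per adapter object
 * @sz: DMA pool size
 *
 * Return: 0 for success, -ENOMEM or -EAGAIN for failure.
 */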
5943 static int
5944 _base_allocate_reply_free_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
5945 {
5946
5947 ioc->reply_free_dma_pool = dma_pool_create(
5948 "reply_free pool", &ioc->pdev->dev, sz, 16, 0);
5949 if (!ioc->reply_free_dma_pool)
5950 return -ENOMEM;
5951 ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool,
5952 GFP_KERNEL, &ioc->reply_free_dma);
5953 if (!ioc->reply_free)
5954 return -EAGAIN;
5955 if (!mpt3sas_check_same_4gb_region(ioc->reply_free_dma, sz)) {
5956 dinitprintk(ioc,
5957 pr_err("Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
5958 ioc->reply_free, (unsigned long long) ioc->reply_free_dma));
5959 ioc->use_32bit_dma = true;
5960 return -EAGAIN;
5961 }
5962 memset(ioc->reply_free, 0, sz);
5963 dinitprintk(ioc, ioc_info(ioc,
5964 "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
5965 ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
5966 dinitprintk(ioc, ioc_info(ioc,
5967 "reply_free_dma (0x%llx)\n",
5968 (unsigned long long)ioc->reply_free_dma));
5969 return 0;
5970 }
5971
5972
5973
5974
5975
5976
5977
5978
5979
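/**
 * _base_allocate_reply_post_free_array - Allocate DMA'able memory for the
 *			reply post free array.
 * @ioc: per adapter object
 * @reply_post_free_array_sz: DMA pool size
 *
 * Return: 0 for success, -ENOMEM or -EAGAIN for failure.
 */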
5980 static int
5981 _base_allocate_reply_post_free_array(struct MPT3SAS_ADAPTER *ioc,
5982 u32 reply_post_free_array_sz)
5983 {
5984 ioc->reply_post_free_array_dma_pool =
5985 dma_pool_create("reply_post_free_array pool",
5986 &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
5987 if (!ioc->reply_post_free_array_dma_pool)
5988 return -ENOMEM;
5989 ioc->reply_post_free_array =
5990 dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
5991 GFP_KERNEL, &ioc->reply_post_free_array_dma);
5992 if (!ioc->reply_post_free_array)
5993 return -EAGAIN;
5994 if (!mpt3sas_check_same_4gb_region(ioc->reply_post_free_array_dma,
5995 reply_post_free_array_sz)) {
5996 dinitprintk(ioc, pr_err(
5997 "Bad Reply Post Free Array! Array (0x%p) dma = (0x%llx)\n",
5998 ioc->reply_post_free_array,
5999 (unsigned long long) ioc->reply_post_free_array_dma));
6000 ioc->use_32bit_dma = true;
6001 return -EAGAIN;
6002 }
6003 return 0;
6004 }
6005
6006
6007
6008
6009
6010
6011
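/**
 * base_alloc_rdpq_dma_pool - Allocate DMA'able memory for the reply
 *			descriptor post queues.
 * @ioc: per adapter object
 * @sz: DMA pool size
 *
 * Return: 0 for success, non-zero for failure.
 */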
6012 static int
6013 base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
6014 {
6015 int i = 0;
6016 u32 dma_alloc_count = 0;
6017 int reply_post_free_sz = ioc->reply_post_queue_depth *
6018 sizeof(Mpi2DefaultReplyDescriptor_t);
6019 int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
6020
6021 ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct),
6022 GFP_KERNEL);
6023 if (!ioc->reply_post)
6024 return -ENOMEM;
6025
6026
6027
6028
6029
6030
6031
6032
6033
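/*
 * For INVADER_SERIES each set of 8 reply queues (0-7, 8-15, ..),
 * and for VENTURA_SERIES each set of 16 reply queues (0-15, 16-31, ..),
 * should be within a 4GB boundary, and the reply queues in a set must
 * have the same upper 32-bits in their memory address. So here the
 * driver allocates the DMA'able memory for the reply queues
 * accordingly; the VENTURA_SERIES grouping is used to manage
 * INVADER_SERIES as well.
 */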
6034 dma_alloc_count = DIV_ROUND_UP(count,
6035 RDPQ_MAX_INDEX_IN_ONE_CHUNK);
6036 ioc->reply_post_free_dma_pool =
6037 dma_pool_create("reply_post_free pool",
6038 &ioc->pdev->dev, sz, 16, 0);
6039 if (!ioc->reply_post_free_dma_pool)
6040 return -ENOMEM;
6041 for (i = 0; i < count; i++) {
6042 if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) {
6043 ioc->reply_post[i].reply_post_free =
6044 dma_pool_zalloc(ioc->reply_post_free_dma_pool,
6045 GFP_KERNEL,
6046 &ioc->reply_post[i].reply_post_free_dma);
6047 if (!ioc->reply_post[i].reply_post_free)
6048 return -ENOMEM;
6049
6050
6051
6052
6053
6054
6055
6056
6057
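/*
 * Each set of RDPQ pools must satisfy the 4GB boundary restriction:
 * check whether the allocated resources for the RDPQ pool are in the
 * same 4GB range; if so, continue with 64-bit DMA, otherwise return
 * -EAGAIN so the caller frees all resources, switches to a 32-bit DMA
 * mask and reallocates.
 */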
6058 if (!mpt3sas_check_same_4gb_region(
6059 ioc->reply_post[i].reply_post_free_dma, sz)) {
6060 dinitprintk(ioc,
6061 ioc_err(ioc, "bad reply_post_free pool(0x%p) "
6062 "reply_post_free_dma = (0x%llx)\n",
6063 ioc->reply_post[i].reply_post_free,
6064 (unsigned long long)
6065 ioc->reply_post[i].reply_post_free_dma));
6066 return -EAGAIN;
6067 }
6068 dma_alloc_count--;
6069
6070 } else {
6071 ioc->reply_post[i].reply_post_free =
6072 (Mpi2ReplyDescriptorsUnion_t *)
6073 ((long)ioc->reply_post[i-1].reply_post_free
6074 + reply_post_free_sz);
6075 ioc->reply_post[i].reply_post_free_dma =
6076 (dma_addr_t)
6077 (ioc->reply_post[i-1].reply_post_free_dma +
6078 reply_post_free_sz);
6079 }
6080 }
6081 return 0;
6082 }
6083
6084
6085
6086
6087
6088
6089
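/**
 * _base_allocate_memory_pools - allocate start of day memory pools
 * @ioc: per adapter object
 *
 * Return: 0 for success, anything else is an error.
 */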
6090 static int
6091 _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
6092 {
6093 struct mpt3sas_facts *facts;
6094 u16 max_sge_elements;
6095 u16 chains_needed_per_io;
6096 u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
6097 u32 retry_sz;
6098 u32 rdpq_sz = 0, sense_sz = 0;
6099 u16 max_request_credit, nvme_blocks_needed;
6100 unsigned short sg_tablesize;
6101 u16 sge_size;
6102 int i;
6103 int ret = 0, rc = 0;
6104
6105 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6106
6107
6108 retry_sz = 0;
6109 facts = &ioc->facts;
6110
6111
6112 if (max_sgl_entries != -1)
6113 sg_tablesize = max_sgl_entries;
6114 else {
6115 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
6116 sg_tablesize = MPT2SAS_SG_DEPTH;
6117 else
6118 sg_tablesize = MPT3SAS_SG_DEPTH;
6119 }
6120
6121
6122 if (reset_devices)
6123 sg_tablesize = min_t(unsigned short, sg_tablesize,
6124 MPT_KDUMP_MIN_PHYS_SEGMENTS);
6125
6126 if (ioc->is_mcpu_endpoint)
6127 ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
6128 else {
6129 if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
6130 sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
6131 else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
6132 sg_tablesize = min_t(unsigned short, sg_tablesize,
6133 SG_MAX_SEGMENTS);
6134 ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
6135 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
6136 }
6137 ioc->shost->sg_tablesize = sg_tablesize;
6138 }
6139
6140 ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
6141 (facts->RequestCredit / 4));
6142 if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
6143 if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
6144 INTERNAL_SCSIIO_CMDS_COUNT)) {
6145 ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
6146 facts->RequestCredit);
6147 return -ENOMEM;
6148 }
6149 ioc->internal_depth = 10;
6150 }
6151
6152 ioc->hi_priority_depth = ioc->internal_depth - (5);
6153
6154 if (max_queue_depth != -1 && max_queue_depth != 0) {
6155 max_request_credit = min_t(u16, max_queue_depth +
6156 ioc->internal_depth, facts->RequestCredit);
6157 if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
6158 max_request_credit = MAX_HBA_QUEUE_DEPTH;
6159 } else if (reset_devices)
6160 max_request_credit = min_t(u16, facts->RequestCredit,
6161 (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
6162 else
6163 max_request_credit = min_t(u16, facts->RequestCredit,
6164 MAX_HBA_QUEUE_DEPTH);
6165
6166
6167
6168
6169
6170 ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
6171
6172
6173 ioc->request_sz = facts->IOCRequestFrameSize * 4;
6174
6175
6176 ioc->reply_sz = facts->ReplyFrameSize * 4;
6177
6178
6179 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
6180 if (facts->IOCMaxChainSegmentSize)
6181 ioc->chain_segment_sz =
6182 facts->IOCMaxChainSegmentSize *
6183 MAX_CHAIN_ELEMT_SZ;
6184 else
6185
6186 ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
6187 MAX_CHAIN_ELEMT_SZ;
6188 } else
6189 ioc->chain_segment_sz = ioc->request_sz;
6190
6191
6192 sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
6193
6194 retry_allocation:
6195 total_sz = 0;
6196
6197 max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
6198 sizeof(Mpi2SGEIOUnion_t)) + sge_size);
6199 ioc->max_sges_in_main_message = max_sge_elements/sge_size;
6200
6201
6202 max_sge_elements = ioc->chain_segment_sz - sge_size;
6203 ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
6204
6205
6206
6207
6208 chains_needed_per_io = ((ioc->shost->sg_tablesize -
6209 ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
6210 + 1;
6211 if (chains_needed_per_io > facts->MaxChainDepth) {
6212 chains_needed_per_io = facts->MaxChainDepth;
6213 ioc->shost->sg_tablesize = min_t(u16,
6214 ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
6215 * chains_needed_per_io), ioc->shost->sg_tablesize);
6216 }
6217 ioc->chains_needed_per_io = chains_needed_per_io;
6218
6219
6220 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
6221
6222
6223 if (ioc->is_mcpu_endpoint)
6224 ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
6225 else {
6226
6227 ioc->reply_post_queue_depth = ioc->hba_queue_depth +
6228 ioc->reply_free_queue_depth + 1;
6229
6230 if (ioc->reply_post_queue_depth % 16)
6231 ioc->reply_post_queue_depth += 16 -
6232 (ioc->reply_post_queue_depth % 16);
6233 }
6234
6235 if (ioc->reply_post_queue_depth >
6236 facts->MaxReplyDescriptorPostQueueDepth) {
6237 ioc->reply_post_queue_depth =
6238 facts->MaxReplyDescriptorPostQueueDepth -
6239 (facts->MaxReplyDescriptorPostQueueDepth % 16);
6240 ioc->hba_queue_depth =
6241 ((ioc->reply_post_queue_depth - 64) / 2) - 1;
6242 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
6243 }
6244
6245 ioc_info(ioc,
6246 "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), "
6247 "sge_per_io(%d), chains_per_io(%d)\n",
6248 ioc->max_sges_in_main_message,
6249 ioc->max_sges_in_chain_message,
6250 ioc->shost->sg_tablesize,
6251 ioc->chains_needed_per_io);
6252
6253
6254 reply_post_free_sz = ioc->reply_post_queue_depth *
6255 sizeof(Mpi2DefaultReplyDescriptor_t);
6256 rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
6257 if ((_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
6258 || (ioc->reply_queue_count < RDPQ_MAX_INDEX_IN_ONE_CHUNK))
6259 rdpq_sz = reply_post_free_sz * ioc->reply_queue_count;
6260 ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
6261 if (ret == -EAGAIN) {
6262
6263
6264
6265
6266 _base_release_memory_pools(ioc);
6267 ioc->use_32bit_dma = true;
6268 if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
6269 ioc_err(ioc,
6270 "32 DMA mask failed %s\n", pci_name(ioc->pdev));
6271 return -ENODEV;
6272 }
6273 if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz))
6274 return -ENOMEM;
6275 } else if (ret == -ENOMEM)
6276 return -ENOMEM;
6277 total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 :
6278 DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK));
6279 ioc->scsiio_depth = ioc->hba_queue_depth -
6280 ioc->hi_priority_depth - ioc->internal_depth;
6281
6282
6283
6284
6285 ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
6286 dinitprintk(ioc,
6287 ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
6288 ioc->shost->can_queue));
6289
6290
6291
6292
6293 ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
6294 sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
6295
6296
6297 sz += (ioc->hi_priority_depth * ioc->request_sz);
6298
6299
6300 sz += (ioc->internal_depth * ioc->request_sz);
6301
6302 ioc->request_dma_sz = sz;
6303 ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
6304 &ioc->request_dma, GFP_KERNEL);
6305 if (!ioc->request) {
6306 ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
6307 ioc->hba_queue_depth, ioc->chains_needed_per_io,
6308 ioc->request_sz, sz / 1024);
6309 if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
6310 goto out;
6311 retry_sz = 64;
6312 ioc->hba_queue_depth -= retry_sz;
6313 _base_release_memory_pools(ioc);
6314 goto retry_allocation;
6315 }
6316
6317 if (retry_sz)
6318 ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
6319 ioc->hba_queue_depth, ioc->chains_needed_per_io,
6320 ioc->request_sz, sz / 1024);
6321
6322
6323 ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
6324 ioc->request_sz);
6325 ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
6326 ioc->request_sz);
6327
6328
6329 ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
6330 ioc->request_sz);
6331 ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
6332 ioc->request_sz);
6333
6334 ioc_info(ioc,
6335 "request pool(0x%p) - dma(0x%llx): "
6336 "depth(%d), frame_size(%d), pool_size(%d kB)\n",
6337 ioc->request, (unsigned long long) ioc->request_dma,
6338 ioc->hba_queue_depth, ioc->request_sz,
6339 (ioc->hba_queue_depth * ioc->request_sz) / 1024);
6340
6341 total_sz += sz;
6342
6343 dinitprintk(ioc,
6344 ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
6345 ioc->request, ioc->scsiio_depth));
6346
6347 ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
6348 sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
6349 ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
6350 if (!ioc->chain_lookup) {
6351 ioc_err(ioc, "chain_lookup: kzalloc failed\n");
6352 goto out;
6353 }
6354
6355 sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
6356 for (i = 0; i < ioc->scsiio_depth; i++) {
6357 ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
6358 if (!ioc->chain_lookup[i].chains_per_smid) {
6359 ioc_err(ioc, "chain_lookup: kzalloc failed\n");
6360 goto out;
6361 }
6362 }
6363
6364
6365 ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
6366 sizeof(struct request_tracker), GFP_KERNEL);
6367 if (!ioc->hpr_lookup) {
6368 ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
6369 goto out;
6370 }
6371 ioc->hi_priority_smid = ioc->scsiio_depth + 1;
6372 dinitprintk(ioc,
6373 ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
6374 ioc->hi_priority,
6375 ioc->hi_priority_depth, ioc->hi_priority_smid));
6376
6377
6378 ioc->internal_lookup = kcalloc(ioc->internal_depth,
6379 sizeof(struct request_tracker), GFP_KERNEL);
6380 if (!ioc->internal_lookup) {
6381 ioc_err(ioc, "internal_lookup: kcalloc failed\n");
6382 goto out;
6383 }
6384 ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
6385 dinitprintk(ioc,
6386 ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
6387 ioc->internal,
6388 ioc->internal_depth, ioc->internal_smid));
6389
6390 ioc->io_queue_num = kcalloc(ioc->scsiio_depth,
6391 sizeof(u16), GFP_KERNEL);
6392 if (!ioc->io_queue_num)
6393 goto out;
6394
6395
6396
6397
6398
6399
6400
6401
6402
6403
6404
6405
6406
6407
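/*
 * The number of NVMe page sized blocks needed is:
 *     (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
 * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry,
 * which is placed in the main message frame. 8 is the size of each PRP
 * entry or PRP list pointer entry. 8 is subtracted from page_size
 * because of the PRP list pointer entry at the end of a page, so this
 * is not counted as a PRP entry. The 1 added page is a round up.
 *
 * To avoid allocation failures due to the amount of memory that could
 * be required for NVMe PRP's, only each set of NVMe blocks will be
 * contiguous, so a new set is allocated for each possible I/O.
 */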
6408 ioc->chains_per_prp_buffer = 0;
6409 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
6410 nvme_blocks_needed =
6411 (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
6412 nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
6413 nvme_blocks_needed++;
6414
6415 sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
6416 ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
6417 if (!ioc->pcie_sg_lookup) {
6418 ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n");
6419 goto out;
6420 }
6421 sz = nvme_blocks_needed * ioc->page_size;
6422 rc = _base_allocate_pcie_sgl_pool(ioc, sz);
6423 if (rc == -ENOMEM)
6424 return -ENOMEM;
6425 else if (rc == -EAGAIN)
6426 goto try_32bit_dma;
6427 total_sz += sz * ioc->scsiio_depth;
6428 }
6429
6430 rc = _base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz);
6431 if (rc == -ENOMEM)
6432 return -ENOMEM;
6433 else if (rc == -EAGAIN)
6434 goto try_32bit_dma;
6435 total_sz += ioc->chain_segment_sz * ((ioc->chains_needed_per_io -
6436 ioc->chains_per_prp_buffer) * ioc->scsiio_depth);
6437 dinitprintk(ioc,
6438 ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
6439 ioc->chain_depth, ioc->chain_segment_sz,
6440 (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
6441
6442 sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
6443 rc = _base_allocate_sense_dma_pool(ioc, sense_sz);
6444 if (rc == -ENOMEM)
6445 return -ENOMEM;
6446 else if (rc == -EAGAIN)
6447 goto try_32bit_dma;
6448 total_sz += sense_sz;
6449 ioc_info(ioc,
6450 "sense pool(0x%p) - dma(0x%llx): depth(%d), "
6451 "element_size(%d), pool_size(%d kB)\n",
6452 ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
6453 SCSI_SENSE_BUFFERSIZE, sense_sz / 1024);
6454
6455 sz = ioc->reply_free_queue_depth * ioc->reply_sz;
6456 rc = _base_allocate_reply_pool(ioc, sz);
6457 if (rc == -ENOMEM)
6458 return -ENOMEM;
6459 else if (rc == -EAGAIN)
6460 goto try_32bit_dma;
6461 total_sz += sz;
6462
6463
6464 sz = ioc->reply_free_queue_depth * 4;
6465 rc = _base_allocate_reply_free_dma_pool(ioc, sz);
6466 if (rc == -ENOMEM)
6467 return -ENOMEM;
6468 else if (rc == -EAGAIN)
6469 goto try_32bit_dma;
6470 dinitprintk(ioc,
6471 ioc_info(ioc, "reply_free_dma (0x%llx)\n",
6472 (unsigned long long)ioc->reply_free_dma));
6473 total_sz += sz;
6474 if (ioc->rdpq_array_enable) {
6475 reply_post_free_array_sz = ioc->reply_queue_count *
6476 sizeof(Mpi2IOCInitRDPQArrayEntry);
6477 rc = _base_allocate_reply_post_free_array(ioc,
6478 reply_post_free_array_sz);
6479 if (rc == -ENOMEM)
6480 return -ENOMEM;
6481 else if (rc == -EAGAIN)
6482 goto try_32bit_dma;
6483 }
6484 ioc->config_page_sz = 512;
6485 ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
6486 ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
6487 if (!ioc->config_page) {
6488 ioc_err(ioc, "config page: dma_alloc_coherent failed\n");
6489 goto out;
6490 }
6491
6492 ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n",
6493 ioc->config_page, (unsigned long long)ioc->config_page_dma,
6494 ioc->config_page_sz);
6495 total_sz += ioc->config_page_sz;
6496
6497 ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
6498 total_sz / 1024);
6499 ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
6500 ioc->shost->can_queue, facts->RequestCredit);
6501 ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
6502 ioc->shost->sg_tablesize);
6503 return 0;
6504
6505 try_32bit_dma:
6506 _base_release_memory_pools(ioc);
6507 if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) {
6508
6509 if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
6510 pr_err("Setting 32 bit coherent DMA mask Failed %s\n",
6511 pci_name(ioc->pdev));
6512 return -ENODEV;
6513 }
6514 } else if (_base_reduce_hba_queue_depth(ioc) != 0)
6515 return -ENOMEM;
6516 goto retry_allocation;
6517
6518 out:
6519 return -ENOMEM;
6520 }
6521
6522
6523
6524
6525
6526
6527
6528
6529
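/**
 * mpt3sas_base_get_iocstate - Get the current state of an MPT adapter.
 * @ioc: per adapter object
 * @cooked: Request raw or cooked IOC state
 *
 * Return: all IOC Doorbell register bits if cooked==0, else just the
 * Doorbell bits in MPI2_IOC_STATE_MASK.
 */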
6530 u32
6531 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
6532 {
6533 u32 s, sc;
6534
6535 s = ioc->base_readl(&ioc->chip->Doorbell);
6536 sc = s & MPI2_IOC_STATE_MASK;
6537 return cooked ? sc : s;
6538 }
6539
6540
6541
6542
6543
6544
6545
6546
6547
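/**
 * _base_wait_on_iocstate - waiting on a particular ioc state
 * @ioc: per adapter object
 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
 * @timeout: timeout in seconds
 *
 * Return: 0 for success, non-zero for failure.
 */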
6548 static int
6549 _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
6550 {
6551 u32 count, cntdn;
6552 u32 current_state;
6553
6554 count = 0;
6555 cntdn = 1000 * timeout;
6556 do {
6557 current_state = mpt3sas_base_get_iocstate(ioc, 1);
6558 if (current_state == ioc_state)
6559 return 0;
6560 if (count && current_state == MPI2_IOC_STATE_FAULT)
6561 break;
6562 if (count && current_state == MPI2_IOC_STATE_COREDUMP)
6563 break;
6564
6565 usleep_range(1000, 1500);
6566 count++;
6567 } while (--cntdn);
6568
6569 return current_state;
6570 }
6571
6572
6573
6574
6575
6576
6577
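/**
 * _base_dump_reg_set - This function will print hexdump of register set.
 * @ioc: per adapter object
 */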
6578 static inline void
6579 _base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc)
6580 {
6581 unsigned int i, sz = 256;
6582 u32 __iomem *reg = (u32 __iomem *)ioc->chip;
6583
6584 ioc_info(ioc, "System Register set:\n");
6585 for (i = 0; i < (sz / sizeof(u32)); i++)
6586 pr_info("%08x: %08x\n", (i * 4), readl(&reg[i]));
6587 }
6588
6589
6590
6591
6592
6593
6594
6595
6596
6597
6598
6599
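/**
 * _base_wait_for_doorbell_int - waiting for controller interrupt (generated
 * by a write to the doorbell)
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Return: 0 for success, non-zero for failure.
 *
 * Note: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
 */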
6600 static int
6601 _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
6602 {
6603 u32 cntdn, count;
6604 u32 int_status;
6605
6606 count = 0;
6607 cntdn = 1000 * timeout;
6608 do {
6609 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
6610 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
6611 dhsprintk(ioc,
6612 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6613 __func__, count, timeout));
6614 return 0;
6615 }
6616
6617 usleep_range(1000, 1500);
6618 count++;
6619 } while (--cntdn);
6620
6621 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
6622 __func__, count, int_status);
6623 return -EFAULT;
6624 }
6625
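/**
 * _base_spin_on_doorbell_int - waiting for controller interrupt (generated
 * by a write to the doorbell), but polling in tighter (500 us) intervals
 * than _base_wait_for_doorbell_int().
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Return: 0 for success, non-zero for failure.
 */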
6626 static int
6627 _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
6628 {
6629 u32 cntdn, count;
6630 u32 int_status;
6631
6632 count = 0;
6633 cntdn = 2000 * timeout;
6634 do {
6635 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
6636 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
6637 dhsprintk(ioc,
6638 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6639 __func__, count, timeout));
6640 return 0;
6641 }
6642
6643 udelay(500);
6644 count++;
6645 } while (--cntdn);
6646
6647 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
6648 __func__, count, int_status);
6649 return -EFAULT;
6650
6651 }
6652
6653
6654
6655
6656
6657
6658
6659
6660
6661
6662
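/**
 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
 * Used when the IOC gets the request message via doorbell handshake.
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Return: 0 for success, non-zero for failure.
 *
 * Note: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to doorbell.
 */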
6663 static int
6664 _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
6665 {
6666 u32 cntdn, count;
6667 u32 int_status;
6668 u32 doorbell;
6669
6670 count = 0;
6671 cntdn = 1000 * timeout;
6672 do {
6673 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
6674 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
6675 dhsprintk(ioc,
6676 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6677 __func__, count, timeout));
6678 return 0;
6679 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
6680 doorbell = ioc->base_readl(&ioc->chip->Doorbell);
6681 if ((doorbell & MPI2_IOC_STATE_MASK) ==
6682 MPI2_IOC_STATE_FAULT) {
6683 mpt3sas_print_fault_code(ioc, doorbell);
6684 return -EFAULT;
6685 }
6686 if ((doorbell & MPI2_IOC_STATE_MASK) ==
6687 MPI2_IOC_STATE_COREDUMP) {
6688 mpt3sas_print_coredump_info(ioc, doorbell);
6689 return -EFAULT;
6690 }
6691 } else if (int_status == 0xFFFFFFFF)
6692 goto out;
6693
6694 usleep_range(1000, 1500);
6695 count++;
6696 } while (--cntdn);
6697
6698 out:
6699 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
6700 __func__, count, int_status);
6701 return -EFAULT;
6702 }
6703
6704
6705
6706
6707
6708
6709
6710
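/**
 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Return: 0 for success, non-zero for failure.
 */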
6711 static int
6712 _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
6713 {
6714 u32 cntdn, count;
6715 u32 doorbell_reg;
6716
6717 count = 0;
6718 cntdn = 1000 * timeout;
6719 do {
6720 doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
6721 if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
6722 dhsprintk(ioc,
6723 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6724 __func__, count, timeout));
6725 return 0;
6726 }
6727
6728 usleep_range(1000, 1500);
6729 count++;
6730 } while (--cntdn);
6731
6732 ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
6733 __func__, count, doorbell_reg);
6734 return -EFAULT;
6735 }
6736
6737
6738
6739
6740
6741
6742
6743
6744
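/**
 * _base_send_ioc_reset - send doorbell reset
 * @ioc: per adapter object
 * @reset_type: currently only supports MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
 * @timeout: timeout in seconds
 *
 * Return: 0 for success, non-zero for failure.
 */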
6745 static int
6746 _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
6747 {
6748 u32 ioc_state;
6749 int r = 0;
6750 unsigned long flags;
6751
6752 if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
6753 ioc_err(ioc, "%s: unknown reset_type\n", __func__);
6754 return -EFAULT;
6755 }
6756
6757 if (!(ioc->facts.IOCCapabilities &
6758 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
6759 return -EFAULT;
6760
6761 ioc_info(ioc, "sending message unit reset !!\n");
6762
6763 writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
6764 &ioc->chip->Doorbell);
6765 if ((_base_wait_for_doorbell_ack(ioc, 15))) {
6766 r = -EFAULT;
6767 goto out;
6768 }
6769
6770 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
6771 if (ioc_state) {
6772 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6773 __func__, ioc_state);
6774 r = -EFAULT;
6775 goto out;
6776 }
6777 out:
6778 if (r != 0) {
6779 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6780 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
6781
6782
6783
6784
6785 if ((ioc_state & MPI2_IOC_STATE_MASK) ==
6786 MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 ||
6787 ioc->fault_reset_work_q == NULL)) {
6788 spin_unlock_irqrestore(
6789 &ioc->ioc_reset_in_progress_lock, flags);
6790 mpt3sas_print_coredump_info(ioc, ioc_state);
6791 mpt3sas_base_wait_for_coredump_completion(ioc,
6792 __func__);
6793 spin_lock_irqsave(
6794 &ioc->ioc_reset_in_progress_lock, flags);
6795 }
6796 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
6797 }
6798 ioc_info(ioc, "message unit reset: %s\n",
6799 r == 0 ? "SUCCESS" : "FAILED");
6800 return r;
6801 }
6802
6803
6804
6805
6806
6807
6808
6809
6810
6811
6812
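/**
 * mpt3sas_wait_for_ioc - waits for the IOC to reach the OPERATIONAL state
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Return: 0 once the IOC is operational, non-zero on timeout or when the
 * driver is still loading.
 */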
6813 int
6814 mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
6815 {
6816 int wait_state_count = 0;
6817 u32 ioc_state;
6818
6819 do {
6820 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
6821 if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
6822 break;
6823
6824
6825
6826
6827
6828
6829
6830
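/*
 * The watchdog thread is started only after IOC initialization,
 * so there is no point waiting here for the IOC to become
 * operational while the driver is still loading. Return -ETIME
 * instead, so the calling function can issue a diag reset and
 * retry the command.
 */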
6831 if (ioc->is_driver_loading)
6832 return -ETIME;
6833
6834 ssleep(1);
6835 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
6836 __func__, ++wait_state_count);
6837 } while (--timeout);
6838 if (!timeout) {
6839 ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
6840 return -EFAULT;
6841 }
6842 if (wait_state_count)
6843 ioc_info(ioc, "ioc is operational\n");
6844 return 0;
6845 }
6846
6847
6848
6849
6850
6851
6852
6853
6854
6855
6856
6857
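/**
 * _base_handshake_req_reply_wait - send request through the doorbell
 * interface
 * @ioc: per adapter object
 * @request_bytes: request length
 * @request: pointer having request payload
 * @reply_bytes: reply length
 * @reply: pointer to reply payload
 * @timeout: timeout in seconds
 *
 * Return: 0 for success, non-zero for failure.
 */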
6858 static int
6859 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
6860 u32 *request, int reply_bytes, u16 *reply, int timeout)
6861 {
6862 MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
6863 int i;
6864 u8 failed;
6865 __le32 *mfp;
6866
6867
6868 if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
6869 ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
6870 return -EFAULT;
6871 }
6872
6873
6874 if (ioc->base_readl(&ioc->chip->HostInterruptStatus) &
6875 MPI2_HIS_IOC2SYS_DB_STATUS)
6876 writel(0, &ioc->chip->HostInterruptStatus);
6877
6878
6879 writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
6880 ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
6881 &ioc->chip->Doorbell);
6882
6883 if ((_base_spin_on_doorbell_int(ioc, 5))) {
6884 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
6885 __LINE__);
6886 return -EFAULT;
6887 }
6888 writel(0, &ioc->chip->HostInterruptStatus);
6889
6890 if ((_base_wait_for_doorbell_ack(ioc, 5))) {
6891 ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
6892 __LINE__);
6893 return -EFAULT;
6894 }
6895
6896 /* send message 32-bits at a time */
6897 for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
6898 writel(request[i], &ioc->chip->Doorbell);
6899 if ((_base_wait_for_doorbell_ack(ioc, 5)))
6900 failed = 1;
6901 }
6902
6903 if (failed) {
6904 ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
6905 __LINE__);
6906 return -EFAULT;
6907 }
6908
6909 /* now wait for the reply */
6910 if ((_base_wait_for_doorbell_int(ioc, timeout))) {
6911 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
6912 __LINE__);
6913 return -EFAULT;
6914 }
6915
6916 /* read the first two 16-bits, they give the total length of the reply */
6917 reply[0] = ioc->base_readl(&ioc->chip->Doorbell)
6918 & MPI2_DOORBELL_DATA_MASK;
6919 writel(0, &ioc->chip->HostInterruptStatus);
6920 if ((_base_wait_for_doorbell_int(ioc, 5))) {
6921 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
6922 __LINE__);
6923 return -EFAULT;
6924 }
6925 reply[1] = ioc->base_readl(&ioc->chip->Doorbell)
6926 & MPI2_DOORBELL_DATA_MASK;
6927 writel(0, &ioc->chip->HostInterruptStatus);
6928
6929 for (i = 2; i < default_reply->MsgLength * 2; i++) {
6930 if ((_base_wait_for_doorbell_int(ioc, 5))) {
6931 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
6932 __LINE__);
6933 return -EFAULT;
6934 }
6935 if (i >= reply_bytes/2)
6936 ioc->base_readl(&ioc->chip->Doorbell);
6937 else
6938 reply[i] = ioc->base_readl(&ioc->chip->Doorbell)
6939 & MPI2_DOORBELL_DATA_MASK;
6940 writel(0, &ioc->chip->HostInterruptStatus);
6941 }
6942
6943 _base_wait_for_doorbell_int(ioc, 5);
6944 if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
6945 dhsprintk(ioc,
6946 ioc_info(ioc, "doorbell is in use (line=%d)\n",
6947 __LINE__));
6948 }
6949 writel(0, &ioc->chip->HostInterruptStatus);
6950
6951 if (ioc->logging_level & MPT_DEBUG_INIT) {
6952 mfp = (__le32 *)reply;
6953 pr_info("\toffset:data\n");
6954 for (i = 0; i < reply_bytes/4; i++)
6955 ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
6956 le32_to_cpu(mfp[i]));
6957 }
6958 return 0;
6959 }
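/*
* Summary of the handshake protocol implemented above: the request is
* pushed through the Doorbell register one 32-bit dword at a time,
* with a firmware ACK awaited after each write; the reply is pulled
* back 16 bits at a time, and the MsgLength field of the first two
* words tells the host how many words remain. Any reply words beyond
* the caller's buffer are read and discarded so the doorbell fully
* drains before the function returns.
*/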
6960
6961 /**
6962 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
6963 * @ioc: per adapter object
6964 * @mpi_reply: the reply payload from FW
6965 * @mpi_request: the request payload sent to FW
6966 *
6967 * The SAS IO Unit Control Request message allows the host to perform
6968 * low-level operations such as resets on the PHYs of the IO Unit. It
6969 * also allows the host to obtain the IOC-assigned device handle for a
6970 * device, given other identifying information about the device, and
6971 * to remove IOC resources associated with the device.
6972 *
6973 * Return: 0 for success, non-zero for failure.
6974 */
6975 int
6976 mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
6977 Mpi2SasIoUnitControlReply_t *mpi_reply,
6978 Mpi2SasIoUnitControlRequest_t *mpi_request)
6979 {
6980 u16 smid;
6981 u8 issue_reset = 0;
6982 int rc;
6983 void *request;
6984
6985 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6986
6987 mutex_lock(&ioc->base_cmds.mutex);
6988
6989 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
6990 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
6991 rc = -EAGAIN;
6992 goto out;
6993 }
6994
6995 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
6996 if (rc)
6997 goto out;
6998
6999 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
7000 if (!smid) {
7001 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7002 rc = -EAGAIN;
7003 goto out;
7004 }
7005
7006 rc = 0;
7007 ioc->base_cmds.status = MPT3_CMD_PENDING;
7008 request = mpt3sas_base_get_msg_frame(ioc, smid);
7009 ioc->base_cmds.smid = smid;
7010 memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
7011 if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
7012 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
7013 ioc->ioc_link_reset_in_progress = 1;
7014 init_completion(&ioc->base_cmds.done);
7015 ioc->put_smid_default(ioc, smid);
7016 wait_for_completion_timeout(&ioc->base_cmds.done,
7017 msecs_to_jiffies(10000));
7018 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
7019 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
7020 ioc->ioc_link_reset_in_progress)
7021 ioc->ioc_link_reset_in_progress = 0;
7022 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
7023 mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status,
7024 mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4,
7025 issue_reset);
7026 goto issue_host_reset;
7027 }
7028 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
7029 memcpy(mpi_reply, ioc->base_cmds.reply,
7030 sizeof(Mpi2SasIoUnitControlReply_t));
7031 else
7032 memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
7033 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7034 goto out;
7035
7036 issue_host_reset:
7037 if (issue_reset)
7038 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
7039 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7040 rc = -EFAULT;
7041 out:
7042 mutex_unlock(&ioc->base_cmds.mutex);
7043 return rc;
7044 }
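/*
* Like the other internal commands in this file, the request above is
* given a 10 second completion window; on timeout,
* mpt3sas_check_cmd_timeout() decides whether escalation is needed
* and, if so, sets issue_reset so the FORCE_BIG_HAMMER diag reset
* path is taken.
*/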
7045
7046 /**
7047 * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
7048 * @ioc: per adapter object
7049 * @mpi_reply: the reply payload from FW
7050 * @mpi_request: the request payload sent to FW
7051 *
7052 * The SCSI Enclosure Processor request message causes the IOC to
7053 * communicate with SES devices in order to control LED status signals.
7054 *
7055 * Return: 0 for success, non-zero for failure.
7056 */
7057 int
7058 mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
7059 Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
7060 {
7061 u16 smid;
7062 u8 issue_reset = 0;
7063 int rc;
7064 void *request;
7065
7066 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7067
7068 mutex_lock(&ioc->base_cmds.mutex);
7069
7070 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
7071 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
7072 rc = -EAGAIN;
7073 goto out;
7074 }
7075
7076 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
7077 if (rc)
7078 goto out;
7079
7080 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
7081 if (!smid) {
7082 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7083 rc = -EAGAIN;
7084 goto out;
7085 }
7086
7087 rc = 0;
7088 ioc->base_cmds.status = MPT3_CMD_PENDING;
7089 request = mpt3sas_base_get_msg_frame(ioc, smid);
7090 ioc->base_cmds.smid = smid;
7091 memset(request, 0, ioc->request_sz);
7092 memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
7093 init_completion(&ioc->base_cmds.done);
7094 ioc->put_smid_default(ioc, smid);
7095 wait_for_completion_timeout(&ioc->base_cmds.done,
7096 msecs_to_jiffies(10000));
7097 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
7098 mpt3sas_check_cmd_timeout(ioc,
7099 ioc->base_cmds.status, mpi_request,
7100 sizeof(Mpi2SepRequest_t)/4, issue_reset);
7101 goto issue_host_reset;
7102 }
7103 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
7104 memcpy(mpi_reply, ioc->base_cmds.reply,
7105 sizeof(Mpi2SepReply_t));
7106 else
7107 memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
7108 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7109 goto out;
7110
7111 issue_host_reset:
7112 if (issue_reset)
7113 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
7114 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7115 rc = -EFAULT;
7116 out:
7117 mutex_unlock(&ioc->base_cmds.mutex);
7118 return rc;
7119 }
7120
7121 /**
7122 * _base_get_port_facts - obtain port facts reply and save in ioc
7123 * @ioc: per adapter object
7124 * @port: port number
7125 *
7126 * Return: 0 for success, non-zero for failure.
7127 */
7128 static int
7129 _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
7130 {
7131 Mpi2PortFactsRequest_t mpi_request;
7132 Mpi2PortFactsReply_t mpi_reply;
7133 struct mpt3sas_port_facts *pfacts;
7134 int mpi_reply_sz, mpi_request_sz, r;
7135
7136 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7137
7138 mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
7139 mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
7140 memset(&mpi_request, 0, mpi_request_sz);
7141 mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
7142 mpi_request.PortNumber = port;
7143 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
7144 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
7145
7146 if (r != 0) {
7147 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
7148 return r;
7149 }
7150
7151 pfacts = &ioc->pfacts[port];
7152 memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
7153 pfacts->PortNumber = mpi_reply.PortNumber;
7154 pfacts->VP_ID = mpi_reply.VP_ID;
7155 pfacts->VF_ID = mpi_reply.VF_ID;
7156 pfacts->MaxPostedCmdBuffers =
7157 le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
7158
7159 return 0;
7160 }
7161
7162 /**
7163 * _base_wait_for_iocstate - wait until the card is in READY or OPERATIONAL
7164 * @ioc: per adapter object
7165 * @timeout: how long to wait, in seconds
7166 *
7167 * Return: 0 for success, non-zero for failure.
7168 */
7169 static int
7170 _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
7171 {
7172 u32 ioc_state;
7173 int rc;
7174
7175 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7176
7177 if (ioc->pci_error_recovery) {
7178 dfailprintk(ioc,
7179 ioc_info(ioc, "%s: host in pci error recovery\n",
7180 __func__));
7181 return -EFAULT;
7182 }
7183
7184 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
7185 dhsprintk(ioc,
7186 ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
7187 __func__, ioc_state));
7188
7189 if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
7190 (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
7191 return 0;
7192
7193 if (ioc_state & MPI2_DOORBELL_USED) {
7194 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
7195 goto issue_diag_reset;
7196 }
7197
7198 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
7199 mpt3sas_print_fault_code(ioc, ioc_state &
7200 MPI2_DOORBELL_DATA_MASK);
7201 goto issue_diag_reset;
7202 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
7203 MPI2_IOC_STATE_COREDUMP) {
7204 ioc_info(ioc,
7205 "%s: Skipping the diag reset here. (ioc_state=0x%x)\n",
7206 __func__, ioc_state);
7207 return -EFAULT;
7208 }
7209
7210 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
7211 if (ioc_state) {
7212 dfailprintk(ioc,
7213 ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
7214 __func__, ioc_state));
7215 return -EFAULT;
7216 }
7217 return 0;
7218 issue_diag_reset:
7219 rc = _base_diag_reset(ioc);
7220 return rc;
7221 }
7222
7223 /**
7224 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
7225 * @ioc: per adapter object
7226 *
7227 * Return: 0 for success, non-zero for failure.
7228 */
7229 static int
7230 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
7231 {
7232 Mpi2IOCFactsRequest_t mpi_request;
7233 Mpi2IOCFactsReply_t mpi_reply;
7234 struct mpt3sas_facts *facts;
7235 int mpi_reply_sz, mpi_request_sz, r;
7236
7237 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7238
7239 r = _base_wait_for_iocstate(ioc, 10);
7240 if (r) {
7241 dfailprintk(ioc,
7242 ioc_info(ioc, "%s: failed getting to correct state\n",
7243 __func__));
7244 return r;
7245 }
7246 mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
7247 mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
7248 memset(&mpi_request, 0, mpi_request_sz);
7249 mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
7250 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
7251 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
7252
7253 if (r != 0) {
7254 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
7255 return r;
7256 }
7257
7258 facts = &ioc->facts;
7259 memset(facts, 0, sizeof(struct mpt3sas_facts));
7260 facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
7261 facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
7262 facts->VP_ID = mpi_reply.VP_ID;
7263 facts->VF_ID = mpi_reply.VF_ID;
7264 facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
7265 facts->MaxChainDepth = mpi_reply.MaxChainDepth;
7266 facts->WhoInit = mpi_reply.WhoInit;
7267 facts->NumberOfPorts = mpi_reply.NumberOfPorts;
7268 facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
7269 if (ioc->msix_enable && (facts->MaxMSIxVectors <=
7270 MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc)))
7271 ioc->combined_reply_queue = 0;
7272 facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
7273 facts->MaxReplyDescriptorPostQueueDepth =
7274 le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
7275 facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
7276 facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
7277 if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
7278 ioc->ir_firmware = 1;
7279 if ((facts->IOCCapabilities &
7280 MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
7281 ioc->rdpq_array_capable = 1;
7282 if ((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
7283 && ioc->is_aero_ioc)
7284 ioc->atomic_desc_capable = 1;
7285 facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
7286 facts->IOCRequestFrameSize =
7287 le16_to_cpu(mpi_reply.IOCRequestFrameSize);
7288 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
7289 facts->IOCMaxChainSegmentSize =
7290 le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
7291 }
7292 facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
7293 facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
7294 ioc->shost->max_id = -1;
7295 facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
7296 facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
7297 facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
7298 facts->HighPriorityCredit =
7299 le16_to_cpu(mpi_reply.HighPriorityCredit);
7300 facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
7301 facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
7302 facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;
7303
7304 /*
7305 * Get the Page Size from IOC Facts. If it's 0, default to 4k.
7306 */
7307 ioc->page_size = 1 << facts->CurrentHostPageSize;
7308 if (ioc->page_size == 1) {
7309 ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
7310 ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
7311 }
7312 dinitprintk(ioc,
7313 ioc_info(ioc, "CurrentHostPageSize(%d)\n",
7314 facts->CurrentHostPageSize));
7315
7316 dinitprintk(ioc,
7317 ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
7318 facts->RequestCredit, facts->MaxChainDepth));
7319 dinitprintk(ioc,
7320 ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
7321 facts->IOCRequestFrameSize * 4,
7322 facts->ReplyFrameSize * 4));
7323 return 0;
7324 }
7325
7326 /**
7327 * _base_send_ioc_init - send ioc_init to firmware
7328 * @ioc: per adapter object
7329 *
7330 * Return: 0 for success, non-zero for failure.
7331 */
7332 static int
7333 _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
7334 {
7335 Mpi2IOCInitRequest_t mpi_request;
7336 Mpi2IOCInitReply_t mpi_reply;
7337 int i, r = 0;
7338 ktime_t current_time;
7339 u16 ioc_status;
7340 u32 reply_post_free_array_sz = 0;
7341
7342 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7343
7344 memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
7345 mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
7346 mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
7347 mpi_request.VF_ID = 0;
7348 mpi_request.VP_ID = 0;
7349 mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
7350 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
7351 mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;
7352
7353 if (_base_is_controller_msix_enabled(ioc))
7354 mpi_request.HostMSIxVectors = ioc->reply_queue_count;
7355 mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
7356 mpi_request.ReplyDescriptorPostQueueDepth =
7357 cpu_to_le16(ioc->reply_post_queue_depth);
7358 mpi_request.ReplyFreeQueueDepth =
7359 cpu_to_le16(ioc->reply_free_queue_depth);
7360
7361 mpi_request.SenseBufferAddressHigh =
7362 cpu_to_le32((u64)ioc->sense_dma >> 32);
7363 mpi_request.SystemReplyAddressHigh =
7364 cpu_to_le32((u64)ioc->reply_dma >> 32);
7365 mpi_request.SystemRequestFrameBaseAddress =
7366 cpu_to_le64((u64)ioc->request_dma);
7367 mpi_request.ReplyFreeQueueAddress =
7368 cpu_to_le64((u64)ioc->reply_free_dma);
7369
7370 if (ioc->rdpq_array_enable) {
7371 reply_post_free_array_sz = ioc->reply_queue_count *
7372 sizeof(Mpi2IOCInitRDPQArrayEntry);
7373 memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
7374 for (i = 0; i < ioc->reply_queue_count; i++)
7375 ioc->reply_post_free_array[i].RDPQBaseAddress =
7376 cpu_to_le64(
7377 (u64)ioc->reply_post[i].reply_post_free_dma);
7378 mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
7379 mpi_request.ReplyDescriptorPostQueueAddress =
7380 cpu_to_le64((u64)ioc->reply_post_free_array_dma);
7381 } else {
7382 mpi_request.ReplyDescriptorPostQueueAddress =
7383 cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
7384 }
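/*
* Reply-descriptor post queue addressing: with rdpq_array_enable set,
* each reply post queue has its own DMA allocation and the IOC is
* handed an array of Mpi2IOCInitRDPQArrayEntry base addresses (one
* entry per queue, flagged via RDPQ_ARRAY_MODE); otherwise all queues
* share one contiguous region and only the first base address is
* passed in the IOCInit request.
*/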
7385
7386 /*
7387 * Set the flag to enable the CoreDump state feature in IOC firmware.
7388 */
7389 mpi_request.ConfigurationFlags |=
7390 cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE);
7391
7392 /* This time stamp specifies the number of milliseconds
7393 * since epoch ~ midnight January 1, 1970.
7394 */
7395 current_time = ktime_get_real();
7396 mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));
7397
7398 if (ioc->logging_level & MPT_DEBUG_INIT) {
7399 __le32 *mfp;
7400 int i;
7401
7402 mfp = (__le32 *)&mpi_request;
7403 ioc_info(ioc, "\toffset:data\n");
7404 for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
7405 ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
7406 le32_to_cpu(mfp[i]));
7407 }
7408
7409 r = _base_handshake_req_reply_wait(ioc,
7410 sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
7411 sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30);
7412
7413 if (r != 0) {
7414 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
7415 return r;
7416 }
7417
7418 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7419 if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
7420 mpi_reply.IOCLogInfo) {
7421 ioc_err(ioc, "%s: failed\n", __func__);
7422 r = -EIO;
7423 }
7424
7425 /* Reset TimeSync Counter */
7426 ioc->timestamp_update_count = 0;
7427 return r;
7428 }
7429
7430 /**
7431 * mpt3sas_port_enable_done - command completion routine for port enable
7432 * @ioc: per adapter object
7433 * @smid: system request message index
7434 * @msix_index: MSIX table index supplied by the OS
7435 * @reply: reply message frame (lower 32bit addr)
7436 *
7437 * Return: 1 meaning mf should be freed from _base_interrupt,
7438 *         0 means the mf is freed from this function.
7439 */
7440 u8
7441 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
7442 u32 reply)
7443 {
7444 MPI2DefaultReply_t *mpi_reply;
7445 u16 ioc_status;
7446
7447 if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
7448 return 1;
7449
7450 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
7451 if (!mpi_reply)
7452 return 1;
7453
7454 if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
7455 return 1;
7456
7457 ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
7458 ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
7459 ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
7460 memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
7461 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
7462 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7463 ioc->port_enable_failed = 1;
7464
7465 if (ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE_ASYNC) {
7466 ioc->port_enable_cmds.status &= ~MPT3_CMD_COMPLETE_ASYNC;
7467 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
7468 mpt3sas_port_enable_complete(ioc);
7469 return 1;
7470 } else {
7471 ioc->start_scan_failed = ioc_status;
7472 ioc->start_scan = 0;
7473 return 1;
7474 }
7475 }
7476 complete(&ioc->port_enable_cmds.done);
7477 return 1;
7478 }
7479
7480 /**
7481 * _base_send_port_enable - send port_enable (discovery stuff) to firmware
7482 * @ioc: per adapter object
7483 *
7484 * Return: 0 for success, non-zero for failure.
7485 */
7486 static int
7487 _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
7488 {
7489 Mpi2PortEnableRequest_t *mpi_request;
7490 Mpi2PortEnableReply_t *mpi_reply;
7491 int r = 0;
7492 u16 smid;
7493 u16 ioc_status;
7494
7495 ioc_info(ioc, "sending port enable !!\n");
7496
7497 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
7498 ioc_err(ioc, "%s: internal command already in use\n", __func__);
7499 return -EAGAIN;
7500 }
7501
7502 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
7503 if (!smid) {
7504 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7505 return -EAGAIN;
7506 }
7507
7508 ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
7509 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7510 ioc->port_enable_cmds.smid = smid;
7511 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
7512 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
7513
7514 init_completion(&ioc->port_enable_cmds.done);
7515 ioc->put_smid_default(ioc, smid);
7516 wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
7517 if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
7518 ioc_err(ioc, "%s: timeout\n", __func__);
7519 _debug_dump_mf(mpi_request,
7520 sizeof(Mpi2PortEnableRequest_t)/4);
7521 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
7522 r = -EFAULT;
7523 else
7524 r = -ETIME;
7525 goto out;
7526 }
7527
7528 mpi_reply = ioc->port_enable_cmds.reply;
7529 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
7530 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7531 ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
7532 __func__, ioc_status);
7533 r = -EFAULT;
7534 goto out;
7535 }
7536
7537 out:
7538 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
7539 ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
7540 return r;
7541 }
7542
7543 /**
7544 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
7545 * @ioc: per adapter object
7546 *
7547 * Return: 0 for success, non-zero for failure.
7548 */
7549 int
7550 mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
7551 {
7552 Mpi2PortEnableRequest_t *mpi_request;
7553 u16 smid;
7554
7555 ioc_info(ioc, "sending port enable !!\n");
7556
7557 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
7558 ioc_err(ioc, "%s: internal command already in use\n", __func__);
7559 return -EAGAIN;
7560 }
7561
7562 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
7563 if (!smid) {
7564 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7565 return -EAGAIN;
7566 }
7567 ioc->drv_internal_flags |= MPT_DRV_INTERNAL_FIRST_PE_ISSUED;
7568 ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
7569 ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE_ASYNC;
7570 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7571 ioc->port_enable_cmds.smid = smid;
7572 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
7573 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
7574
7575 ioc->put_smid_default(ioc, smid);
7576 return 0;
7577 }
7578
7579 /**
7580 * _base_determine_wait_on_discovery - disposition
7581 * @ioc: per adapter object
7582 *
7583 * Decide whether to wait on discovery to complete. Used to either
7584 * locate a boot device, or to report volumes ahead of physical devices.
7585 *
7586 * Return: 1 for wait, 0 for don't wait.
7587 */
7588 static int
7589 _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
7590 {
7591 /* We wait for discovery to complete if IR firmware is loaded.
7592 * The sas topology events arrive before PD events, so we need
7593 * time to turn on the bit in ioc->pd_handles to indicate a PD.
7594 * Also, volumes may need to be reported ahead of physical devices
7595 * when MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE is set.
7596 */
7597 if (ioc->ir_firmware)
7598 return 1;
7599
7600 /* if there is no BIOS, then we don't need to wait */
7601 if (!ioc->bios_pg3.BiosVersion)
7602 return 0;
7603
7604 /* The BIOS is present.
7605 *
7606 * If there are any boot device entries set in BIOS Page 2, then
7607 * we wait for discovery to complete.
7608 */
7609 
7610 /* Current Boot Device */
7611 if ((ioc->bios_pg2.CurrentBootDeviceForm &
7612 MPI2_BIOSPAGE2_FORM_MASK) ==
7613 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
7614 /* Requested Boot Device */
7615 (ioc->bios_pg2.ReqBootDeviceForm &
7616 MPI2_BIOSPAGE2_FORM_MASK) ==
7617 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
7618 /* Alternate Requested Boot Device */
7619 (ioc->bios_pg2.ReqAltBootDeviceForm &
7620 MPI2_BIOSPAGE2_FORM_MASK) ==
7621 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
7622 return 0;
7623
7624 return 1;
7625 }
7626
7627 /**
7628 * _base_unmask_events - turn on notification for this event
7629 * @ioc: per adapter object
7630 * @event: firmware event
7631 *
7632 * The mask is stored in ioc->event_masks.
7633 */
7634 static void
7635 _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
7636 {
7637 u32 desired_event;
7638
7639 if (event >= 128)
7640 return;
7641
7642 desired_event = (1 << (event % 32));
7643
7644 if (event < 32)
7645 ioc->event_masks[0] &= ~desired_event;
7646 else if (event < 64)
7647 ioc->event_masks[1] &= ~desired_event;
7648 else if (event < 96)
7649 ioc->event_masks[2] &= ~desired_event;
7650 else if (event < 128)
7651 ioc->event_masks[3] &= ~desired_event;
7652 }
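/*
* Worked example of the bit math above: event 73 (0x49) selects
* ioc->event_masks[2] (since 73 / 32 == 2) and clears bit 9 (since
* 73 % 32 == 9), i.e. event_masks[2] &= ~(1 << 9). A cleared bit
* means the firmware is allowed to deliver that event.
*/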
7653
7654 /**
7655 * _base_event_notification - send event notification
7656 * @ioc: per adapter object
7657 *
7658 * Return: 0 for success, non-zero for failure.
7659 */
7660 static int
7661 _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
7662 {
7663 Mpi2EventNotificationRequest_t *mpi_request;
7664 u16 smid;
7665 int r = 0;
7666 int i, issue_diag_reset = 0;
7667
7668 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7669
7670 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
7671 ioc_err(ioc, "%s: internal command already in use\n", __func__);
7672 return -EAGAIN;
7673 }
7674
7675 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
7676 if (!smid) {
7677 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7678 return -EAGAIN;
7679 }
7680 ioc->base_cmds.status = MPT3_CMD_PENDING;
7681 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7682 ioc->base_cmds.smid = smid;
7683 memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
7684 mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
7685 mpi_request->VF_ID = 0;
7686 mpi_request->VP_ID = 0;
7687 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
7688 mpi_request->EventMasks[i] =
7689 cpu_to_le32(ioc->event_masks[i]);
7690 init_completion(&ioc->base_cmds.done);
7691 ioc->put_smid_default(ioc, smid);
7692 wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
7693 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
7694 ioc_err(ioc, "%s: timeout\n", __func__);
7695 _debug_dump_mf(mpi_request,
7696 sizeof(Mpi2EventNotificationRequest_t)/4);
7697 if (ioc->base_cmds.status & MPT3_CMD_RESET)
7698 r = -EFAULT;
7699 else
7700 issue_diag_reset = 1;
7701
7702 } else
7703 dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
7704 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7705
7706 if (issue_diag_reset) {
7707 if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
7708 return -EFAULT;
7709 if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
7710 return -EFAULT;
7711 r = -EAGAIN;
7712 }
7713 return r;
7714 }
7715
7716 /**
7717 * mpt3sas_base_validate_event_type - validating event types
7718 * @ioc: per adapter object
7719 * @event_type: firmware event
7720 *
7721 * This will turn on firmware event notification when an application
7722 * asks for that event. We only toggle the bits that are not set.
7723 */
7724 void
7725 mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
7726 {
7727 int i, j;
7728 u32 event_mask, desired_event;
7729 u8 send_update_to_fw;
7730
7731 for (i = 0, send_update_to_fw = 0; i <
7732 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
7733 event_mask = ~event_type[i];
7734 desired_event = 1;
7735 for (j = 0; j < 32; j++) {
7736 if (!(event_mask & desired_event) &&
7737 (ioc->event_masks[i] & desired_event)) {
7738 ioc->event_masks[i] &= ~desired_event;
7739 send_update_to_fw = 1;
7740 }
7741 desired_event = (desired_event << 1);
7742 }
7743 }
7744
7745 if (!send_update_to_fw)
7746 return;
7747
7748 mutex_lock(&ioc->base_cmds.mutex);
7749 _base_event_notification(ioc);
7750 mutex_unlock(&ioc->base_cmds.mutex);
7751 }
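/*
* Bit conventions used above: a set bit in @event_type means an
* application wants that event, while a set bit in ioc->event_masks
* means the event is masked off; hence event_mask = ~event_type[i].
* A mask bit is cleared only when an application asks for an event
* that is currently masked, and only then is the updated mask pushed
* to the firmware through _base_event_notification().
*/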
7752
7753 /**
7754 * _base_diag_reset - the "big hammer" start of day reset
7755 * @ioc: per adapter object
7756 *
7757 * Return: 0 for success, non-zero for failure.
7758 */
7759 static int
7760 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
7761 {
7762 u32 host_diagnostic;
7763 u32 ioc_state;
7764 u32 count;
7765 u32 hcb_size;
7766
7767 ioc_info(ioc, "sending diag reset !!\n");
7768
7769 pci_cfg_access_lock(ioc->pdev);
7770
7771 drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
7772
7773 count = 0;
7774 do {
7775 /* Write the magic sequence to the WriteSequence register.
7776 * Loop until the IOC is in diagnostic mode.
7777 */
7778 drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
7779 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
7780 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
7781 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
7782 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
7783 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
7784 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
7785 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
7786
7787 /* wait 100 msec */
7788 msleep(100);
7789
7790 if (count++ > 20) {
7791 ioc_info(ioc,
7792 "Stop writing magic sequence after 20 retries\n");
7793 _base_dump_reg_set(ioc);
7794 goto out;
7795 }
7796
7797 host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
7798 drsprintk(ioc,
7799 ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
7800 count, host_diagnostic));
7801
7802 } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
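/*
* The seven WriteSequence writes (the flush key followed by six
* unlock keys) are the MPI-defined sequence that unlocks the
* HostDiagnostic register; the loop retries, up to 20 times, until
* the register reports MPI2_DIAG_DIAG_WRITE_ENABLE.
*/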
7803
7804 hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
7805
7806 drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
7807 writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
7808 &ioc->chip->HostDiagnostic);
7809
7810 /* This delay allows the chip PCIe hardware time to finish reset tasks */
7811 msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
7812
7813 /* Approximately 300 second max wait */
7814 for (count = 0; count < (300000000 /
7815 MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
7816
7817 host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
7818
7819 if (host_diagnostic == 0xFFFFFFFF) {
7820 ioc_info(ioc,
7821 "Invalid host diagnostic register value\n");
7822 _base_dump_reg_set(ioc);
7823 goto out;
7824 }
7825 if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
7826 break;
7827
7828 msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
7829 }
7830
7831 if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
7832
7833 drsprintk(ioc,
7834 ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
7835 host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
7836 host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
7837 writel(host_diagnostic, &ioc->chip->HostDiagnostic);
7838
7839 drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
7840 writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
7841 &ioc->chip->HCBSize);
7842 }
7843
7844 drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
7845 writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
7846 &ioc->chip->HostDiagnostic);
7847
7848 drsprintk(ioc,
7849 ioc_info(ioc, "disable writes to the diagnostic register\n"));
7850 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
7851
7852 drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
7853 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
7854 if (ioc_state) {
7855 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
7856 __func__, ioc_state);
7857 _base_dump_reg_set(ioc);
7858 goto out;
7859 }
7860
7861 pci_cfg_access_unlock(ioc->pdev);
7862 ioc_info(ioc, "diag reset: SUCCESS\n");
7863 return 0;
7864
7865 out:
7866 pci_cfg_access_unlock(ioc->pdev);
7867 ioc_err(ioc, "diag reset: FAILED\n");
7868 return -EFAULT;
7869 }
7870
7871 /**
7872 * mpt3sas_base_make_ioc_ready - put controller in READY state
7873 * @ioc: per adapter object
7874 * @type: FORCE_BIG_HAMMER or SOFT_RESET
7875 *
7876 * Return: 0 for success, non-zero for failure.
7877 */
7878 int
7879 mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
7880 {
7881 u32 ioc_state;
7882 int rc;
7883 int count;
7884
7885 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7886
7887 if (ioc->pci_error_recovery)
7888 return 0;
7889
7890 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
7891 dhsprintk(ioc,
7892 ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
7893 __func__, ioc_state));
7894
7895 /* if in RESET state, it should move to READY state shortly */
7896 count = 0;
7897 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
7898 while ((ioc_state & MPI2_IOC_STATE_MASK) !=
7899 MPI2_IOC_STATE_READY) {
7900 if (count++ == 10) {
7901 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
7902 __func__, ioc_state);
7903 return -EFAULT;
7904 }
7905 ssleep(1);
7906 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
7907 }
7908 }
7909
7910 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
7911 return 0;
7912
7913 if (ioc_state & MPI2_DOORBELL_USED) {
7914 ioc_info(ioc, "unexpected doorbell active!\n");
7915 goto issue_diag_reset;
7916 }
7917
7918 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
7919 mpt3sas_print_fault_code(ioc, ioc_state &
7920 MPI2_DOORBELL_DATA_MASK);
7921 goto issue_diag_reset;
7922 }
7923
7924 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
7925 /*
7926 * If a host reset is invoked while the watchdog thread is
7927 * waiting for the IOC state to change to Fault, then the driver
7928 * has to wait here for the CoreDump state to clear; otherwise a
7929 * reset will be issued to the FW and the FW will move the IOC to
7930 * reset state without copying the FW logs to the coredump region.
7931 */
7932 if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) {
7933 mpt3sas_print_coredump_info(ioc, ioc_state &
7934 MPI2_DOORBELL_DATA_MASK);
7935 mpt3sas_base_wait_for_coredump_completion(ioc,
7936 __func__);
7937 }
7938 goto issue_diag_reset;
7939 }
7940
7941 if (type == FORCE_BIG_HAMMER)
7942 goto issue_diag_reset;
7943
7944 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
7945 if (!(_base_send_ioc_reset(ioc,
7946 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) {
7947 return 0;
7948 }
7949
7950 issue_diag_reset:
7951 rc = _base_diag_reset(ioc);
7952 return rc;
7953 }
7954
7955 /**
7956 * _base_make_ioc_operational - put controller in OPERATIONAL state
7957 * @ioc: per adapter object
7958 *
7959 * Return: 0 for success, non-zero for failure.
7960 */
7961 static int
7962 _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
7963 {
7964 int r, i, index, rc;
7965 unsigned long flags;
7966 u32 reply_address;
7967 u16 smid;
7968 struct _tr_list *delayed_tr, *delayed_tr_next;
7969 struct _sc_list *delayed_sc, *delayed_sc_next;
7970 struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
7971 u8 hide_flag;
7972 struct adapter_reply_queue *reply_q;
7973 Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
7974
7975 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7976
7977 /* clean the delayed target reset list */
7978 list_for_each_entry_safe(delayed_tr, delayed_tr_next,
7979 &ioc->delayed_tr_list, list) {
7980 list_del(&delayed_tr->list);
7981 kfree(delayed_tr);
7982 }
7983
7984 /* clean the delayed volume target reset list */
7985 list_for_each_entry_safe(delayed_tr, delayed_tr_next,
7986 &ioc->delayed_tr_volume_list, list) {
7987 list_del(&delayed_tr->list);
7988 kfree(delayed_tr);
7989 }
7990
7991 list_for_each_entry_safe(delayed_sc, delayed_sc_next,
7992 &ioc->delayed_sc_list, list) {
7993 list_del(&delayed_sc->list);
7994 kfree(delayed_sc);
7995 }
7996
7997 list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
7998 &ioc->delayed_event_ack_list, list) {
7999 list_del(&delayed_event_ack->list);
8000 kfree(delayed_event_ack);
8001 }
8002
8003 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8004
8005 /* hi-priority queue */
8006 INIT_LIST_HEAD(&ioc->hpr_free_list);
8007 smid = ioc->hi_priority_smid;
8008 for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
8009 ioc->hpr_lookup[i].cb_idx = 0xFF;
8010 ioc->hpr_lookup[i].smid = smid;
8011 list_add_tail(&ioc->hpr_lookup[i].tracker_list,
8012 &ioc->hpr_free_list);
8013 }
8014
8015 /* internal queue */
8016 INIT_LIST_HEAD(&ioc->internal_free_list);
8017 smid = ioc->internal_smid;
8018 for (i = 0; i < ioc->internal_depth; i++, smid++) {
8019 ioc->internal_lookup[i].cb_idx = 0xFF;
8020 ioc->internal_lookup[i].smid = smid;
8021 list_add_tail(&ioc->internal_lookup[i].tracker_list,
8022 &ioc->internal_free_list);
8023 }
8024
8025 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8026
8027 /* initialize Reply Free Queue */
8028 for (i = 0, reply_address = (u32)ioc->reply_dma ;
8029 i < ioc->reply_free_queue_depth ; i++, reply_address +=
8030 ioc->reply_sz) {
8031 ioc->reply_free[i] = cpu_to_le32(reply_address);
8032 if (ioc->is_mcpu_endpoint)
8033 _base_clone_reply_to_sys_mem(ioc,
8034 reply_address, i);
8035 }
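/*
* The reply free queue initialized above is a ring of 32-bit DMA
* addresses, one per reply frame, spaced ioc->reply_sz bytes apart
* within the reply_dma region; the firmware pops an address off this
* ring whenever it needs a frame in which to post a reply.
*/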
8036
8037 /* initialize reply queues */
8038 if (ioc->is_driver_loading)
8039 _base_assign_reply_queues(ioc);
8040
8041 /* initialize Reply Post Free Queue */
8042 index = 0;
8043 reply_post_free_contig = ioc->reply_post[0].reply_post_free;
8044 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
8045 /*
8046 * If RDPQ is enabled, switch to the next allocation.
8047 * Otherwise advance within the contiguous region.
8048 */
8049 if (ioc->rdpq_array_enable) {
8050 reply_q->reply_post_free =
8051 ioc->reply_post[index++].reply_post_free;
8052 } else {
8053 reply_q->reply_post_free = reply_post_free_contig;
8054 reply_post_free_contig += ioc->reply_post_queue_depth;
8055 }
8056
8057 reply_q->reply_post_host_index = 0;
8058 for (i = 0; i < ioc->reply_post_queue_depth; i++)
8059 reply_q->reply_post_free[i].Words =
8060 cpu_to_le64(ULLONG_MAX);
8061 if (!_base_is_controller_msix_enabled(ioc))
8062 goto skip_init_reply_post_free_queue;
8063 }
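/*
* Every descriptor is pre-filled with ULLONG_MAX because a valid
* reply descriptor never has all bits set; the interrupt handler
* relies on that pattern to detect how far the firmware has written
* into each reply post queue.
*/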
8064 skip_init_reply_post_free_queue:
8065
8066 r = _base_send_ioc_init(ioc);
8067 if (r) {
8068 /*
8069 * No need to check the IOC state for fault and issue a
8070 * diag reset during a host reset; this check is needed
8071 * only during driver load time.
8072 */
8073 if (!ioc->is_driver_loading)
8074 return r;
8075
8076 rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
8077 if (rc || (_base_send_ioc_init(ioc)))
8078 return r;
8079 }
8080
8081 /* initialize reply free host index */
8082 ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
8083 writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
8084
8085 /* initialize reply post host index */
8086 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
8087 if (ioc->combined_reply_queue)
8088 writel((reply_q->msix_index & 7)<<
8089 MPI2_RPHI_MSIX_INDEX_SHIFT,
8090 ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
8091 else
8092 writel(reply_q->msix_index <<
8093 MPI2_RPHI_MSIX_INDEX_SHIFT,
8094 &ioc->chip->ReplyPostHostIndex);
8095
8096 if (!_base_is_controller_msix_enabled(ioc))
8097 goto skip_init_reply_post_host_index;
8098 }
8099
8100 skip_init_reply_post_host_index:
8101
8102 mpt3sas_base_unmask_interrupts(ioc);
8103
8104 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
8105 r = _base_display_fwpkg_version(ioc);
8106 if (r)
8107 return r;
8108 }
8109
8110 r = _base_static_config_pages(ioc);
8111 if (r)
8112 return r;
8113
8114 r = _base_event_notification(ioc);
8115 if (r)
8116 return r;
8117
8118 if (!ioc->shost_recovery) {
8119
8120 if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
8121 == 0x80) {
8122 hide_flag = (u8) (
8123 le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
8124 MFG_PAGE10_HIDE_SSDS_MASK);
8125 if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
8126 ioc->mfg_pg10_hide_flag = hide_flag;
8127 }
8128
8129 ioc->wait_for_discovery_to_complete =
8130 _base_determine_wait_on_discovery(ioc);
8131
8132 return r;
8133 }
8134
8135 r = _base_send_port_enable(ioc);
8136 if (r)
8137 return r;
8138
8139 return r;
8140 }
8141
8142 /**
8143 * mpt3sas_base_free_resources - free controller resources
8144 * @ioc: per adapter object
8145 */
8146 void
8147 mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
8148 {
8149 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
8150
8151 /* synchronize freeing of resources with the pci_access_mutex lock */
8152 mutex_lock(&ioc->pci_access_mutex);
8153 if (ioc->chip_phys && ioc->chip) {
8154 mpt3sas_base_mask_interrupts(ioc);
8155 ioc->shost_recovery = 1;
8156 mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
8157 ioc->shost_recovery = 0;
8158 }
8159
8160 mpt3sas_base_unmap_resources(ioc);
8161 mutex_unlock(&ioc->pci_access_mutex);
8162 return;
8163 }
8164
8165 /**
8166 * mpt3sas_base_attach - attach controller instance
8167 * @ioc: per adapter object
8168 *
8169 * Return: 0 for success, non-zero for failure.
8170 */
8171 int
8172 mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
8173 {
8174 int r, i, rc;
8175 int cpu_id, last_cpu_id = 0;
8176
8177 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
8178
8179 /* setup cpu_msix_table */
8180 ioc->cpu_count = num_online_cpus();
8181 for_each_online_cpu(cpu_id)
8182 last_cpu_id = cpu_id;
8183 ioc->cpu_msix_table_sz = last_cpu_id + 1;
8184 ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
8185 ioc->reply_queue_count = 1;
8186 if (!ioc->cpu_msix_table) {
8187 ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n");
8188 r = -ENOMEM;
8189 goto out_free_resources;
8190 }
8191
8192 if (ioc->is_warpdrive) {
8193 ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
8194 sizeof(resource_size_t *), GFP_KERNEL);
8195 if (!ioc->reply_post_host_index) {
8196 ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n");
8197 r = -ENOMEM;
8198 goto out_free_resources;
8199 }
8200 }
8201
8202 ioc->smp_affinity_enable = smp_affinity_enable;
8203
8204 ioc->rdpq_array_enable_assigned = 0;
8205 ioc->use_32bit_dma = false;
8206 ioc->dma_mask = 64;
8207 if (ioc->is_aero_ioc)
8208 ioc->base_readl = &_base_readl_aero;
8209 else
8210 ioc->base_readl = &_base_readl;
8211 r = mpt3sas_base_map_resources(ioc);
8212 if (r)
8213 goto out_free_resources;
8214
8215 pci_set_drvdata(ioc->pdev, ioc->shost);
8216 r = _base_get_ioc_facts(ioc);
8217 if (r) {
8218 rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
8219 if (rc || (_base_get_ioc_facts(ioc)))
8220 goto out_free_resources;
8221 }
8222
8223 switch (ioc->hba_mpi_version_belonged) {
8224 case MPI2_VERSION:
8225 ioc->build_sg_scmd = &_base_build_sg_scmd;
8226 ioc->build_sg = &_base_build_sg;
8227 ioc->build_zero_len_sge = &_base_build_zero_len_sge;
8228 ioc->get_msix_index_for_smlio = &_base_get_msix_index;
8229 break;
8230 case MPI25_VERSION:
8231 case MPI26_VERSION:
8232 /*
8233 * In SAS3.0,
8234 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
8235 * Target Status - all require the IEEE formatted scatter gather
8236 * elements.
8237 */
8238 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
8239 ioc->build_sg = &_base_build_sg_ieee;
8240 ioc->build_nvme_prp = &_base_build_nvme_prp;
8241 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
8242 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
8243 if (ioc->high_iops_queues)
8244 ioc->get_msix_index_for_smlio =
8245 &_base_get_high_iops_msix_index;
8246 else
8247 ioc->get_msix_index_for_smlio = &_base_get_msix_index;
8248 break;
8249 }
8250 if (ioc->atomic_desc_capable) {
8251 ioc->put_smid_default = &_base_put_smid_default_atomic;
8252 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
8253 ioc->put_smid_fast_path =
8254 &_base_put_smid_fast_path_atomic;
8255 ioc->put_smid_hi_priority =
8256 &_base_put_smid_hi_priority_atomic;
8257 } else {
8258 ioc->put_smid_default = &_base_put_smid_default;
8259 ioc->put_smid_fast_path = &_base_put_smid_fast_path;
8260 ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
8261 if (ioc->is_mcpu_endpoint)
8262 ioc->put_smid_scsi_io =
8263 &_base_put_smid_mpi_ep_scsi_io;
8264 else
8265 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
8266 }
8267
8268 /*
8269 * These function pointers are for requests that don't require
8270 * IEEE scatter gather elements; for example, Configuration Pages
8271 * and SAS IO Unit Control don't.
8272 */
8273 ioc->build_sg_mpi = &_base_build_sg;
8274 ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
8275
8276 r = mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
8277 if (r)
8278 goto out_free_resources;
8279
8280 ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
8281 sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
8282 if (!ioc->pfacts) {
8283 r = -ENOMEM;
8284 goto out_free_resources;
8285 }
8286
8287 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
8288 r = _base_get_port_facts(ioc, i);
8289 if (r) {
8290 rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
8291 if (rc || (_base_get_port_facts(ioc, i)))
8292 goto out_free_resources;
8293 }
8294 }
8295
8296 r = _base_allocate_memory_pools(ioc);
8297 if (r)
8298 goto out_free_resources;
8299
8300 if (irqpoll_weight > 0)
8301 ioc->thresh_hold = irqpoll_weight;
8302 else
8303 ioc->thresh_hold = ioc->hba_queue_depth/4;
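/*
* thresh_hold is the irq-poll budget: if one reply queue services
* more than this many replies in a single interrupt, the driver
* switches that queue to polled mode (set up by the
* _base_init_irqpolls() call below). The default of one quarter of
* the HBA queue depth matches the irqpoll_weight parameter's
* documented default.
*/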
8304
8305 _base_init_irqpolls(ioc);
8306 init_waitqueue_head(&ioc->reset_wq);
8307
8308 /* allocate memory for the pd handle bitmask list */
8309 ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
8310 if (ioc->facts.MaxDevHandle % 8)
8311 ioc->pd_handles_sz++;
8312 ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
8313 GFP_KERNEL);
8314 if (!ioc->pd_handles) {
8315 r = -ENOMEM;
8316 goto out_free_resources;
8317 }
8318 ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
8319 GFP_KERNEL);
8320 if (!ioc->blocking_handles) {
8321 r = -ENOMEM;
8322 goto out_free_resources;
8323 }
8324
8325 /* allocate memory for the pending OS device add list */
8326 ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
8327 if (ioc->facts.MaxDevHandle % 8)
8328 ioc->pend_os_device_add_sz++;
8329 ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
8330 GFP_KERNEL);
8331 if (!ioc->pend_os_device_add) {
8332 r = -ENOMEM;
8333 goto out_free_resources;
8334 }
8335
8336 ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
8337 ioc->device_remove_in_progress =
8338 kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
8339 if (!ioc->device_remove_in_progress) {
8340 r = -ENOMEM;
8341 goto out_free_resources;
8342 }
8343
8344 ioc->fwfault_debug = mpt3sas_fwfault_debug;
8345
8346 /* base internal command bits */
8347 mutex_init(&ioc->base_cmds.mutex);
8348 ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8349 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
8350
8351 /* port_enable command bits */
8352 ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8353 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
8354
8355 /* transport internal command bits */
8356 ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8357 ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
8358 mutex_init(&ioc->transport_cmds.mutex);
8359
8360 /* scsih internal command bits */
8361 ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8362 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8363 mutex_init(&ioc->scsih_cmds.mutex);
8364
8365 /* task management internal command bits */
8366 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8367 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
8368 mutex_init(&ioc->tm_cmds.mutex);
8369
8370 /* config page internal command bits */
8371 ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8372 ioc->config_cmds.status = MPT3_CMD_NOT_USED;
8373 mutex_init(&ioc->config_cmds.mutex);
8374
8375 /* ctl module internal command bits */
8376 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8377 ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
8378 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
8379 mutex_init(&ioc->ctl_cmds.mutex);
8380
8381 if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
8382 !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
8383 !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
8384 !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
8385 r = -ENOMEM;
8386 goto out_free_resources;
8387 }
8388
8389 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
8390 ioc->event_masks[i] = -1;
8391
8392 /* here we enable the events we care about */
8393 _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
8394 _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
8395 _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
8396 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
8397 _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
8398 _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
8399 _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
8400 _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
8401 _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
8402 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
8403 _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
8404 _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
8405 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
8406 if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
8407 if (ioc->is_gen35_ioc) {
8408 _base_unmask_events(ioc,
8409 MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
8410 _base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
8411 _base_unmask_events(ioc,
8412 MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
8413 }
8414 }
8415 r = _base_make_ioc_operational(ioc);
8416 if (r == -EAGAIN) {
8417 r = _base_make_ioc_operational(ioc);
8418 if (r)
8419 goto out_free_resources;
8420 }
8421
8422 /*
8423 * Copy the current IOCFacts into prev_fw_facts; the copy is
8424 * used during online firmware upgrade to detect changes.
8425 */
8426 memcpy(&ioc->prev_fw_facts, &ioc->facts,
8427 sizeof(struct mpt3sas_facts));
8428
8429 ioc->non_operational_loop = 0;
8430 ioc->ioc_coredump_loop = 0;
8431 ioc->got_task_abort_from_ioctl = 0;
8432 return 0;
8433
8434 out_free_resources:
8435
8436 ioc->remove_host = 1;
8437
8438 mpt3sas_base_free_resources(ioc);
8439 _base_release_memory_pools(ioc);
8440 pci_set_drvdata(ioc->pdev, NULL);
8441 kfree(ioc->cpu_msix_table);
8442 if (ioc->is_warpdrive)
8443 kfree(ioc->reply_post_host_index);
8444 kfree(ioc->pd_handles);
8445 kfree(ioc->blocking_handles);
8446 kfree(ioc->device_remove_in_progress);
8447 kfree(ioc->pend_os_device_add);
8448 kfree(ioc->tm_cmds.reply);
8449 kfree(ioc->transport_cmds.reply);
8450 kfree(ioc->scsih_cmds.reply);
8451 kfree(ioc->config_cmds.reply);
8452 kfree(ioc->base_cmds.reply);
8453 kfree(ioc->port_enable_cmds.reply);
8454 kfree(ioc->ctl_cmds.reply);
8455 kfree(ioc->ctl_cmds.sense);
8456 kfree(ioc->pfacts);
8457 ioc->ctl_cmds.reply = NULL;
8458 ioc->base_cmds.reply = NULL;
8459 ioc->tm_cmds.reply = NULL;
8460 ioc->scsih_cmds.reply = NULL;
8461 ioc->transport_cmds.reply = NULL;
8462 ioc->config_cmds.reply = NULL;
8463 ioc->pfacts = NULL;
8464 return r;
8465 }
8466
8467
8468 /**
8469 * mpt3sas_base_detach - remove controller instance
8470 * @ioc: per adapter object
8471 */
8472 void
8473 mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
8474 {
8475 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
8476
8477 mpt3sas_base_stop_watchdog(ioc);
8478 mpt3sas_base_free_resources(ioc);
8479 _base_release_memory_pools(ioc);
8480 mpt3sas_free_enclosure_list(ioc);
8481 pci_set_drvdata(ioc->pdev, NULL);
8482 kfree(ioc->cpu_msix_table);
8483 if (ioc->is_warpdrive)
8484 kfree(ioc->reply_post_host_index);
8485 kfree(ioc->pd_handles);
8486 kfree(ioc->blocking_handles);
8487 kfree(ioc->device_remove_in_progress);
8488 kfree(ioc->pend_os_device_add);
8489 kfree(ioc->pfacts);
8490 kfree(ioc->ctl_cmds.reply);
8491 kfree(ioc->ctl_cmds.sense);
8492 kfree(ioc->base_cmds.reply);
8493 kfree(ioc->port_enable_cmds.reply);
8494 kfree(ioc->tm_cmds.reply);
8495 kfree(ioc->transport_cmds.reply);
8496 kfree(ioc->scsih_cmds.reply);
8497 kfree(ioc->config_cmds.reply);
8498 }
8499
8500 /**
8501 * _base_pre_reset_handler - pre reset handler
8502 * @ioc: per adapter object
8503 */
8504 static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
8505 {
8506 mpt3sas_scsih_pre_reset_handler(ioc);
8507 mpt3sas_ctl_pre_reset_handler(ioc);
8508 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
8509 }
8510
8511 /**
8512 * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands
8513 * @ioc: per adapter object
8514 */
8515 static void
8516 _base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
8517 {
8518 dtmprintk(ioc,
8519 ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__));
8520 if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
8521 ioc->transport_cmds.status |= MPT3_CMD_RESET;
8522 mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
8523 complete(&ioc->transport_cmds.done);
8524 }
8525 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
8526 ioc->base_cmds.status |= MPT3_CMD_RESET;
8527 mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
8528 complete(&ioc->base_cmds.done);
8529 }
8530 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
8531 ioc->port_enable_failed = 1;
8532 ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
8533 mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
8534 if (ioc->is_driver_loading) {
8535 ioc->start_scan_failed =
8536 MPI2_IOCSTATUS_INTERNAL_ERROR;
8537 ioc->start_scan = 0;
8538 } else {
8539 complete(&ioc->port_enable_cmds.done);
8540 }
8541 }
8542 if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
8543 ioc->config_cmds.status |= MPT3_CMD_RESET;
8544 mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
8545 ioc->config_cmds.smid = USHRT_MAX;
8546 complete(&ioc->config_cmds.done);
8547 }
8548 }
8549
8550 /**
8551 * _base_clear_outstanding_commands - clear all outstanding commands
8552 * @ioc: per adapter object
8553 */
8554 static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc)
8555 {
8556 mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc);
8557 mpt3sas_ctl_clear_outstanding_ioctls(ioc);
8558 _base_clear_outstanding_mpt_commands(ioc);
8559 }
8560
8561 /**
8562 * _base_reset_done_handler - reset done handler
8563 * @ioc: per adapter object
8564 */
8565 static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
8566 {
8567 mpt3sas_scsih_reset_done_handler(ioc);
8568 mpt3sas_ctl_reset_done_handler(ioc);
8569 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
8570 }
8571
8572 /**
8573 * mpt3sas_wait_for_commands_to_complete - wait for pending commands
8574 * @ioc: per adapter object
8575 *
8576 * Waits up to 10 seconds for all outstanding scsi host commands to
8577 * complete, prior to putting the controller into reset.
8578 */
8579 void
8580 mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
8581 {
8582 u32 ioc_state;
8583
8584 ioc->pending_io_count = 0;
8585
8586 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
8587 if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
8588 return;
8589
8590 /* pending command count */
8591 ioc->pending_io_count = scsi_host_busy(ioc->shost);
8592
8593 if (!ioc->pending_io_count)
8594 return;
8595
8596 /* wait for pending commands to complete */
8597 wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
8598 }
8599
8600 /**
8601 * _base_check_ioc_facts_changes - look for increase/decrease of IOCFacts
8602 * attributes during online firmware upgrade and update the IOC
8603 * variables accordingly.
8604 * @ioc: Pointer to MPT3SAS_ADAPTER structure
8605 * Return: 0 for success, -ENOMEM on allocation failure.
8606 */
8607 static int
8608 _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
8609 {
8610 u16 pd_handles_sz;
8611 void *pd_handles = NULL, *blocking_handles = NULL;
8612 void *pend_os_device_add = NULL, *device_remove_in_progress = NULL;
8613 struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts;
8614
8615 if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
8616 pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
8617 if (ioc->facts.MaxDevHandle % 8)
8618 pd_handles_sz++;
8619
8620 pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
8621 GFP_KERNEL);
8622 if (!pd_handles) {
8623 ioc_info(ioc,
8624 "Unable to allocate the memory for pd_handles of sz: %d\n",
8625 pd_handles_sz);
8626 return -ENOMEM;
8627 }
8628 memset(pd_handles + ioc->pd_handles_sz, 0,
8629 (pd_handles_sz - ioc->pd_handles_sz));
8630 ioc->pd_handles = pd_handles;
8631
8632 blocking_handles = krealloc(ioc->blocking_handles,
8633 pd_handles_sz, GFP_KERNEL);
8634 if (!blocking_handles) {
8635 ioc_info(ioc,
8636 "Unable to allocate the memory for "
8637 "blocking_handles of sz: %d\n",
8638 pd_handles_sz);
8639 return -ENOMEM;
8640 }
8641 memset(blocking_handles + ioc->pd_handles_sz, 0,
8642 (pd_handles_sz - ioc->pd_handles_sz));
8643 ioc->blocking_handles = blocking_handles;
8644 ioc->pd_handles_sz = pd_handles_sz;
8645
8646 pend_os_device_add = krealloc(ioc->pend_os_device_add,
8647 pd_handles_sz, GFP_KERNEL);
8648 if (!pend_os_device_add) {
8649 ioc_info(ioc,
8650 "Unable to allocate the memory for pend_os_device_add of sz: %d\n",
8651 pd_handles_sz);
8652 return -ENOMEM;
8653 }
8654 memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
8655 (pd_handles_sz - ioc->pend_os_device_add_sz));
8656 ioc->pend_os_device_add = pend_os_device_add;
8657 ioc->pend_os_device_add_sz = pd_handles_sz;
8658
8659 device_remove_in_progress = krealloc(
8660 ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
8661 if (!device_remove_in_progress) {
8662 ioc_info(ioc,
8663 "Unable to allocate the memory for "
8664 "device_remove_in_progress of sz: %d\n "
8665 , pd_handles_sz);
8666 return -ENOMEM;
8667 }
8668 memset(device_remove_in_progress +
8669 ioc->device_remove_in_progress_sz, 0,
8670 (pd_handles_sz - ioc->device_remove_in_progress_sz));
8671 ioc->device_remove_in_progress = device_remove_in_progress;
8672 ioc->device_remove_in_progress_sz = pd_handles_sz;
8673 }
8674
8675 memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts));
8676 return 0;
8677 }
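/*
* Note the growth-only pattern above: each per-device-handle bitmap
* is krealloc()'d to the larger size and only the newly added tail
* bytes are zeroed, preserving the tracked state in the leading
* bytes. The maps are never shrunk when MaxDevHandle decreases.
*/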
8678
8679 /**
8680 * mpt3sas_base_hard_reset_handler - reset controller
8681 * @ioc: Pointer to MPT3SAS_ADAPTER structure
8682 * @type: FORCE_BIG_HAMMER or SOFT_RESET
8683 *
8684 * Return: 0 for success, non-zero for failure.
8685 */
8686 int
8687 mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
8688 enum reset_type type)
8689 {
8690 int r;
8691 unsigned long flags;
8692 u32 ioc_state;
8693 u8 is_fault = 0, is_trigger = 0;
8694
8695 dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
8696
8697 if (ioc->pci_error_recovery) {
8698 ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
8699 r = 0;
8700 goto out_unlocked;
8701 }
8702
8703 if (mpt3sas_fwfault_debug)
8704 mpt3sas_halt_firmware(ioc);
8705
8706 /* serialize: only one reset can be in progress at a time */
8707 mutex_lock(&ioc->reset_in_progress_mutex);
8708
8709 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
8710 ioc->shost_recovery = 1;
8711 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
8712
8713 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
8714 MPT3_DIAG_BUFFER_IS_REGISTERED) &&
8715 (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
8716 MPT3_DIAG_BUFFER_IS_RELEASED))) {
8717 is_trigger = 1;
8718 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
8719 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT ||
8720 (ioc_state & MPI2_IOC_STATE_MASK) ==
8721 MPI2_IOC_STATE_COREDUMP) {
8722 is_fault = 1;
8723 ioc->htb_rel.trigger_info_dwords[1] =
8724 (ioc_state & MPI2_DOORBELL_DATA_MASK);
8725 }
8726 }
8727 _base_pre_reset_handler(ioc);
8728 mpt3sas_wait_for_commands_to_complete(ioc);
8729 mpt3sas_base_mask_interrupts(ioc);
8730 mpt3sas_base_pause_mq_polling(ioc);
8731 r = mpt3sas_base_make_ioc_ready(ioc, type);
8732 if (r)
8733 goto out;
8734 _base_clear_outstanding_commands(ioc);
8735
8736 /* If this hard reset is called while port enable is active,
8737 * then there is no reason to call make_ioc_operational.
8738 */
8739 if (ioc->is_driver_loading && ioc->port_enable_failed) {
8740 ioc->remove_host = 1;
8741 r = -EFAULT;
8742 goto out;
8743 }
8744 r = _base_get_ioc_facts(ioc);
8745 if (r)
8746 goto out;
8747
8748 r = _base_check_ioc_facts_changes(ioc);
8749 if (r) {
8750 ioc_info(ioc,
8751 "Some of the parameters got changed in this new firmware"
8752 " image and it requires system reboot\n");
8753 goto out;
8754 }
8755 if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
8756 panic("%s: Issue occurred with flashing controller firmware."
8757 "Please reboot the system and ensure that the correct"
8758 " firmware version is running\n", ioc->name);
8759
8760 r = _base_make_ioc_operational(ioc);
8761 if (!r)
8762 _base_reset_done_handler(ioc);
8763
8764 out:
8765 ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED");
8766
8767 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
8768 ioc->shost_recovery = 0;
8769 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
8770 ioc->ioc_reset_count++;
8771 mutex_unlock(&ioc->reset_in_progress_mutex);
8772 mpt3sas_base_resume_mq_polling(ioc);
8773
8774 out_unlocked:
8775 if ((r == 0) && is_trigger) {
8776 if (is_fault)
8777 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
8778 else
8779 mpt3sas_trigger_master(ioc,
8780 MASTER_TRIGGER_ADAPTER_RESET);
8781 }
8782 dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
8783 return r;
8784 }