0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  *    driver for Microchip PQI-based storage controllers
0004  *    Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
0005  *    Copyright (c) 2016-2018 Microsemi Corporation
0006  *    Copyright (c) 2016 PMC-Sierra, Inc.
0007  *
0008  *    Questions/Comments/Bugfixes to storagedev@microchip.com
0009  *
0010  */
0011 
0012 #include <linux/module.h>
0013 #include <linux/kernel.h>
0014 #include <linux/pci.h>
0015 #include <linux/delay.h>
0016 #include <linux/interrupt.h>
0017 #include <linux/sched.h>
0018 #include <linux/rtc.h>
0019 #include <linux/bcd.h>
0020 #include <linux/reboot.h>
0021 #include <linux/cciss_ioctl.h>
0022 #include <linux/blk-mq-pci.h>
0023 #include <scsi/scsi_host.h>
0024 #include <scsi/scsi_cmnd.h>
0025 #include <scsi/scsi_device.h>
0026 #include <scsi/scsi_eh.h>
0027 #include <scsi/scsi_transport_sas.h>
0028 #include <asm/unaligned.h>
0029 #include "smartpqi.h"
0030 #include "smartpqi_sis.h"
0031 
0032 #if !defined(BUILD_TIMESTAMP)
0033 #define BUILD_TIMESTAMP
0034 #endif
0035 
0036 #define DRIVER_VERSION      "2.1.18-045"
0037 #define DRIVER_MAJOR        2
0038 #define DRIVER_MINOR        1
0039 #define DRIVER_RELEASE      18
0040 #define DRIVER_REVISION     45
0041 
0042 #define DRIVER_NAME     "Microchip SmartPQI Driver (v" \
0043                 DRIVER_VERSION BUILD_TIMESTAMP ")"
0044 #define DRIVER_NAME_SHORT   "smartpqi"
0045 
0046 #define PQI_EXTRA_SGL_MEMORY    (12 * sizeof(struct pqi_sg_descriptor))
0047 
0048 #define PQI_POST_RESET_DELAY_SECS           5
0049 #define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS  10
0050 
0051 MODULE_AUTHOR("Microchip");
0052 MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
0053     DRIVER_VERSION);
0054 MODULE_VERSION(DRIVER_VERSION);
0055 MODULE_LICENSE("GPL");
0056 
0057 struct pqi_cmd_priv {
0058     int this_residual;
0059 };
0060 
0061 static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
0062 {
0063     return scsi_cmd_priv(cmd);
0064 }
0065 
0066 static void pqi_verify_structures(void);
0067 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
0068     enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
0069 static void pqi_ctrl_offline_worker(struct work_struct *work);
0070 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
0071 static void pqi_scan_start(struct Scsi_Host *shost);
0072 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
0073     struct pqi_queue_group *queue_group, enum pqi_io_path path,
0074     struct pqi_io_request *io_request);
0075 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
0076     struct pqi_iu_header *request, unsigned int flags,
0077     struct pqi_raid_error_info *error_info);
0078 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
0079     struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
0080     unsigned int cdb_length, struct pqi_queue_group *queue_group,
0081     struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
0082 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
0083     struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
0084     struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
0085     struct pqi_scsi_dev_raid_map_data *rmd);
0086 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
0087     struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
0088     struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
0089     struct pqi_scsi_dev_raid_map_data *rmd);
0090 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
0091 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
0092 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
0093 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
0094 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
0095 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
0096 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
0097     struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
0098 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
0099 
0100 /* for flags argument to pqi_submit_raid_request_synchronous() */
0101 #define PQI_SYNC_FLAGS_INTERRUPTABLE    0x1
0102 
0103 static struct scsi_transport_template *pqi_sas_transport_template;
0104 
0105 static atomic_t pqi_controller_count = ATOMIC_INIT(0);
0106 
0107 enum pqi_lockup_action {
0108     NONE,
0109     REBOOT,
0110     PANIC
0111 };
0112 
0113 static enum pqi_lockup_action pqi_lockup_action = NONE;
0114 
0115 static struct {
0116     enum pqi_lockup_action  action;
0117     char            *name;
0118 } pqi_lockup_actions[] = {
0119     {
0120         .action = NONE,
0121         .name = "none",
0122     },
0123     {
0124         .action = REBOOT,
0125         .name = "reboot",
0126     },
0127     {
0128         .action = PANIC,
0129         .name = "panic",
0130     },
0131 };
0132 
0133 static unsigned int pqi_supported_event_types[] = {
0134     PQI_EVENT_TYPE_HOTPLUG,
0135     PQI_EVENT_TYPE_HARDWARE,
0136     PQI_EVENT_TYPE_PHYSICAL_DEVICE,
0137     PQI_EVENT_TYPE_LOGICAL_DEVICE,
0138     PQI_EVENT_TYPE_OFA,
0139     PQI_EVENT_TYPE_AIO_STATE_CHANGE,
0140     PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
0141 };
0142 
0143 static int pqi_disable_device_id_wildcards;
0144 module_param_named(disable_device_id_wildcards,
0145     pqi_disable_device_id_wildcards, int, 0644);
0146 MODULE_PARM_DESC(disable_device_id_wildcards,
0147     "Disable device ID wildcards.");
0148 
0149 static int pqi_disable_heartbeat;
0150 module_param_named(disable_heartbeat,
0151     pqi_disable_heartbeat, int, 0644);
0152 MODULE_PARM_DESC(disable_heartbeat,
0153     "Disable heartbeat.");
0154 
0155 static int pqi_disable_ctrl_shutdown;
0156 module_param_named(disable_ctrl_shutdown,
0157     pqi_disable_ctrl_shutdown, int, 0644);
0158 MODULE_PARM_DESC(disable_ctrl_shutdown,
0159     "Disable controller shutdown when controller locked up.");
0160 
0161 static char *pqi_lockup_action_param;
0162 module_param_named(lockup_action,
0163     pqi_lockup_action_param, charp, 0644);
0164 MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
0165     "\t\tSupported: none, reboot, panic\n"
0166     "\t\tDefault: none");
0167 
0168 static int pqi_expose_ld_first;
0169 module_param_named(expose_ld_first,
0170     pqi_expose_ld_first, int, 0644);
0171 MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");
0172 
0173 static int pqi_hide_vsep;
0174 module_param_named(hide_vsep,
0175     pqi_hide_vsep, int, 0644);
0176 MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");
0177 
0178 static int pqi_disable_managed_interrupts;
0179 module_param_named(disable_managed_interrupts,
0180     pqi_disable_managed_interrupts, int, 0644);
0181 MODULE_PARM_DESC(disable_managed_interrupts,
0182     "Disable the kernel automatically assigning SMP affinity to IRQs.");
0183 
0184 static unsigned int pqi_ctrl_ready_timeout_secs;
0185 module_param_named(ctrl_ready_timeout,
0186     pqi_ctrl_ready_timeout_secs, uint, 0644);
0187 MODULE_PARM_DESC(ctrl_ready_timeout,
0188     "Timeout in seconds for driver to wait for controller ready.");
0189 
0190 static char *raid_levels[] = {
0191     "RAID-0",
0192     "RAID-4",
0193     "RAID-1(1+0)",
0194     "RAID-5",
0195     "RAID-5+1",
0196     "RAID-6",
0197     "RAID-1(Triple)",
0198 };
0199 
0200 static char *pqi_raid_level_to_string(u8 raid_level)
0201 {
0202     if (raid_level < ARRAY_SIZE(raid_levels))
0203         return raid_levels[raid_level];
0204 
0205     return "RAID UNKNOWN";
0206 }
0207 
0208 #define SA_RAID_0       0
0209 #define SA_RAID_4       1
0210 #define SA_RAID_1       2   /* also used for RAID 10 */
0211 #define SA_RAID_5       3   /* also used for RAID 50 */
0212 #define SA_RAID_51      4
0213 #define SA_RAID_6       5   /* also used for RAID 60 */
0214 #define SA_RAID_TRIPLE      6   /* also used for RAID 1+0 Triple */
0215 #define SA_RAID_MAX     SA_RAID_TRIPLE
0216 #define SA_RAID_UNKNOWN     0xff
0217 
0218 static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
0219 {
0220     pqi_prep_for_scsi_done(scmd);
0221     scsi_done(scmd);
0222 }
0223 
0224 static inline void pqi_disable_write_same(struct scsi_device *sdev)
0225 {
0226     sdev->no_write_same = 1;
0227 }
0228 
0229 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
0230 {
0231     return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
0232 }
0233 
0234 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
0235 {
0236     return !device->is_physical_device;
0237 }
0238 
0239 static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
0240 {
0241     return scsi3addr[2] != 0;
0242 }
0243 
0244 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
0245 {
0246     return !ctrl_info->controller_online;
0247 }
0248 
0249 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
0250 {
0251     if (ctrl_info->controller_online)
0252         if (!sis_is_firmware_running(ctrl_info))
0253             pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
0254 }
0255 
0256 static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
0257 {
0258     return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
0259 }
0260 
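/*
 * Flag bits stored in the SIS driver-scratch register.  The accessors below
 * use them to record whether the controller was left in PQI mode and whether
 * firmware triage is supported, so the driver can retrieve that state later.
 */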
0261 #define PQI_DRIVER_SCRATCH_PQI_MODE         0x1
0262 #define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED      0x2
0263 
0264 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
0265 {
0266     return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE;
0267 }
0268 
0269 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
0270     enum pqi_ctrl_mode mode)
0271 {
0272     u32 driver_scratch;
0273 
0274     driver_scratch = sis_read_driver_scratch(ctrl_info);
0275 
0276     if (mode == PQI_MODE)
0277         driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
0278     else
0279         driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;
0280 
0281     sis_write_driver_scratch(ctrl_info, driver_scratch);
0282 }
0283 
0284 static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
0285 {
0286     return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
0287 }
0288 
0289 static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
0290 {
0291     u32 driver_scratch;
0292 
0293     driver_scratch = sis_read_driver_scratch(ctrl_info);
0294 
0295     if (is_supported)
0296         driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
0297     else
0298         driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
0299 
0300     sis_write_driver_scratch(ctrl_info, driver_scratch);
0301 }
0302 
0303 static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
0304 {
0305     ctrl_info->scan_blocked = true;
0306     mutex_lock(&ctrl_info->scan_mutex);
0307 }
0308 
0309 static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
0310 {
0311     ctrl_info->scan_blocked = false;
0312     mutex_unlock(&ctrl_info->scan_mutex);
0313 }
0314 
0315 static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
0316 {
0317     return ctrl_info->scan_blocked;
0318 }
0319 
0320 static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
0321 {
0322     mutex_lock(&ctrl_info->lun_reset_mutex);
0323 }
0324 
0325 static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
0326 {
0327     mutex_unlock(&ctrl_info->lun_reset_mutex);
0328 }
0329 
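/*
 * Block new requests at the SCSI midlayer, then poll until every command
 * already queued to the host has drained.  The poll interval starts at
 * 20 ms and backs off to 500 ms after ten iterations.
 */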
0330 static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
0331 {
0332     struct Scsi_Host *shost;
0333     unsigned int num_loops;
0334     int msecs_sleep;
0335 
0336     shost = ctrl_info->scsi_host;
0337 
0338     scsi_block_requests(shost);
0339 
0340     num_loops = 0;
0341     msecs_sleep = 20;
0342     while (scsi_host_busy(shost)) {
0343         num_loops++;
0344         if (num_loops == 10)
0345             msecs_sleep = 500;
0346         msleep(msecs_sleep);
0347     }
0348 }
0349 
0350 static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
0351 {
0352     scsi_unblock_requests(ctrl_info->scsi_host);
0353 }
0354 
0355 static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
0356 {
0357     atomic_inc(&ctrl_info->num_busy_threads);
0358 }
0359 
0360 static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
0361 {
0362     atomic_dec(&ctrl_info->num_busy_threads);
0363 }
0364 
0365 static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
0366 {
0367     return ctrl_info->block_requests;
0368 }
0369 
0370 static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
0371 {
0372     ctrl_info->block_requests = true;
0373 }
0374 
0375 static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
0376 {
0377     ctrl_info->block_requests = false;
0378     wake_up_all(&ctrl_info->block_requests_wait);
0379 }
0380 
0381 static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
0382 {
0383     if (!pqi_ctrl_blocked(ctrl_info))
0384         return;
0385 
0386     atomic_inc(&ctrl_info->num_blocked_threads);
0387     wait_event(ctrl_info->block_requests_wait,
0388         !pqi_ctrl_blocked(ctrl_info));
0389     atomic_dec(&ctrl_info->num_blocked_threads);
0390 }
0391 
0392 #define PQI_QUIESCE_WARNING_TIMEOUT_SECS        10
0393 
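/*
 * Driver activity is considered quiesced once every thread counted as busy
 * is also counted as blocked, i.e. is parked on block_requests_wait rather
 * than actively issuing requests.  A warning is logged if this takes longer
 * than PQI_QUIESCE_WARNING_TIMEOUT_SECS.
 */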
0394 static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
0395 {
0396     unsigned long start_jiffies;
0397     unsigned long warning_timeout;
0398     bool displayed_warning;
0399 
0400     displayed_warning = false;
0401     start_jiffies = jiffies;
0402     warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
0403 
0404     while (atomic_read(&ctrl_info->num_busy_threads) >
0405         atomic_read(&ctrl_info->num_blocked_threads)) {
0406         if (time_after(jiffies, warning_timeout)) {
0407             dev_warn(&ctrl_info->pci_dev->dev,
0408                 "waiting %u seconds for driver activity to quiesce\n",
0409                 jiffies_to_msecs(jiffies - start_jiffies) / 1000);
0410             displayed_warning = true;
0411             warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
0412         }
0413         usleep_range(1000, 2000);
0414     }
0415 
0416     if (displayed_warning)
0417         dev_warn(&ctrl_info->pci_dev->dev,
0418             "driver activity quiesced after waiting for %u seconds\n",
0419             jiffies_to_msecs(jiffies - start_jiffies) / 1000);
0420 }
0421 
0422 static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
0423 {
0424     return device->device_offline;
0425 }
0426 
0427 static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
0428 {
0429     mutex_lock(&ctrl_info->ofa_mutex);
0430 }
0431 
0432 static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
0433 {
0434     mutex_unlock(&ctrl_info->ofa_mutex);
0435 }
0436 
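/*
 * Taking and immediately releasing ofa_mutex simply blocks the caller until
 * any in-progress Online Firmware Activation (OFA) has completed.
 */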
0437 static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
0438 {
0439     mutex_lock(&ctrl_info->ofa_mutex);
0440     mutex_unlock(&ctrl_info->ofa_mutex);
0441 }
0442 
0443 static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
0444 {
0445     return mutex_is_locked(&ctrl_info->ofa_mutex);
0446 }
0447 
0448 static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
0449 {
0450     device->in_remove = true;
0451 }
0452 
0453 static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
0454 {
0455     return device->in_remove;
0456 }
0457 
0458 static inline int pqi_event_type_to_event_index(unsigned int event_type)
0459 {
0460     int index;
0461 
0462     for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
0463         if (event_type == pqi_supported_event_types[index])
0464             return index;
0465 
0466     return -1;
0467 }
0468 
0469 static inline bool pqi_is_supported_event(unsigned int event_type)
0470 {
0471     return pqi_event_type_to_event_index(event_type) != -1;
0472 }
0473 
0474 static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
0475     unsigned long delay)
0476 {
0477     if (pqi_ctrl_offline(ctrl_info))
0478         return;
0479 
0480     schedule_delayed_work(&ctrl_info->rescan_work, delay);
0481 }
0482 
0483 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
0484 {
0485     pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
0486 }
0487 
0488 #define PQI_RESCAN_WORK_DELAY   (10 * HZ)
0489 
0490 static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
0491 {
0492     pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
0493 }
0494 
0495 static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
0496 {
0497     cancel_delayed_work_sync(&ctrl_info->rescan_work);
0498 }
0499 
0500 static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
0501 {
0502     if (!ctrl_info->heartbeat_counter)
0503         return 0;
0504 
0505     return readl(ctrl_info->heartbeat_counter);
0506 }
0507 
0508 static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
0509 {
0510     return readb(ctrl_info->soft_reset_status);
0511 }
0512 
0513 static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
0514 {
0515     u8 status;
0516 
0517     status = pqi_read_soft_reset_status(ctrl_info);
0518     status &= ~PQI_SOFT_RESET_ABORT;
0519     writeb(status, ctrl_info->soft_reset_status);
0520 }
0521 
0522 static int pqi_map_single(struct pci_dev *pci_dev,
0523     struct pqi_sg_descriptor *sg_descriptor, void *buffer,
0524     size_t buffer_length, enum dma_data_direction data_direction)
0525 {
0526     dma_addr_t bus_address;
0527 
0528     if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
0529         return 0;
0530 
0531     bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
0532         data_direction);
0533     if (dma_mapping_error(&pci_dev->dev, bus_address))
0534         return -ENOMEM;
0535 
0536     put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
0537     put_unaligned_le32(buffer_length, &sg_descriptor->length);
0538     put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
0539 
0540     return 0;
0541 }
0542 
0543 static void pqi_pci_unmap(struct pci_dev *pci_dev,
0544     struct pqi_sg_descriptor *descriptors, int num_descriptors,
0545     enum dma_data_direction data_direction)
0546 {
0547     int i;
0548 
0549     if (data_direction == DMA_NONE)
0550         return;
0551 
0552     for (i = 0; i < num_descriptors; i++)
0553         dma_unmap_single(&pci_dev->dev,
0554             (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
0555             get_unaligned_le32(&descriptors[i].length),
0556             data_direction);
0557 }
0558 
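/*
 * Build a RAID-path pass-through request around the caller's buffer, fill in
 * the CDB for the given command, and DMA-map the buffer as a single SG
 * descriptor.  On success the caller must unmap with pqi_pci_unmap() after
 * the request completes.
 */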
0559 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
0560     struct pqi_raid_path_request *request, u8 cmd,
0561     u8 *scsi3addr, void *buffer, size_t buffer_length,
0562     u16 vpd_page, enum dma_data_direction *dir)
0563 {
0564     u8 *cdb;
0565     size_t cdb_length = buffer_length;
0566 
0567     memset(request, 0, sizeof(*request));
0568 
0569     request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
0570     put_unaligned_le16(offsetof(struct pqi_raid_path_request,
0571         sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
0572         &request->header.iu_length);
0573     put_unaligned_le32(buffer_length, &request->buffer_length);
0574     memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
0575     request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
0576     request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
0577 
0578     cdb = request->cdb;
0579 
0580     switch (cmd) {
0581     case TEST_UNIT_READY:
0582         request->data_direction = SOP_READ_FLAG;
0583         cdb[0] = TEST_UNIT_READY;
0584         break;
0585     case INQUIRY:
0586         request->data_direction = SOP_READ_FLAG;
0587         cdb[0] = INQUIRY;
0588         if (vpd_page & VPD_PAGE) {
0589             cdb[1] = 0x1;
0590             cdb[2] = (u8)vpd_page;
0591         }
0592         cdb[4] = (u8)cdb_length;
0593         break;
0594     case CISS_REPORT_LOG:
0595     case CISS_REPORT_PHYS:
0596         request->data_direction = SOP_READ_FLAG;
0597         cdb[0] = cmd;
0598         if (cmd == CISS_REPORT_PHYS) {
0599             if (ctrl_info->rpl_extended_format_4_5_supported)
0600                 cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
0601             else
0602                 cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
0603         } else {
0604             cdb[1] = ctrl_info->ciss_report_log_flags;
0605         }
0606         put_unaligned_be32(cdb_length, &cdb[6]);
0607         break;
0608     case CISS_GET_RAID_MAP:
0609         request->data_direction = SOP_READ_FLAG;
0610         cdb[0] = CISS_READ;
0611         cdb[1] = CISS_GET_RAID_MAP;
0612         put_unaligned_be32(cdb_length, &cdb[6]);
0613         break;
0614     case SA_FLUSH_CACHE:
0615         request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
0616         request->data_direction = SOP_WRITE_FLAG;
0617         cdb[0] = BMIC_WRITE;
0618         cdb[6] = BMIC_FLUSH_CACHE;
0619         put_unaligned_be16(cdb_length, &cdb[7]);
0620         break;
0621     case BMIC_SENSE_DIAG_OPTIONS:
0622         cdb_length = 0;
0623         fallthrough;
0624     case BMIC_IDENTIFY_CONTROLLER:
0625     case BMIC_IDENTIFY_PHYSICAL_DEVICE:
0626     case BMIC_SENSE_SUBSYSTEM_INFORMATION:
0627     case BMIC_SENSE_FEATURE:
0628         request->data_direction = SOP_READ_FLAG;
0629         cdb[0] = BMIC_READ;
0630         cdb[6] = cmd;
0631         put_unaligned_be16(cdb_length, &cdb[7]);
0632         break;
0633     case BMIC_SET_DIAG_OPTIONS:
0634         cdb_length = 0;
0635         fallthrough;
0636     case BMIC_WRITE_HOST_WELLNESS:
0637         request->data_direction = SOP_WRITE_FLAG;
0638         cdb[0] = BMIC_WRITE;
0639         cdb[6] = cmd;
0640         put_unaligned_be16(cdb_length, &cdb[7]);
0641         break;
0642     case BMIC_CSMI_PASSTHRU:
0643         request->data_direction = SOP_BIDIRECTIONAL;
0644         cdb[0] = BMIC_WRITE;
0645         cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
0646         cdb[6] = cmd;
0647         put_unaligned_be16(cdb_length, &cdb[7]);
0648         break;
0649     default:
0650         dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", cmd);
0651         break;
0652     }
0653 
0654     switch (request->data_direction) {
0655     case SOP_READ_FLAG:
0656         *dir = DMA_FROM_DEVICE;
0657         break;
0658     case SOP_WRITE_FLAG:
0659         *dir = DMA_TO_DEVICE;
0660         break;
0661     case SOP_NO_DIRECTION_FLAG:
0662         *dir = DMA_NONE;
0663         break;
0664     default:
0665         *dir = DMA_BIDIRECTIONAL;
0666         break;
0667     }
0668 
0669     return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
0670         buffer, buffer_length, *dir);
0671 }
0672 
0673 static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
0674 {
0675     io_request->scmd = NULL;
0676     io_request->status = 0;
0677     io_request->error_info = NULL;
0678     io_request->raid_bypass = false;
0679 }
0680 
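/*
 * Claim a free slot in the pre-allocated I/O request pool by scanning from
 * next_io_request_slot.  A slot is owned by whichever caller bumps its
 * refcount from 0 to 1; the unlocked read/update of next_io_request_slot is
 * only a starting hint, hence "benignly racy".
 */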
0681 static struct pqi_io_request *pqi_alloc_io_request(
0682     struct pqi_ctrl_info *ctrl_info)
0683 {
0684     struct pqi_io_request *io_request;
0685     u16 i = ctrl_info->next_io_request_slot;    /* benignly racy */
0686 
0687     while (1) {
0688         io_request = &ctrl_info->io_request_pool[i];
0689         if (atomic_inc_return(&io_request->refcount) == 1)
0690             break;
0691         atomic_dec(&io_request->refcount);
0692         i = (i + 1) % ctrl_info->max_io_slots;
0693     }
0694 
0695     /* benignly racy */
0696     ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
0697 
0698     pqi_reinit_io_request(io_request);
0699 
0700     return io_request;
0701 }
0702 
0703 static void pqi_free_io_request(struct pqi_io_request *io_request)
0704 {
0705     atomic_dec(&io_request->refcount);
0706 }
0707 
0708 static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
0709     u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
0710     struct pqi_raid_error_info *error_info)
0711 {
0712     int rc;
0713     struct pqi_raid_path_request request;
0714     enum dma_data_direction dir;
0715 
0716     rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
0717         buffer, buffer_length, vpd_page, &dir);
0718     if (rc)
0719         return rc;
0720 
0721     rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);
0722 
0723     pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
0724 
0725     return rc;
0726 }
0727 
0728 /* helper functions for pqi_send_scsi_raid_request */
0729 
0730 static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
0731     u8 cmd, void *buffer, size_t buffer_length)
0732 {
0733     return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
0734         buffer, buffer_length, 0, NULL);
0735 }
0736 
0737 static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
0738     u8 cmd, void *buffer, size_t buffer_length,
0739     struct pqi_raid_error_info *error_info)
0740 {
0741     return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
0742         buffer, buffer_length, 0, error_info);
0743 }
0744 
0745 static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
0746     struct bmic_identify_controller *buffer)
0747 {
0748     return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
0749         buffer, sizeof(*buffer));
0750 }
0751 
0752 static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
0753     struct bmic_sense_subsystem_info *sense_info)
0754 {
0755     return pqi_send_ctrl_raid_request(ctrl_info,
0756         BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
0757         sizeof(*sense_info));
0758 }
0759 
0760 static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
0761     u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
0762 {
0763     return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
0764         buffer, buffer_length, vpd_page, NULL);
0765 }
0766 
0767 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
0768     struct pqi_scsi_dev *device,
0769     struct bmic_identify_physical_device *buffer, size_t buffer_length)
0770 {
0771     int rc;
0772     enum dma_data_direction dir;
0773     u16 bmic_device_index;
0774     struct pqi_raid_path_request request;
0775 
0776     rc = pqi_build_raid_path_request(ctrl_info, &request,
0777         BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
0778         buffer_length, 0, &dir);
0779     if (rc)
0780         return rc;
0781 
0782     bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
0783     request.cdb[2] = (u8)bmic_device_index;
0784     request.cdb[9] = (u8)(bmic_device_index >> 8);
0785 
0786     rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
0787 
0788     pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
0789 
0790     return rc;
0791 }
0792 
0793 static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
0794 {
0795     u32 bytes;
0796 
0797     bytes = get_unaligned_le16(limit);
0798     if (bytes == 0)
0799         bytes = ~0;
0800     else
0801         bytes *= 1024;
0802 
0803     return bytes;
0804 }
0805 
0806 #pragma pack(1)
0807 
0808 struct bmic_sense_feature_buffer {
0809     struct bmic_sense_feature_buffer_header header;
0810     struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
0811 };
0812 
0813 #pragma pack()
0814 
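/*
 * Minimum acceptable sizes for the BMIC sense-feature response: the buffer
 * must extend at least through the max_write_raid_1_10_3drive field, and the
 * reported AIO subpage length (which excludes its own header) must cover the
 * same fields.
 */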
0815 #define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH   \
0816     offsetofend(struct bmic_sense_feature_buffer, \
0817         aio_subpage.max_write_raid_1_10_3drive)
0818 
0819 #define MINIMUM_AIO_SUBPAGE_LENGTH  \
0820     (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
0821         max_write_raid_1_10_3drive) - \
0822         sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))
0823 
0824 static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
0825 {
0826     int rc;
0827     enum dma_data_direction dir;
0828     struct pqi_raid_path_request request;
0829     struct bmic_sense_feature_buffer *buffer;
0830 
0831     buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
0832     if (!buffer)
0833         return -ENOMEM;
0834 
0835     rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
0836         buffer, sizeof(*buffer), 0, &dir);
0837     if (rc)
0838         goto error;
0839 
0840     request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
0841     request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;
0842 
0843     rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
0844 
0845     pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
0846 
0847     if (rc)
0848         goto error;
0849 
0850     if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
0851         buffer->header.subpage_code !=
0852             BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
0853         get_unaligned_le16(&buffer->header.buffer_length) <
0854             MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
0855         buffer->aio_subpage.header.page_code !=
0856             BMIC_SENSE_FEATURE_IO_PAGE ||
0857         buffer->aio_subpage.header.subpage_code !=
0858             BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
0859         get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
0860             MINIMUM_AIO_SUBPAGE_LENGTH) {
0861         goto error;
0862     }
0863 
0864     ctrl_info->max_transfer_encrypted_sas_sata =
0865         pqi_aio_limit_to_bytes(
0866             &buffer->aio_subpage.max_transfer_encrypted_sas_sata);
0867 
0868     ctrl_info->max_transfer_encrypted_nvme =
0869         pqi_aio_limit_to_bytes(
0870             &buffer->aio_subpage.max_transfer_encrypted_nvme);
0871 
0872     ctrl_info->max_write_raid_5_6 =
0873         pqi_aio_limit_to_bytes(
0874             &buffer->aio_subpage.max_write_raid_5_6);
0875 
0876     ctrl_info->max_write_raid_1_10_2drive =
0877         pqi_aio_limit_to_bytes(
0878             &buffer->aio_subpage.max_write_raid_1_10_2drive);
0879 
0880     ctrl_info->max_write_raid_1_10_3drive =
0881         pqi_aio_limit_to_bytes(
0882             &buffer->aio_subpage.max_write_raid_1_10_3drive);
0883 
0884 error:
0885     kfree(buffer);
0886 
0887     return rc;
0888 }
0889 
0890 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
0891     enum bmic_flush_cache_shutdown_event shutdown_event)
0892 {
0893     int rc;
0894     struct bmic_flush_cache *flush_cache;
0895 
0896     flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
0897     if (!flush_cache)
0898         return -ENOMEM;
0899 
0900     flush_cache->shutdown_event = shutdown_event;
0901 
0902     rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
0903         sizeof(*flush_cache));
0904 
0905     kfree(flush_cache);
0906 
0907     return rc;
0908 }
0909 
0910 int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
0911     struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
0912     struct pqi_raid_error_info *error_info)
0913 {
0914     return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
0915         buffer, buffer_length, error_info);
0916 }
0917 
0918 #define PQI_FETCH_PTRAID_DATA       (1 << 31)
0919 
0920 static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
0921 {
0922     int rc;
0923     struct bmic_diag_options *diag;
0924 
0925     diag = kzalloc(sizeof(*diag), GFP_KERNEL);
0926     if (!diag)
0927         return -ENOMEM;
0928 
0929     rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
0930         diag, sizeof(*diag));
0931     if (rc)
0932         goto out;
0933 
0934     diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
0935 
0936     rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
0937         sizeof(*diag));
0938 
0939 out:
0940     kfree(diag);
0941 
0942     return rc;
0943 }
0944 
0945 static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
0946     void *buffer, size_t buffer_length)
0947 {
0948     return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
0949         buffer, buffer_length);
0950 }
0951 
0952 #pragma pack(1)
0953 
0954 struct bmic_host_wellness_driver_version {
0955     u8  start_tag[4];
0956     u8  driver_version_tag[2];
0957     __le16  driver_version_length;
0958     char    driver_version[32];
0959     u8  dont_write_tag[2];
0960     u8  end_tag[2];
0961 };
0962 
0963 #pragma pack()
0964 
0965 static int pqi_write_driver_version_to_host_wellness(
0966     struct pqi_ctrl_info *ctrl_info)
0967 {
0968     int rc;
0969     struct bmic_host_wellness_driver_version *buffer;
0970     size_t buffer_length;
0971 
0972     buffer_length = sizeof(*buffer);
0973 
0974     buffer = kmalloc(buffer_length, GFP_KERNEL);
0975     if (!buffer)
0976         return -ENOMEM;
0977 
0978     buffer->start_tag[0] = '<';
0979     buffer->start_tag[1] = 'H';
0980     buffer->start_tag[2] = 'W';
0981     buffer->start_tag[3] = '>';
0982     buffer->driver_version_tag[0] = 'D';
0983     buffer->driver_version_tag[1] = 'V';
0984     put_unaligned_le16(sizeof(buffer->driver_version),
0985         &buffer->driver_version_length);
0986     strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
0987         sizeof(buffer->driver_version) - 1);
0988     buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
0989     buffer->dont_write_tag[0] = 'D';
0990     buffer->dont_write_tag[1] = 'W';
0991     buffer->end_tag[0] = 'Z';
0992     buffer->end_tag[1] = 'Z';
0993 
0994     rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
0995 
0996     kfree(buffer);
0997 
0998     return rc;
0999 }
1000 
1001 #pragma pack(1)
1002 
1003 struct bmic_host_wellness_time {
1004     u8  start_tag[4];
1005     u8  time_tag[2];
1006     __le16  time_length;
1007     u8  time[8];
1008     u8  dont_write_tag[2];
1009     u8  end_tag[2];
1010 };
1011 
1012 #pragma pack()
1013 
1014 static int pqi_write_current_time_to_host_wellness(
1015     struct pqi_ctrl_info *ctrl_info)
1016 {
1017     int rc;
1018     struct bmic_host_wellness_time *buffer;
1019     size_t buffer_length;
1020     time64_t local_time;
1021     unsigned int year;
1022     struct tm tm;
1023 
1024     buffer_length = sizeof(*buffer);
1025 
1026     buffer = kmalloc(buffer_length, GFP_KERNEL);
1027     if (!buffer)
1028         return -ENOMEM;
1029 
1030     buffer->start_tag[0] = '<';
1031     buffer->start_tag[1] = 'H';
1032     buffer->start_tag[2] = 'W';
1033     buffer->start_tag[3] = '>';
1034     buffer->time_tag[0] = 'T';
1035     buffer->time_tag[1] = 'D';
1036     put_unaligned_le16(sizeof(buffer->time),
1037         &buffer->time_length);
1038 
1039     local_time = ktime_get_real_seconds();
1040     time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
1041     year = tm.tm_year + 1900;
1042 
1043     buffer->time[0] = bin2bcd(tm.tm_hour);
1044     buffer->time[1] = bin2bcd(tm.tm_min);
1045     buffer->time[2] = bin2bcd(tm.tm_sec);
1046     buffer->time[3] = 0;
1047     buffer->time[4] = bin2bcd(tm.tm_mon + 1);
1048     buffer->time[5] = bin2bcd(tm.tm_mday);
1049     buffer->time[6] = bin2bcd(year / 100);
1050     buffer->time[7] = bin2bcd(year % 100);
1051 
1052     buffer->dont_write_tag[0] = 'D';
1053     buffer->dont_write_tag[1] = 'W';
1054     buffer->end_tag[0] = 'Z';
1055     buffer->end_tag[1] = 'Z';
1056 
1057     rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
1058 
1059     kfree(buffer);
1060 
1061     return rc;
1062 }
1063 
1064 #define PQI_UPDATE_TIME_WORK_INTERVAL   (24UL * 60 * 60 * HZ)
1065 
1066 static void pqi_update_time_worker(struct work_struct *work)
1067 {
1068     int rc;
1069     struct pqi_ctrl_info *ctrl_info;
1070 
1071     ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1072         update_time_work);
1073 
1074     rc = pqi_write_current_time_to_host_wellness(ctrl_info);
1075     if (rc)
1076         dev_warn(&ctrl_info->pci_dev->dev,
1077             "error updating time on controller\n");
1078 
1079     schedule_delayed_work(&ctrl_info->update_time_work,
1080         PQI_UPDATE_TIME_WORK_INTERVAL);
1081 }
1082 
1083 static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
1084 {
1085     schedule_delayed_work(&ctrl_info->update_time_work, 0);
1086 }
1087 
1088 static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
1089 {
1090     cancel_delayed_work_sync(&ctrl_info->update_time_work);
1091 }
1092 
1093 static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
1094     size_t buffer_length)
1095 {
1096     return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
1097 }
1098 
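/*
 * Issue CISS_REPORT_PHYS/LOG in two passes: read just the header to learn
 * the list length, then read the full list.  If the list grew between the
 * two reads, retry with the larger length.
 */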
1099 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
1100 {
1101     int rc;
1102     size_t lun_list_length;
1103     size_t lun_data_length;
1104     size_t new_lun_list_length;
1105     void *lun_data = NULL;
1106     struct report_lun_header *report_lun_header;
1107 
1108     report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
1109     if (!report_lun_header) {
1110         rc = -ENOMEM;
1111         goto out;
1112     }
1113 
1114     rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
1115     if (rc)
1116         goto out;
1117 
1118     lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
1119 
1120 again:
1121     lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
1122 
1123     lun_data = kmalloc(lun_data_length, GFP_KERNEL);
1124     if (!lun_data) {
1125         rc = -ENOMEM;
1126         goto out;
1127     }
1128 
1129     if (lun_list_length == 0) {
1130         memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
1131         goto out;
1132     }
1133 
1134     rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
1135     if (rc)
1136         goto out;
1137 
1138     new_lun_list_length =
1139         get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);
1140 
1141     if (new_lun_list_length > lun_list_length) {
1142         lun_list_length = new_lun_list_length;
1143         kfree(lun_data);
1144         goto again;
1145     }
1146 
1147 out:
1148     kfree(report_lun_header);
1149 
1150     if (rc) {
1151         kfree(lun_data);
1152         lun_data = NULL;
1153     }
1154 
1155     *buffer = lun_data;
1156 
1157     return rc;
1158 }
1159 
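/*
 * Fetch the physical LUN list.  If the controller returned the older
 * extended format 2 (8-byte WWIDs), convert it into the extended format 4
 * layout (16-byte WWIDs with the upper 8 bytes zeroed) so callers only ever
 * see one format.
 */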
1160 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
1161 {
1162     int rc;
1163     unsigned int i;
1164     u8 rpl_response_format;
1165     u32 num_physicals;
1166     size_t rpl_16byte_wwid_list_length;
1167     void *rpl_list;
1168     struct report_lun_header *rpl_header;
1169     struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
1170     struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;
1171 
1172     rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
1173     if (rc)
1174         return rc;
1175 
1176     if (ctrl_info->rpl_extended_format_4_5_supported) {
1177         rpl_header = rpl_list;
1178         rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
1179         if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
1180             *buffer = rpl_list;
1181             return 0;
1182         } else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
1183             dev_err(&ctrl_info->pci_dev->dev,
1184                 "RPL returned unsupported data format %u\n",
1185                 rpl_response_format);
1186             return -EINVAL;
1187         } else {
1188             dev_warn(&ctrl_info->pci_dev->dev,
1189                 "RPL returned extended format 2 instead of 4\n");
1190         }
1191     }
1192 
1193     rpl_8byte_wwid_list = rpl_list;
1194     num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]);
1195     rpl_16byte_wwid_list_length = sizeof(struct report_lun_header) + (num_physicals * sizeof(struct report_phys_lun_16byte_wwid));
1196 
1197     rpl_16byte_wwid_list = kmalloc(rpl_16byte_wwid_list_length, GFP_KERNEL);
1198     if (!rpl_16byte_wwid_list)
1199         return -ENOMEM;
1200 
1201     put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
1202         &rpl_16byte_wwid_list->header.list_length);
1203     rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;
1204 
1205     for (i = 0; i < num_physicals; i++) {
1206         memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
1207         memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
1208         memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8);
1209         rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
1210         rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
1211         rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
1212         rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
1213         rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle;
1214     }
1215 
1216     kfree(rpl_8byte_wwid_list);
1217     *buffer = rpl_16byte_wwid_list;
1218 
1219     return 0;
1220 }
1221 
1222 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
1223 {
1224     return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
1225 }
1226 
1227 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
1228     struct report_phys_lun_16byte_wwid_list **physdev_list,
1229     struct report_log_lun_list **logdev_list)
1230 {
1231     int rc;
1232     size_t logdev_list_length;
1233     size_t logdev_data_length;
1234     struct report_log_lun_list *internal_logdev_list;
1235     struct report_log_lun_list *logdev_data;
1236     struct report_lun_header report_lun_header;
1237 
1238     rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
1239     if (rc)
1240         dev_err(&ctrl_info->pci_dev->dev,
1241             "report physical LUNs failed\n");
1242 
1243     rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
1244     if (rc)
1245         dev_err(&ctrl_info->pci_dev->dev,
1246             "report logical LUNs failed\n");
1247 
1248     /*
1249      * Tack the controller itself onto the end of the logical device list.
1250      */
1251 
1252     logdev_data = *logdev_list;
1253 
1254     if (logdev_data) {
1255         logdev_list_length =
1256             get_unaligned_be32(&logdev_data->header.list_length);
1257     } else {
1258         memset(&report_lun_header, 0, sizeof(report_lun_header));
1259         logdev_data =
1260             (struct report_log_lun_list *)&report_lun_header;
1261         logdev_list_length = 0;
1262     }
1263 
1264     logdev_data_length = sizeof(struct report_lun_header) +
1265         logdev_list_length;
1266 
1267     internal_logdev_list = kmalloc(logdev_data_length +
1268         sizeof(struct report_log_lun), GFP_KERNEL);
1269     if (!internal_logdev_list) {
1270         kfree(*logdev_list);
1271         *logdev_list = NULL;
1272         return -ENOMEM;
1273     }
1274 
1275     memcpy(internal_logdev_list, logdev_data, logdev_data_length);
1276     memset((u8 *)internal_logdev_list + logdev_data_length, 0,
1277         sizeof(struct report_log_lun));
1278     put_unaligned_be32(logdev_list_length +
1279         sizeof(struct report_log_lun),
1280         &internal_logdev_list->header.list_length);
1281 
1282     kfree(*logdev_list);
1283     *logdev_list = internal_logdev_list;
1284 
1285     return 0;
1286 }
1287 
1288 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
1289     int bus, int target, int lun)
1290 {
1291     device->bus = bus;
1292     device->target = target;
1293     device->lun = lun;
1294 }
1295 
1296 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
1297 {
1298     u8 *scsi3addr;
1299     u32 lunid;
1300     int bus;
1301     int target;
1302     int lun;
1303 
1304     scsi3addr = device->scsi3addr;
1305     lunid = get_unaligned_le32(scsi3addr);
1306 
1307     if (pqi_is_hba_lunid(scsi3addr)) {
1308         /* The specified device is the controller. */
1309         pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
1310         device->target_lun_valid = true;
1311         return;
1312     }
1313 
1314     if (pqi_is_logical_device(device)) {
1315         if (device->is_external_raid_device) {
1316             bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
1317             target = (lunid >> 16) & 0x3fff;
1318             lun = lunid & 0xff;
1319         } else {
1320             bus = PQI_RAID_VOLUME_BUS;
1321             target = 0;
1322             lun = lunid & 0x3fff;
1323         }
1324         pqi_set_bus_target_lun(device, bus, target, lun);
1325         device->target_lun_valid = true;
1326         return;
1327     }
1328 
1329     /*
1330      * Defer target and LUN assignment for non-controller physical devices
1331      * because the SAS transport layer will make these assignments later.
1332      */
1333     pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
1334 }
1335 
1336 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
1337     struct pqi_scsi_dev *device)
1338 {
1339     int rc;
1340     u8 raid_level;
1341     u8 *buffer;
1342 
1343     raid_level = SA_RAID_UNKNOWN;
1344 
1345     buffer = kmalloc(64, GFP_KERNEL);
1346     if (buffer) {
1347         rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1348             VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
1349         if (rc == 0) {
1350             raid_level = buffer[8];
1351             if (raid_level > SA_RAID_MAX)
1352                 raid_level = SA_RAID_UNKNOWN;
1353         }
1354         kfree(buffer);
1355     }
1356 
1357     device->raid_level = raid_level;
1358 }
1359 
1360 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
1361     struct pqi_scsi_dev *device, struct raid_map *raid_map)
1362 {
1363     char *err_msg;
1364     u32 raid_map_size;
1365     u32 r5or6_blocks_per_row;
1366 
1367     raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1368 
1369     if (raid_map_size < offsetof(struct raid_map, disk_data)) {
1370         err_msg = "RAID map too small";
1371         goto bad_raid_map;
1372     }
1373 
1374     if (device->raid_level == SA_RAID_1) {
1375         if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
1376             err_msg = "invalid RAID-1 map";
1377             goto bad_raid_map;
1378         }
1379     } else if (device->raid_level == SA_RAID_TRIPLE) {
1380         if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
1381             err_msg = "invalid RAID-1(Triple) map";
1382             goto bad_raid_map;
1383         }
1384     } else if ((device->raid_level == SA_RAID_5 ||
1385         device->raid_level == SA_RAID_6) &&
1386         get_unaligned_le16(&raid_map->layout_map_count) > 1) {
1387         /* RAID 50/60 */
1388         r5or6_blocks_per_row =
1389             get_unaligned_le16(&raid_map->strip_size) *
1390             get_unaligned_le16(&raid_map->data_disks_per_row);
1391         if (r5or6_blocks_per_row == 0) {
1392             err_msg = "invalid RAID-5 or RAID-6 map";
1393             goto bad_raid_map;
1394         }
1395     }
1396 
1397     return 0;
1398 
1399 bad_raid_map:
1400     dev_warn(&ctrl_info->pci_dev->dev,
1401         "logical device %08x%08x %s\n",
1402         *((u32 *)&device->scsi3addr),
1403         *((u32 *)&device->scsi3addr[4]), err_msg);
1404 
1405     return -EINVAL;
1406 }
1407 
1408 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1409     struct pqi_scsi_dev *device)
1410 {
1411     int rc;
1412     u32 raid_map_size;
1413     struct raid_map *raid_map;
1414 
1415     raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1416     if (!raid_map)
1417         return -ENOMEM;
1418 
1419     rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1420         device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
1421     if (rc)
1422         goto error;
1423 
1424     raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1425 
1426     if (raid_map_size > sizeof(*raid_map)) {
1427 
1428         kfree(raid_map);
1429 
1430         raid_map = kmalloc(raid_map_size, GFP_KERNEL);
1431         if (!raid_map)
1432             return -ENOMEM;
1433 
1434         rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1435             device->scsi3addr, raid_map, raid_map_size, 0, NULL);
1436         if (rc)
1437             goto error;
1438 
1439         if (get_unaligned_le32(&raid_map->structure_size)
1440             != raid_map_size) {
1441             dev_warn(&ctrl_info->pci_dev->dev,
1442                 "requested %u bytes, received %u bytes\n",
1443                 raid_map_size,
1444                 get_unaligned_le32(&raid_map->structure_size));
1445             rc = -EINVAL;
1446             goto error;
1447         }
1448     }
1449 
1450     rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1451     if (rc)
1452         goto error;
1453 
1454     device->raid_map = raid_map;
1455 
1456     return 0;
1457 
1458 error:
1459     kfree(raid_map);
1460 
1461     return rc;
1462 }
1463 
1464 static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
1465     struct pqi_scsi_dev *device)
1466 {
1467     if (!ctrl_info->lv_drive_type_mix_valid) {
1468         device->max_transfer_encrypted = ~0;
1469         return;
1470     }
1471 
1472     switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
1473     case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
1474     case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
1475     case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
1476     case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
1477     case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
1478     case LV_DRIVE_TYPE_MIX_SAS_ONLY:
1479     case LV_DRIVE_TYPE_MIX_SATA_ONLY:
1480         device->max_transfer_encrypted =
1481             ctrl_info->max_transfer_encrypted_sas_sata;
1482         break;
1483     case LV_DRIVE_TYPE_MIX_NVME_ONLY:
1484         device->max_transfer_encrypted =
1485             ctrl_info->max_transfer_encrypted_nvme;
1486         break;
1487     case LV_DRIVE_TYPE_MIX_UNKNOWN:
1488     case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
1489     default:
1490         device->max_transfer_encrypted =
1491             min(ctrl_info->max_transfer_encrypted_sas_sata,
1492                 ctrl_info->max_transfer_encrypted_nvme);
1493         break;
1494     }
1495 }
1496 
1497 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
1498     struct pqi_scsi_dev *device)
1499 {
1500     int rc;
1501     u8 *buffer;
1502     u8 bypass_status;
1503 
1504     buffer = kmalloc(64, GFP_KERNEL);
1505     if (!buffer)
1506         return;
1507 
1508     rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1509         VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
1510     if (rc)
1511         goto out;
1512 
1513 #define RAID_BYPASS_STATUS      4
1514 #define RAID_BYPASS_CONFIGURED      0x1
1515 #define RAID_BYPASS_ENABLED     0x2
1516 
1517     bypass_status = buffer[RAID_BYPASS_STATUS];
1518     device->raid_bypass_configured =
1519         (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
1520     if (device->raid_bypass_configured &&
1521         (bypass_status & RAID_BYPASS_ENABLED) &&
1522         pqi_get_raid_map(ctrl_info, device) == 0) {
1523         device->raid_bypass_enabled = true;
1524         if (get_unaligned_le16(&device->raid_map->flags) &
1525             RAID_MAP_ENCRYPTION_ENABLED)
1526             pqi_set_max_transfer_encrypted(ctrl_info, device);
1527     }
1528 
1529 out:
1530     kfree(buffer);
1531 }
1532 
1533 /*
1534  * Use vendor-specific VPD to determine online/offline status of a volume.
1535  */
1536 
1537 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1538     struct pqi_scsi_dev *device)
1539 {
1540     int rc;
1541     size_t page_length;
1542     u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1543     bool volume_offline = true;
1544     u32 volume_flags;
1545     struct ciss_vpd_logical_volume_status *vpd;
1546 
1547     vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1548     if (!vpd)
1549         goto no_buffer;
1550 
1551     rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1552         VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1553     if (rc)
1554         goto out;
1555 
1556     if (vpd->page_code != CISS_VPD_LV_STATUS)
1557         goto out;
1558 
1559     page_length = offsetof(struct ciss_vpd_logical_volume_status,
1560         volume_status) + vpd->page_length;
1561     if (page_length < sizeof(*vpd))
1562         goto out;
1563 
1564     volume_status = vpd->volume_status;
1565     volume_flags = get_unaligned_be32(&vpd->flags);
1566     volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1567 
1568 out:
1569     kfree(vpd);
1570 no_buffer:
1571     device->volume_status = volume_status;
1572     device->volume_offline = volume_offline;
1573 }
1574 
1575 #define PQI_DEVICE_NCQ_PRIO_SUPPORTED   0x01
1576 #define PQI_DEVICE_PHY_MAP_SUPPORTED    0x10
1577 
1578 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
1579     struct pqi_scsi_dev *device,
1580     struct bmic_identify_physical_device *id_phys)
1581 {
1582     int rc;
1583 
1584     memset(id_phys, 0, sizeof(*id_phys));
1585 
1586     rc = pqi_identify_physical_device(ctrl_info, device,
1587         id_phys, sizeof(*id_phys));
1588     if (rc) {
1589         device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1590         return rc;
1591     }
1592 
1593     scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
1594     scsi_sanitize_inquiry_string(&id_phys->model[8], 16);
1595 
1596     memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
1597     memcpy(device->model, &id_phys->model[8], sizeof(device->model));
1598 
1599     device->box_index = id_phys->box_index;
1600     device->phys_box_on_bus = id_phys->phys_box_on_bus;
1601     device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
1602     device->queue_depth =
1603         get_unaligned_le16(&id_phys->current_queue_depth_limit);
1604     device->active_path_index = id_phys->active_path_number;
1605     device->path_map = id_phys->redundant_path_present_map;
1606     memcpy(&device->box,
1607         &id_phys->alternate_paths_phys_box_on_port,
1608         sizeof(device->box));
1609     memcpy(&device->phys_connector,
1610         &id_phys->alternate_paths_phys_connector,
1611         sizeof(device->phys_connector));
1612     device->bay = id_phys->phys_bay_in_box;
1613     device->multi_lun_device_lun_count = id_phys->multi_lun_device_lun_count;
1614     if (!device->multi_lun_device_lun_count)
1615         device->multi_lun_device_lun_count = 1;
1616     if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
1617         id_phys->phy_count)
1618         device->phy_id =
1619             id_phys->phy_to_phy_map[device->active_path_index];
1620     else
1621         device->phy_id = 0xFF;
1622 
1623     device->ncq_prio_support =
1624         ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
1625         PQI_DEVICE_NCQ_PRIO_SUPPORTED);
1626 
1627     return 0;
1628 }
1629 
1630 static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
1631     struct pqi_scsi_dev *device)
1632 {
1633     int rc;
1634     u8 *buffer;
1635 
1636     buffer = kmalloc(64, GFP_KERNEL);
1637     if (!buffer)
1638         return -ENOMEM;
1639 
1640     /* Send an inquiry to the device to see what it is. */
1641     rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1642     if (rc)
1643         goto out;
1644 
1645     scsi_sanitize_inquiry_string(&buffer[8], 8);
1646     scsi_sanitize_inquiry_string(&buffer[16], 16);
1647 
1648     device->devtype = buffer[0] & 0x1f;
1649     memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
1650     memcpy(device->model, &buffer[16], sizeof(device->model));
1651 
1652     if (device->devtype == TYPE_DISK) {
1653         if (device->is_external_raid_device) {
1654             device->raid_level = SA_RAID_UNKNOWN;
1655             device->volume_status = CISS_LV_OK;
1656             device->volume_offline = false;
1657         } else {
1658             pqi_get_raid_level(ctrl_info, device);
1659             pqi_get_raid_bypass_status(ctrl_info, device);
1660             pqi_get_volume_status(ctrl_info, device);
1661         }
1662     }
1663 
1664 out:
1665     kfree(buffer);
1666 
1667     return rc;
1668 }
1669 
1670 /*
1671  * Prevent adding a drive to the OS in some corner cases, such as a
1672  * drive undergoing a sanitize operation. Some OSes will continue to
1673  * poll the drive until the sanitize completes, which can take hours
1674  * and result in long boot delays. Commands such as TUR and READ_CAP
1675  * are allowed, but READ/WRITE return a check condition, so the OS
1676  * cannot check/read the partition table.
1677  * Note: devices that have completed sanitize must be re-enabled
1678  *       using the management utility.
1679  */
1680 static bool pqi_keep_device_offline(struct pqi_ctrl_info *ctrl_info,
1681     struct pqi_scsi_dev *device)
1682 {
1683     u8 scsi_status;
1684     int rc;
1685     enum dma_data_direction dir;
1686     char *buffer;
1687     int buffer_length = 64;
1688     size_t sense_data_length;
1689     struct scsi_sense_hdr sshdr;
1690     struct pqi_raid_path_request request;
1691     struct pqi_raid_error_info error_info;
1692     bool offline = false; /* Assume keep online */
1693 
1694     /* Do not check controllers. */
1695     if (pqi_is_hba_lunid(device->scsi3addr))
1696         return false;
1697 
1698     /* Do not check LVs. */
1699     if (pqi_is_logical_device(device))
1700         return false;
1701 
1702     buffer = kmalloc(buffer_length, GFP_KERNEL);
1703     if (!buffer)
1704         return false; /* Assume not offline */
1705 
1706     /* Check for SANITIZE in progress using TUR */
1707     rc = pqi_build_raid_path_request(ctrl_info, &request,
1708         TEST_UNIT_READY, RAID_CTLR_LUNID, buffer,
1709         buffer_length, 0, &dir);
1710     if (rc)
1711         goto out; /* Assume not offline */
1712 
1713     memcpy(request.lun_number, device->scsi3addr, sizeof(request.lun_number));
1714 
1715     rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, &error_info);
1716 
1717     if (rc)
1718         goto out; /* Assume not offline */
1719 
1720     scsi_status = error_info.status;
1721     sense_data_length = get_unaligned_le16(&error_info.sense_data_length);
1722     if (sense_data_length == 0)
1723         sense_data_length =
1724             get_unaligned_le16(&error_info.response_data_length);
1725     if (sense_data_length) {
1726         if (sense_data_length > sizeof(error_info.data))
1727             sense_data_length = sizeof(error_info.data);
1728 
1729         /*
1730          * Check for sanitize in progress: asc:0x04, ascq: 0x1b
1731          */
1732         if (scsi_status == SAM_STAT_CHECK_CONDITION &&
1733             scsi_normalize_sense(error_info.data,
1734                 sense_data_length, &sshdr) &&
1735                 sshdr.sense_key == NOT_READY &&
1736                 sshdr.asc == 0x04 &&
1737                 sshdr.ascq == 0x1b) {
1738             device->device_offline = true;
1739             offline = true;
1740             goto out; /* Keep device offline */
1741         }
1742     }
1743 
1744 out:
1745     kfree(buffer);
1746     return offline;
1747 }
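
/*
 * In pqi_keep_device_offline() above, a TEST UNIT READY is issued
 * through the RAID path and the returned sense data is decoded with
 * scsi_normalize_sense(). Per SPC, sense key NOT READY with asc 0x04 /
 * ascq 0x1b means "logical unit not ready, sanitize in progress"; that
 * is the only condition that keeps the device hidden from the OS, and
 * every error or allocation failure falls back to "assume not offline".
 */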
1748 
1749 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1750     struct pqi_scsi_dev *device,
1751     struct bmic_identify_physical_device *id_phys)
1752 {
1753     int rc;
1754 
1755     if (device->is_expander_smp_device)
1756         return 0;
1757 
1758     if (pqi_is_logical_device(device))
1759         rc = pqi_get_logical_device_info(ctrl_info, device);
1760     else
1761         rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);
1762 
1763     return rc;
1764 }
1765 
1766 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1767     struct pqi_scsi_dev *device)
1768 {
1769     char *status;
1770     static const char unknown_state_str[] =
1771         "Volume is in an unknown state (%u)";
1772     char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1773 
1774     switch (device->volume_status) {
1775     case CISS_LV_OK:
1776         status = "Volume online";
1777         break;
1778     case CISS_LV_FAILED:
1779         status = "Volume failed";
1780         break;
1781     case CISS_LV_NOT_CONFIGURED:
1782         status = "Volume not configured";
1783         break;
1784     case CISS_LV_DEGRADED:
1785         status = "Volume degraded";
1786         break;
1787     case CISS_LV_READY_FOR_RECOVERY:
1788         status = "Volume ready for recovery operation";
1789         break;
1790     case CISS_LV_UNDERGOING_RECOVERY:
1791         status = "Volume undergoing recovery";
1792         break;
1793     case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1794         status = "Wrong physical drive was replaced";
1795         break;
1796     case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1797         status = "A physical drive not properly connected";
1798         break;
1799     case CISS_LV_HARDWARE_OVERHEATING:
1800         status = "Hardware is overheating";
1801         break;
1802     case CISS_LV_HARDWARE_HAS_OVERHEATED:
1803         status = "Hardware has overheated";
1804         break;
1805     case CISS_LV_UNDERGOING_EXPANSION:
1806         status = "Volume undergoing expansion";
1807         break;
1808     case CISS_LV_NOT_AVAILABLE:
1809         status = "Volume waiting for transforming volume";
1810         break;
1811     case CISS_LV_QUEUED_FOR_EXPANSION:
1812         status = "Volume queued for expansion";
1813         break;
1814     case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1815         status = "Volume disabled due to SCSI ID conflict";
1816         break;
1817     case CISS_LV_EJECTED:
1818         status = "Volume has been ejected";
1819         break;
1820     case CISS_LV_UNDERGOING_ERASE:
1821         status = "Volume undergoing background erase";
1822         break;
1823     case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1824         status = "Volume ready for predictive spare rebuild";
1825         break;
1826     case CISS_LV_UNDERGOING_RPI:
1827         status = "Volume undergoing rapid parity initialization";
1828         break;
1829     case CISS_LV_PENDING_RPI:
1830         status = "Volume queued for rapid parity initialization";
1831         break;
1832     case CISS_LV_ENCRYPTED_NO_KEY:
1833         status = "Encrypted volume inaccessible - key not present";
1834         break;
1835     case CISS_LV_UNDERGOING_ENCRYPTION:
1836         status = "Volume undergoing encryption process";
1837         break;
1838     case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1839         status = "Volume undergoing encryption re-keying process";
1840         break;
1841     case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1842         status = "Volume encrypted but encryption is disabled";
1843         break;
1844     case CISS_LV_PENDING_ENCRYPTION:
1845         status = "Volume pending migration to encrypted state";
1846         break;
1847     case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1848         status = "Volume pending encryption rekeying";
1849         break;
1850     case CISS_LV_NOT_SUPPORTED:
1851         status = "Volume not supported on this controller";
1852         break;
1853     case CISS_LV_STATUS_UNAVAILABLE:
1854         status = "Volume status not available";
1855         break;
1856     default:
1857         snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1858             unknown_state_str, device->volume_status);
1859         status = unknown_state_buffer;
1860         break;
1861     }
1862 
1863     dev_info(&ctrl_info->pci_dev->dev,
1864         "scsi %d:%d:%d:%d %s\n",
1865         ctrl_info->scsi_host->host_no,
1866         device->bus, device->target, device->lun, status);
1867 }
1868 
1869 static void pqi_rescan_worker(struct work_struct *work)
1870 {
1871     struct pqi_ctrl_info *ctrl_info;
1872 
1873     ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1874         rescan_work);
1875 
1876     pqi_scan_scsi_devices(ctrl_info);
1877 }
1878 
1879 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1880     struct pqi_scsi_dev *device)
1881 {
1882     int rc;
1883 
1884     if (pqi_is_logical_device(device))
1885         rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1886             device->target, device->lun);
1887     else
1888         rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1889 
1890     return rc;
1891 }
1892 
1893 #define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS  (20 * 1000)
1894 
1895 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
1896 {
1897     int rc;
1898     int lun;
1899 
1900     for (lun = 0; lun < device->multi_lun_device_lun_count; lun++) {
1901         rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
1902             PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
1903         if (rc)
1904             dev_err(&ctrl_info->pci_dev->dev,
1905                 "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
1906                 ctrl_info->scsi_host->host_no, device->bus,
1907                 device->target, lun,
1908                 atomic_read(&device->scsi_cmds_outstanding[lun]));
1909     }
1910 
1911     if (pqi_is_logical_device(device))
1912         scsi_remove_device(device->sdev);
1913     else
1914         pqi_remove_sas_device(device);
1915 
1916     pqi_device_remove_start(device);
1917 }
1918 
1919 /* Assumes the SCSI device list lock is held. */
1920 
1921 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1922     int bus, int target, int lun)
1923 {
1924     struct pqi_scsi_dev *device;
1925 
1926     list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
1927         if (device->bus == bus && device->target == target && device->lun == lun)
1928             return device;
1929 
1930     return NULL;
1931 }
1932 
1933 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
1934 {
1935     if (dev1->is_physical_device != dev2->is_physical_device)
1936         return false;
1937 
1938     if (dev1->is_physical_device)
1939         return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0;
1940 
1941     return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
1942 }
1943 
1944 enum pqi_find_result {
1945     DEVICE_NOT_FOUND,
1946     DEVICE_CHANGED,
1947     DEVICE_SAME,
1948 };
1949 
1950 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1951     struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
1952 {
1953     struct pqi_scsi_dev *device;
1954 
1955     list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
1956         if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
1957             *matching_device = device;
1958             if (pqi_device_equal(device_to_find, device)) {
1959                 if (device_to_find->volume_offline)
1960                     return DEVICE_CHANGED;
1961                 return DEVICE_SAME;
1962             }
1963             return DEVICE_CHANGED;
1964         }
1965     }
1966 
1967     return DEVICE_NOT_FOUND;
1968 }
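
/*
 * Device identity in pqi_scsi_find_entry()/pqi_device_equal(): two
 * entries refer to the same device only if their SCSI3 addresses match
 * and, beyond that, their WWIDs match for physical devices or their
 * volume IDs match for logical devices. A matching address with a
 * different identity (or an offline volume) is reported as
 * DEVICE_CHANGED, so the old entry is deleted and the new one added.
 */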
1969 
1970 static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
1971 {
1972     if (device->is_expander_smp_device)
1973         return "Enclosure SMP    ";
1974 
1975     return scsi_device_type(device->devtype);
1976 }
1977 
1978 #define PQI_DEV_INFO_BUFFER_LENGTH  128
1979 
1980 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1981     char *action, struct pqi_scsi_dev *device)
1982 {
1983     ssize_t count;
1984     char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
1985 
1986     count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
1987         "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
1988 
1989     if (device->target_lun_valid)
1990         count += scnprintf(buffer + count,
1991             PQI_DEV_INFO_BUFFER_LENGTH - count,
1992             "%d:%d",
1993             device->target,
1994             device->lun);
1995     else
1996         count += scnprintf(buffer + count,
1997             PQI_DEV_INFO_BUFFER_LENGTH - count,
1998             "-:-");
1999 
2000     if (pqi_is_logical_device(device))
2001         count += scnprintf(buffer + count,
2002             PQI_DEV_INFO_BUFFER_LENGTH - count,
2003             " %08x%08x",
2004             *((u32 *)&device->scsi3addr),
2005             *((u32 *)&device->scsi3addr[4]));
2006     else
2007         count += scnprintf(buffer + count,
2008             PQI_DEV_INFO_BUFFER_LENGTH - count,
2009             " %016llx%016llx",
2010             get_unaligned_be64(&device->wwid[0]),
2011             get_unaligned_be64(&device->wwid[8]));
2012 
2013     count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
2014         " %s %.8s %.16s ",
2015         pqi_device_type(device),
2016         device->vendor,
2017         device->model);
2018 
2019     if (pqi_is_logical_device(device)) {
2020         if (device->devtype == TYPE_DISK)
2021             count += scnprintf(buffer + count,
2022                 PQI_DEV_INFO_BUFFER_LENGTH - count,
2023                 "SSDSmartPathCap%c En%c %-12s",
2024                 device->raid_bypass_configured ? '+' : '-',
2025                 device->raid_bypass_enabled ? '+' : '-',
2026                 pqi_raid_level_to_string(device->raid_level));
2027     } else {
2028         count += scnprintf(buffer + count,
2029             PQI_DEV_INFO_BUFFER_LENGTH - count,
2030             "AIO%c", device->aio_enabled ? '+' : '-');
2031         if (device->devtype == TYPE_DISK ||
2032             device->devtype == TYPE_ZBC)
2033             count += scnprintf(buffer + count,
2034                 PQI_DEV_INFO_BUFFER_LENGTH - count,
2035                 " qd=%-6d", device->queue_depth);
2036     }
2037 
2038     dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
2039 }
2040 
2041 static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2)
2042 {
2043     u32 raid_map1_size;
2044     u32 raid_map2_size;
2045 
2046     if (raid_map1 == NULL || raid_map2 == NULL)
2047         return raid_map1 == raid_map2;
2048 
2049     raid_map1_size = get_unaligned_le32(&raid_map1->structure_size);
2050     raid_map2_size = get_unaligned_le32(&raid_map2->structure_size);
2051 
2052     if (raid_map1_size != raid_map2_size)
2053         return false;
2054 
2055     return memcmp(raid_map1, raid_map2, raid_map1_size) == 0;
2056 }
2057 
2058 /* Assumes the SCSI device list lock is held. */
2059 
2060 static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
2061     struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device)
2062 {
2063     existing_device->device_type = new_device->device_type;
2064     existing_device->bus = new_device->bus;
2065     if (new_device->target_lun_valid) {
2066         existing_device->target = new_device->target;
2067         existing_device->lun = new_device->lun;
2068         existing_device->target_lun_valid = true;
2069     }
2070 
2071     /* By definition, the scsi3addr and wwid fields are already the same. */
2072 
2073     existing_device->is_physical_device = new_device->is_physical_device;
2074     memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor));
2075     memcpy(existing_device->model, new_device->model, sizeof(existing_device->model));
2076     existing_device->sas_address = new_device->sas_address;
2077     existing_device->queue_depth = new_device->queue_depth;
2078     existing_device->device_offline = false;
2079 
2080     if (pqi_is_logical_device(existing_device)) {
2081         existing_device->is_external_raid_device = new_device->is_external_raid_device;
2082 
2083         if (existing_device->devtype == TYPE_DISK) {
2084             existing_device->raid_level = new_device->raid_level;
2085             existing_device->volume_status = new_device->volume_status;
2086             if (ctrl_info->logical_volume_rescan_needed)
2087                 existing_device->rescan = true;
2088             memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
2089             if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
2090                 kfree(existing_device->raid_map);
2091                 existing_device->raid_map = new_device->raid_map;
2092                 /* To prevent this from being freed later. */
2093                 new_device->raid_map = NULL;
2094             }
2095             existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
2096             existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
2097         }
2098     } else {
2099         existing_device->aio_enabled = new_device->aio_enabled;
2100         existing_device->aio_handle = new_device->aio_handle;
2101         existing_device->is_expander_smp_device = new_device->is_expander_smp_device;
2102         existing_device->active_path_index = new_device->active_path_index;
2103         existing_device->phy_id = new_device->phy_id;
2104         existing_device->path_map = new_device->path_map;
2105         existing_device->bay = new_device->bay;
2106         existing_device->box_index = new_device->box_index;
2107         existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
2108         existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
2109         memcpy(existing_device->box, new_device->box, sizeof(existing_device->box));
2110         memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector));
2111 
2112         existing_device->multi_lun_device_lun_count = new_device->multi_lun_device_lun_count;
2113         if (existing_device->multi_lun_device_lun_count == 0)
2114             existing_device->multi_lun_device_lun_count = 1;
2115     }
2116 }
2117 
2118 static inline void pqi_free_device(struct pqi_scsi_dev *device)
2119 {
2120     if (device) {
2121         kfree(device->raid_map);
2122         kfree(device);
2123     }
2124 }
2125 
2126 /*
2127  * Called when exposing a new device to the OS fails in order to re-adjust
2128  * our internal SCSI device list to match the SCSI ML's view.
2129  */
2130 
2131 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
2132     struct pqi_scsi_dev *device)
2133 {
2134     unsigned long flags;
2135 
2136     spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2137     list_del(&device->scsi_device_list_entry);
2138     spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2139 
2140     /* Allow the device structure to be freed later. */
2141     device->keep_device = false;
2142 }
2143 
2144 static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
2145 {
2146     if (device->is_expander_smp_device)
2147         return device->sas_port != NULL;
2148 
2149     return device->sdev != NULL;
2150 }
2151 
2152 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
2153     struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
2154 {
2155     int rc;
2156     unsigned int i;
2157     unsigned long flags;
2158     enum pqi_find_result find_result;
2159     struct pqi_scsi_dev *device;
2160     struct pqi_scsi_dev *next;
2161     struct pqi_scsi_dev *matching_device;
2162     LIST_HEAD(add_list);
2163     LIST_HEAD(delete_list);
2164 
2165     /*
2166      * The idea here is to do as little work as possible while holding the
2167      * spinlock.  That's why we go to great pains to defer anything other
2168      * than updating the internal device list until after we release the
2169      * spinlock.
2170      */
2171 
2172     spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2173 
2174     /* Assume that all devices in the existing list have gone away. */
2175     list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
2176         device->device_gone = true;
2177 
2178     for (i = 0; i < num_new_devices; i++) {
2179         device = new_device_list[i];
2180 
2181         find_result = pqi_scsi_find_entry(ctrl_info, device,
2182             &matching_device);
2183 
2184         switch (find_result) {
2185         case DEVICE_SAME:
2186             /*
2187              * The newly found device is already in the existing
2188              * device list.
2189              */
2190             device->new_device = false;
2191             matching_device->device_gone = false;
2192             pqi_scsi_update_device(ctrl_info, matching_device, device);
2193             break;
2194         case DEVICE_NOT_FOUND:
2195             /*
2196              * The newly found device is NOT in the existing device
2197              * list.
2198              */
2199             device->new_device = true;
2200             break;
2201         case DEVICE_CHANGED:
2202             /*
2203              * The original device has gone away and we need to add
2204              * the new device.
2205              */
2206             device->new_device = true;
2207             break;
2208         }
2209     }
2210 
2211     /* Process all devices that have gone away. */
2212     list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
2213         scsi_device_list_entry) {
2214         if (device->device_gone) {
2215             list_del(&device->scsi_device_list_entry);
2216             list_add_tail(&device->delete_list_entry, &delete_list);
2217         }
2218     }
2219 
2220     /* Process all new devices. */
2221     for (i = 0; i < num_new_devices; i++) {
2222         device = new_device_list[i];
2223         if (!device->new_device)
2224             continue;
2225         if (device->volume_offline)
2226             continue;
2227         list_add_tail(&device->scsi_device_list_entry,
2228             &ctrl_info->scsi_device_list);
2229         list_add_tail(&device->add_list_entry, &add_list);
2230         /* To prevent this device structure from being freed later. */
2231         device->keep_device = true;
2232     }
2233 
2234     spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2235 
2236     /*
2237      * If OFA is in progress and there are devices that need to be deleted,
2238      * allow any pending reset operations to continue and unblock any SCSI
2239      * requests before removal.
2240      */
2241     if (pqi_ofa_in_progress(ctrl_info)) {
2242         list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
2243             if (pqi_is_device_added(device))
2244                 pqi_device_remove_start(device);
2245         pqi_ctrl_unblock_device_reset(ctrl_info);
2246         pqi_scsi_unblock_requests(ctrl_info);
2247     }
2248 
2249     /* Remove all devices that have gone away. */
2250     list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
2251         if (device->volume_offline) {
2252             pqi_dev_info(ctrl_info, "offline", device);
2253             pqi_show_volume_status(ctrl_info, device);
2254         } else {
2255             pqi_dev_info(ctrl_info, "removed", device);
2256         }
2257         if (pqi_is_device_added(device))
2258             pqi_remove_device(ctrl_info, device);
2259         list_del(&device->delete_list_entry);
2260         pqi_free_device(device);
2261     }
2262 
2263     /*
2264      * Notify the SML of any existing device changes, such as
2265      * queue depth or device size.
2266      */
2267     list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
2268         if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
2269             device->advertised_queue_depth = device->queue_depth;
2270             scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
2271             if (device->rescan) {
2272                 scsi_rescan_device(&device->sdev->sdev_gendev);
2273                 device->rescan = false;
2274             }
2275         }
2276     }
2277 
2278     /* Expose any new devices. */
2279     list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
2280         if (!pqi_is_device_added(device)) {
2281             rc = pqi_add_device(ctrl_info, device);
2282             if (rc == 0) {
2283                 pqi_dev_info(ctrl_info, "added", device);
2284             } else {
2285                 dev_warn(&ctrl_info->pci_dev->dev,
2286                     "scsi %d:%d:%d:%d addition failed, device not added\n",
2287                     ctrl_info->scsi_host->host_no,
2288                     device->bus, device->target,
2289                     device->lun);
2290                 pqi_fixup_botched_add(ctrl_info, device);
2291             }
2292         }
2293     }
2294 
2295     ctrl_info->logical_volume_rescan_needed = false;
2296 
2297 }
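
/*
 * pqi_update_device_list() above is a mark-and-sweep diff: every
 * existing entry is first marked device_gone, entries re-reported by
 * the controller are unmarked (DEVICE_SAME) or flagged as new
 * (DEVICE_NOT_FOUND/DEVICE_CHANGED), and everything still marked is
 * moved onto delete_list. Only the list manipulation happens under the
 * spinlock; SML add/remove calls, rescans and queue depth updates are
 * deferred until after it is dropped.
 */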
2298 
2299 static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
2300 {
2301     /*
2302      * Only support the HBA controller itself as a RAID
2303      * controller.  If it's a RAID controller other than
2304      * the HBA itself (an external RAID controller, for
2305      * example), we don't support it.
2306      */
2307     if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
2308         !pqi_is_hba_lunid(device->scsi3addr))
2309             return false;
2310 
2311     return true;
2312 }
2313 
2314 static inline bool pqi_skip_device(u8 *scsi3addr)
2315 {
2316     /* Ignore all masked devices. */
2317     if (MASKED_DEVICE(scsi3addr))
2318         return true;
2319 
2320     return false;
2321 }
2322 
2323 static inline void pqi_mask_device(u8 *scsi3addr)
2324 {
2325     scsi3addr[3] |= 0xc0;
2326 }
2327 
2328 static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device)
2329 {
2330     if (pqi_is_logical_device(device))
2331         return false;
2332 
2333     return (device->path_map & (device->path_map - 1)) != 0;
2334 }
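
/*
 * The test in pqi_is_multipath_device() above relies on the usual
 * "more than one bit set" trick: path_map & (path_map - 1) clears the
 * lowest set bit, so the result is non-zero only when at least two
 * redundant paths are present. For example (illustrative values),
 * path_map 0x1 -> single path, path_map 0x3 -> multipath.
 */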
2335 
2336 static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
2337 {
2338     return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
2339 }
2340 
2341 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2342 {
2343     int i;
2344     int rc;
2345     LIST_HEAD(new_device_list_head);
2346     struct report_phys_lun_16byte_wwid_list *physdev_list = NULL;
2347     struct report_log_lun_list *logdev_list = NULL;
2348     struct report_phys_lun_16byte_wwid *phys_lun;
2349     struct report_log_lun *log_lun;
2350     struct bmic_identify_physical_device *id_phys = NULL;
2351     u32 num_physicals;
2352     u32 num_logicals;
2353     struct pqi_scsi_dev **new_device_list = NULL;
2354     struct pqi_scsi_dev *device;
2355     struct pqi_scsi_dev *next;
2356     unsigned int num_new_devices;
2357     unsigned int num_valid_devices;
2358     bool is_physical_device;
2359     u8 *scsi3addr;
2360     unsigned int physical_index;
2361     unsigned int logical_index;
2362     static char *out_of_memory_msg =
2363         "failed to allocate memory, device discovery stopped";
2364 
2365     rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
2366     if (rc)
2367         goto out;
2368 
2369     if (physdev_list)
2370         num_physicals =
2371             get_unaligned_be32(&physdev_list->header.list_length)
2372                 / sizeof(physdev_list->lun_entries[0]);
2373     else
2374         num_physicals = 0;
2375 
2376     if (logdev_list)
2377         num_logicals =
2378             get_unaligned_be32(&logdev_list->header.list_length)
2379                 / sizeof(logdev_list->lun_entries[0]);
2380     else
2381         num_logicals = 0;
2382 
2383     if (num_physicals) {
2384         /*
2385          * We need this buffer for calls to pqi_get_physical_device_info()
2386          * below (via pqi_get_device_info()).  We allocate it here instead
2387          * of inside pqi_get_physical_device_info() because it's a fairly large
2388          * buffer.
2389          */
2390         id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
2391         if (!id_phys) {
2392             dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2393                 out_of_memory_msg);
2394             rc = -ENOMEM;
2395             goto out;
2396         }
2397 
2398         if (pqi_hide_vsep) {
2399             for (i = num_physicals - 1; i >= 0; i--) {
2400                 phys_lun = &physdev_list->lun_entries[i];
2401                 if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) {
2402                     pqi_mask_device(phys_lun->lunid);
2403                     break;
2404                 }
2405             }
2406         }
2407     }
2408 
2409     if (num_logicals &&
2410         (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
2411         ctrl_info->lv_drive_type_mix_valid = true;
2412 
2413     num_new_devices = num_physicals + num_logicals;
2414 
2415     new_device_list = kmalloc_array(num_new_devices,
2416                     sizeof(*new_device_list),
2417                     GFP_KERNEL);
2418     if (!new_device_list) {
2419         dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2420         rc = -ENOMEM;
2421         goto out;
2422     }
2423 
2424     for (i = 0; i < num_new_devices; i++) {
2425         device = kzalloc(sizeof(*device), GFP_KERNEL);
2426         if (!device) {
2427             dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2428                 out_of_memory_msg);
2429             rc = -ENOMEM;
2430             goto out;
2431         }
2432         list_add_tail(&device->new_device_list_entry,
2433             &new_device_list_head);
2434     }
2435 
2436     device = NULL;
2437     num_valid_devices = 0;
2438     physical_index = 0;
2439     logical_index = 0;
2440 
2441     for (i = 0; i < num_new_devices; i++) {
2442 
2443         if ((!pqi_expose_ld_first && i < num_physicals) ||
2444             (pqi_expose_ld_first && i >= num_logicals)) {
2445             is_physical_device = true;
2446             phys_lun = &physdev_list->lun_entries[physical_index++];
2447             log_lun = NULL;
2448             scsi3addr = phys_lun->lunid;
2449         } else {
2450             is_physical_device = false;
2451             phys_lun = NULL;
2452             log_lun = &logdev_list->lun_entries[logical_index++];
2453             scsi3addr = log_lun->lunid;
2454         }
2455 
2456         if (is_physical_device && pqi_skip_device(scsi3addr))
2457             continue;
2458 
2459         if (device)
2460             device = list_next_entry(device, new_device_list_entry);
2461         else
2462             device = list_first_entry(&new_device_list_head,
2463                 struct pqi_scsi_dev, new_device_list_entry);
2464 
2465         memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2466         device->is_physical_device = is_physical_device;
2467         if (is_physical_device) {
2468             device->device_type = phys_lun->device_type;
2469             if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
2470                 device->is_expander_smp_device = true;
2471         } else {
2472             device->is_external_raid_device =
2473                 pqi_is_external_raid_addr(scsi3addr);
2474         }
2475 
2476         if (!pqi_is_supported_device(device))
2477             continue;
2478 
2479         /* Do not present disks that the OS cannot fully probe */
2480         if (pqi_keep_device_offline(ctrl_info, device))
2481             continue;
2482 
2483         /* Gather information about the device. */
2484         rc = pqi_get_device_info(ctrl_info, device, id_phys);
2485         if (rc == -ENOMEM) {
2486             dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2487                 out_of_memory_msg);
2488             goto out;
2489         }
2490         if (rc) {
2491             if (device->is_physical_device)
2492                 dev_warn(&ctrl_info->pci_dev->dev,
2493                     "obtaining device info failed, skipping physical device %016llx%016llx\n",
2494                     get_unaligned_be64(&phys_lun->wwid[0]),
2495                     get_unaligned_be64(&phys_lun->wwid[8]));
2496             else
2497                 dev_warn(&ctrl_info->pci_dev->dev,
2498                     "obtaining device info failed, skipping logical device %08x%08x\n",
2499                     *((u32 *)&device->scsi3addr),
2500                     *((u32 *)&device->scsi3addr[4]));
2501             rc = 0;
2502             continue;
2503         }
2504 
2505         pqi_assign_bus_target_lun(device);
2506 
2507         if (device->is_physical_device) {
2508             memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
2509             if ((phys_lun->device_flags &
2510                 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
2511                 phys_lun->aio_handle) {
2512                     device->aio_enabled = true;
2513                     device->aio_handle =
2514                         phys_lun->aio_handle;
2515             }
2516         } else {
2517             memcpy(device->volume_id, log_lun->volume_id,
2518                 sizeof(device->volume_id));
2519         }
2520 
2521         device->sas_address = get_unaligned_be64(&device->wwid[0]);
2522 
2523         new_device_list[num_valid_devices++] = device;
2524     }
2525 
2526     pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2527 
2528 out:
2529     list_for_each_entry_safe(device, next, &new_device_list_head,
2530         new_device_list_entry) {
2531         if (device->keep_device)
2532             continue;
2533         list_del(&device->new_device_list_entry);
2534         pqi_free_device(device);
2535     }
2536 
2537     kfree(new_device_list);
2538     kfree(physdev_list);
2539     kfree(logdev_list);
2540     kfree(id_phys);
2541 
2542     return rc;
2543 }
2544 
2545 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2546 {
2547     int rc;
2548     int mutex_acquired;
2549 
2550     if (pqi_ctrl_offline(ctrl_info))
2551         return -ENXIO;
2552 
2553     mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
2554 
2555     if (!mutex_acquired) {
2556         if (pqi_ctrl_scan_blocked(ctrl_info))
2557             return -EBUSY;
2558         pqi_schedule_rescan_worker_delayed(ctrl_info);
2559         return -EINPROGRESS;
2560     }
2561 
2562     rc = pqi_update_scsi_devices(ctrl_info);
2563     if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
2564         pqi_schedule_rescan_worker_delayed(ctrl_info);
2565 
2566     mutex_unlock(&ctrl_info->scan_mutex);
2567 
2568     return rc;
2569 }
2570 
2571 static void pqi_scan_start(struct Scsi_Host *shost)
2572 {
2573     struct pqi_ctrl_info *ctrl_info;
2574 
2575     ctrl_info = shost_to_hba(shost);
2576 
2577     pqi_scan_scsi_devices(ctrl_info);
2578 }
2579 
2580 /* Returns TRUE if the scan is finished. */
2581 
2582 static int pqi_scan_finished(struct Scsi_Host *shost,
2583     unsigned long elapsed_time)
2584 {
2585     struct pqi_ctrl_info *ctrl_info;
2586 
2587     ctrl_info = shost_priv(shost);
2588 
2589     return !mutex_is_locked(&ctrl_info->scan_mutex);
2590 }
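
/*
 * pqi_scan_start() and pqi_scan_finished() above (presumably wired into
 * the host template's asynchronous scan hooks) work together with
 * pqi_scan_scsi_devices(): a scan that cannot take scan_mutex is either
 * rejected (-EBUSY while scans are blocked) or rescheduled via the
 * delayed rescan worker, and scan completion is reported simply as
 * "scan_mutex is no longer held".
 */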
2591 
2592 static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
2593     struct raid_map *raid_map, u64 first_block)
2594 {
2595     u32 volume_blk_size;
2596 
2597     /*
2598      * Set the encryption tweak values based on the logical block address.
2599      * If the block size is 512, the tweak value is equal to the LBA.
2600      * For other block sizes, the tweak value is (LBA * block size) / 512.
2601      */
2602     volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2603     if (volume_blk_size != 512)
2604         first_block = (first_block * volume_blk_size) / 512;
2605 
2606     encryption_info->data_encryption_key_index =
2607         get_unaligned_le16(&raid_map->data_encryption_key_index);
2608     encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2609     encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2610 }
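
/*
 * Worked example for the tweak calculation above (illustrative values
 * only): with a 4096-byte volume block size and first_block 100, the
 * tweak becomes (100 * 4096) / 512 = 800, split into its lower and
 * upper 32-bit halves; with a 512-byte block size the tweak is simply
 * the LBA itself.
 */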
2611 
2612 /*
2613  * Attempt to perform RAID bypass mapping for a logical volume I/O.
2614  */
2615 
2616 static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
2617     struct pqi_scsi_dev_raid_map_data *rmd)
2618 {
2619     bool is_supported = true;
2620 
2621     switch (rmd->raid_level) {
2622     case SA_RAID_0:
2623         break;
2624     case SA_RAID_1:
2625         if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2626             rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
2627             is_supported = false;
2628         break;
2629     case SA_RAID_TRIPLE:
2630         if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2631             rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
2632             is_supported = false;
2633         break;
2634     case SA_RAID_5:
2635         if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
2636             rmd->data_length > ctrl_info->max_write_raid_5_6))
2637             is_supported = false;
2638         break;
2639     case SA_RAID_6:
2640         if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
2641             rmd->data_length > ctrl_info->max_write_raid_5_6))
2642             is_supported = false;
2643         break;
2644     default:
2645         is_supported = false;
2646         break;
2647     }
2648 
2649     return is_supported;
2650 }
2651 
2652 #define PQI_RAID_BYPASS_INELIGIBLE  1
2653 
2654 static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
2655     struct pqi_scsi_dev_raid_map_data *rmd)
2656 {
2657     /* Check for valid opcode, get LBA and block count. */
2658     switch (scmd->cmnd[0]) {
2659     case WRITE_6:
2660         rmd->is_write = true;
2661         fallthrough;
2662     case READ_6:
2663         rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2664             (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2665         rmd->block_cnt = (u32)scmd->cmnd[4];
2666         if (rmd->block_cnt == 0)
2667             rmd->block_cnt = 256;
2668         break;
2669     case WRITE_10:
2670         rmd->is_write = true;
2671         fallthrough;
2672     case READ_10:
2673         rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2674         rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2675         break;
2676     case WRITE_12:
2677         rmd->is_write = true;
2678         fallthrough;
2679     case READ_12:
2680         rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2681         rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2682         break;
2683     case WRITE_16:
2684         rmd->is_write = true;
2685         fallthrough;
2686     case READ_16:
2687         rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
2688         rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2689         break;
2690     default:
2691         /* Process via normal I/O path. */
2692         return PQI_RAID_BYPASS_INELIGIBLE;
2693     }
2694 
2695     put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
2696 
2697     return 0;
2698 }
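
/*
 * CDB decoding in pqi_get_aio_lba_and_block_count() above follows the
 * SCSI block command layouts: READ/WRITE(6) carry a 21-bit LBA and an
 * 8-bit transfer length where 0 means 256 blocks, while
 * READ/WRITE(10)/(12)/(16) carry a big-endian LBA at byte 2 followed by
 * a big-endian transfer length. Any other opcode is declared ineligible
 * so it travels the normal RAID path.
 */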
2699 
2700 static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
2701     struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
2702 {
2703 #if BITS_PER_LONG == 32
2704     u64 tmpdiv;
2705 #endif
2706 
2707     rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
2708 
2709     /* Check for invalid block or wraparound. */
2710     if (rmd->last_block >=
2711         get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2712         rmd->last_block < rmd->first_block)
2713         return PQI_RAID_BYPASS_INELIGIBLE;
2714 
2715     rmd->data_disks_per_row =
2716         get_unaligned_le16(&raid_map->data_disks_per_row);
2717     rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
2718     rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2719 
2720     /* Calculate stripe information for the request. */
2721     rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
2722     if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2723         return PQI_RAID_BYPASS_INELIGIBLE;
2724 #if BITS_PER_LONG == 32
2725     tmpdiv = rmd->first_block;
2726     do_div(tmpdiv, rmd->blocks_per_row);
2727     rmd->first_row = tmpdiv;
2728     tmpdiv = rmd->last_block;
2729     do_div(tmpdiv, rmd->blocks_per_row);
2730     rmd->last_row = tmpdiv;
2731     rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
2732     rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
2733     tmpdiv = rmd->first_row_offset;
2734     do_div(tmpdiv, rmd->strip_size);
2735     rmd->first_column = tmpdiv;
2736     tmpdiv = rmd->last_row_offset;
2737     do_div(tmpdiv, rmd->strip_size);
2738     rmd->last_column = tmpdiv;
2739 #else
2740     rmd->first_row = rmd->first_block / rmd->blocks_per_row;
2741     rmd->last_row = rmd->last_block / rmd->blocks_per_row;
2742     rmd->first_row_offset = (u32)(rmd->first_block -
2743         (rmd->first_row * rmd->blocks_per_row));
2744     rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
2745         rmd->blocks_per_row));
2746     rmd->first_column = rmd->first_row_offset / rmd->strip_size;
2747     rmd->last_column = rmd->last_row_offset / rmd->strip_size;
2748 #endif
2749 
2750     /* If this isn't a single row/column, let the controller handle the request. */
2751     if (rmd->first_row != rmd->last_row ||
2752         rmd->first_column != rmd->last_column)
2753         return PQI_RAID_BYPASS_INELIGIBLE;
2754 
2755     /* Proceeding with driver mapping. */
2756     rmd->total_disks_per_row = rmd->data_disks_per_row +
2757         get_unaligned_le16(&raid_map->metadata_disks_per_row);
2758     rmd->map_row = ((u32)(rmd->first_row >>
2759         raid_map->parity_rotation_shift)) %
2760         get_unaligned_le16(&raid_map->row_cnt);
2761     rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
2762         rmd->first_column;
2763 
2764     return 0;
2765 }
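
/*
 * Stripe math example for the function above (illustrative values
 * only): with strip_size 128 and data_disks_per_row 4, blocks_per_row
 * is 512; a request starting at first_block 1000 lands in row
 * 1000 / 512 = 1, row offset 1000 - 512 = 488 and column 488 / 128 = 3.
 * The request is bypass-eligible only if the first and last block fall
 * in that same row/column, and map_index then selects the backing disk
 * as (map_row * total_disks_per_row) + first_column.
 */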
2766 
2767 static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
2768     struct raid_map *raid_map)
2769 {
2770 #if BITS_PER_LONG == 32
2771     u64 tmpdiv;
2772 #endif
2773 
2774     if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2775         return PQI_RAID_BYPASS_INELIGIBLE;
2776 
2777     /* RAID 50/60 */
2778     /* Verify first and last block are in same RAID group. */
2779     rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
2780 #if BITS_PER_LONG == 32
2781     tmpdiv = rmd->first_block;
2782     rmd->first_group = do_div(tmpdiv, rmd->stripesize);
2783     tmpdiv = rmd->first_group;
2784     do_div(tmpdiv, rmd->blocks_per_row);
2785     rmd->first_group = tmpdiv;
2786     tmpdiv = rmd->last_block;
2787     rmd->last_group = do_div(tmpdiv, rmd->stripesize);
2788     tmpdiv = rmd->last_group;
2789     do_div(tmpdiv, rmd->blocks_per_row);
2790     rmd->last_group = tmpdiv;
2791 #else
2792     rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
2793     rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
2794 #endif
2795     if (rmd->first_group != rmd->last_group)
2796         return PQI_RAID_BYPASS_INELIGIBLE;
2797 
2798     /* Verify request is in a single row of RAID 5/6. */
2799 #if BITS_PER_LONG == 32
2800     tmpdiv = rmd->first_block;
2801     do_div(tmpdiv, rmd->stripesize);
2802     rmd->first_row = tmpdiv;
2803     rmd->r5or6_first_row = tmpdiv;
2804     tmpdiv = rmd->last_block;
2805     do_div(tmpdiv, rmd->stripesize);
2806     rmd->r5or6_last_row = tmpdiv;
2807 #else
2808     rmd->first_row = rmd->r5or6_first_row =
2809         rmd->first_block / rmd->stripesize;
2810     rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
2811 #endif
2812     if (rmd->r5or6_first_row != rmd->r5or6_last_row)
2813         return PQI_RAID_BYPASS_INELIGIBLE;
2814 
2815     /* Verify request is in a single column. */
2816 #if BITS_PER_LONG == 32
2817     tmpdiv = rmd->first_block;
2818     rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
2819     tmpdiv = rmd->first_row_offset;
2820     rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
2821     rmd->r5or6_first_row_offset = rmd->first_row_offset;
2822     tmpdiv = rmd->last_block;
2823     rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
2824     tmpdiv = rmd->r5or6_last_row_offset;
2825     rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
2826     tmpdiv = rmd->r5or6_first_row_offset;
2827     do_div(tmpdiv, rmd->strip_size);
2828     rmd->first_column = rmd->r5or6_first_column = tmpdiv;
2829     tmpdiv = rmd->r5or6_last_row_offset;
2830     do_div(tmpdiv, rmd->strip_size);
2831     rmd->r5or6_last_column = tmpdiv;
2832 #else
2833     rmd->first_row_offset = rmd->r5or6_first_row_offset =
2834         (u32)((rmd->first_block % rmd->stripesize) %
2835         rmd->blocks_per_row);
2836 
2837     rmd->r5or6_last_row_offset =
2838         (u32)((rmd->last_block % rmd->stripesize) %
2839         rmd->blocks_per_row);
2840 
2841     rmd->first_column =
2842         rmd->r5or6_first_row_offset / rmd->strip_size;
2843     rmd->r5or6_first_column = rmd->first_column;
2844     rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
2845 #endif
2846     if (rmd->r5or6_first_column != rmd->r5or6_last_column)
2847         return PQI_RAID_BYPASS_INELIGIBLE;
2848 
2849     /* Request is eligible. */
2850     rmd->map_row =
2851         ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
2852         get_unaligned_le16(&raid_map->row_cnt);
2853 
2854     rmd->map_index = (rmd->first_group *
2855         (get_unaligned_le16(&raid_map->row_cnt) *
2856         rmd->total_disks_per_row)) +
2857         (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
2858 
2859     if (rmd->is_write) {
2860         u32 index;
2861 
2862         /*
2863          * p_parity_it_nexus and q_parity_it_nexus are the AIO handles of
2864          * the parity entries inside the device's raid_map.
2865          *
2866          * A device's RAID map is bounded by the number of RAID disks squared.
2867          *
2868          * The device's RAID map size is checked during device
2869          * initialization.
2870          */
2871         index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
2872         index *= rmd->total_disks_per_row;
2873         index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
2874 
2875         rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
2876         if (rmd->raid_level == SA_RAID_6) {
2877             rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
2878             rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
2879         }
2880 #if BITS_PER_LONG == 32
2881         tmpdiv = rmd->first_block;
2882         do_div(tmpdiv, rmd->blocks_per_row);
2883         rmd->row = tmpdiv;
2884 #else
2885         rmd->row = rmd->first_block / rmd->blocks_per_row;
2886 #endif
2887     }
2888 
2889     return 0;
2890 }
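
/*
 * For RAID 50/60 the function above additionally requires the I/O to
 * stay inside a single RAID group (stripesize = blocks_per_row *
 * layout_map_count), a single row and a single column; otherwise the
 * request is handed back to the normal RAID path. For writes it also
 * records the P (and, for RAID 6, Q) parity AIO handles taken from the
 * end of the RAID map row that contains map_index.
 */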
2891 
2892 static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
2893 {
2894     /* Build the new CDB for the physical disk I/O. */
2895     if (rmd->disk_block > 0xffffffff) {
2896         rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
2897         rmd->cdb[1] = 0;
2898         put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
2899         put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
2900         rmd->cdb[14] = 0;
2901         rmd->cdb[15] = 0;
2902         rmd->cdb_length = 16;
2903     } else {
2904         rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
2905         rmd->cdb[1] = 0;
2906         put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
2907         rmd->cdb[6] = 0;
2908         put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
2909         rmd->cdb[9] = 0;
2910         rmd->cdb_length = 10;
2911     }
2912 }
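
/*
 * pqi_set_aio_cdb() above picks the CDB size from the rebased disk LBA:
 * anything that still fits in 32 bits uses a 10-byte READ/WRITE CDB,
 * while larger starting blocks require the 16-byte form with a 64-bit
 * LBA field.
 */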
2913 
2914 static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
2915     struct pqi_scsi_dev_raid_map_data *rmd)
2916 {
2917     u32 index;
2918     u32 group;
2919 
2920     group = rmd->map_index / rmd->data_disks_per_row;
2921 
2922     index = rmd->map_index - (group * rmd->data_disks_per_row);
2923     rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
2924     index += rmd->data_disks_per_row;
2925     rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
2926     if (rmd->layout_map_count > 2) {
2927         index += rmd->data_disks_per_row;
2928         rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
2929     }
2930 
2931     rmd->num_it_nexus_entries = rmd->layout_map_count;
2932 }
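
/*
 * For RAID 1/TRIPLE writes, pqi_calc_aio_r1_nexus() above collects one
 * AIO handle per mirror leg: it reduces map_index to its position
 * within the first leg and then steps by data_disks_per_row to reach
 * the matching entry in each additional leg, yielding two handles for
 * RAID 1 and three for RAID TRIPLE (num_it_nexus_entries =
 * layout_map_count).
 */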
2933 
2934 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2935     struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2936     struct pqi_queue_group *queue_group)
2937 {
2938     int rc;
2939     struct raid_map *raid_map;
2940     u32 group;
2941     u32 next_bypass_group;
2942     struct pqi_encryption_info *encryption_info_ptr;
2943     struct pqi_encryption_info encryption_info;
2944     struct pqi_scsi_dev_raid_map_data rmd = { 0 };
2945 
2946     rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
2947     if (rc)
2948         return PQI_RAID_BYPASS_INELIGIBLE;
2949 
2950     rmd.raid_level = device->raid_level;
2951 
2952     if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
2953         return PQI_RAID_BYPASS_INELIGIBLE;
2954 
2955     if (unlikely(rmd.block_cnt == 0))
2956         return PQI_RAID_BYPASS_INELIGIBLE;
2957 
2958     raid_map = device->raid_map;
2959 
2960     rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
2961     if (rc)
2962         return PQI_RAID_BYPASS_INELIGIBLE;
2963 
2964     if (device->raid_level == SA_RAID_1 ||
2965         device->raid_level == SA_RAID_TRIPLE) {
2966         if (rmd.is_write) {
2967             pqi_calc_aio_r1_nexus(raid_map, &rmd);
2968         } else {
2969             group = device->next_bypass_group[rmd.map_index];
2970             next_bypass_group = group + 1;
2971             if (next_bypass_group >= rmd.layout_map_count)
2972                 next_bypass_group = 0;
2973             device->next_bypass_group[rmd.map_index] = next_bypass_group;
2974             rmd.map_index += group * rmd.data_disks_per_row;
2975         }
2976     } else if ((device->raid_level == SA_RAID_5 ||
2977         device->raid_level == SA_RAID_6) &&
2978         (rmd.layout_map_count > 1 || rmd.is_write)) {
2979         rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
2980         if (rc)
2981             return PQI_RAID_BYPASS_INELIGIBLE;
2982     }
2983 
2984     if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
2985         return PQI_RAID_BYPASS_INELIGIBLE;
2986 
2987     rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
2988     rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2989         rmd.first_row * rmd.strip_size +
2990         (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
2991     rmd.disk_block_cnt = rmd.block_cnt;
2992 
2993     /* Handle differing logical/physical block sizes. */
2994     if (raid_map->phys_blk_shift) {
2995         rmd.disk_block <<= raid_map->phys_blk_shift;
2996         rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
2997     }
2998 
2999     if (unlikely(rmd.disk_block_cnt > 0xffff))
3000         return PQI_RAID_BYPASS_INELIGIBLE;
3001 
3002     pqi_set_aio_cdb(&rmd);
3003 
3004     if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
3005         if (rmd.data_length > device->max_transfer_encrypted)
3006             return PQI_RAID_BYPASS_INELIGIBLE;
3007         pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
3008         encryption_info_ptr = &encryption_info;
3009     } else {
3010         encryption_info_ptr = NULL;
3011     }
3012 
3013     if (rmd.is_write) {
3014         switch (device->raid_level) {
3015         case SA_RAID_1:
3016         case SA_RAID_TRIPLE:
3017             return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
3018                 encryption_info_ptr, device, &rmd);
3019         case SA_RAID_5:
3020         case SA_RAID_6:
3021             return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
3022                 encryption_info_ptr, device, &rmd);
3023         }
3024     }
3025 
3026     return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
3027         rmd.cdb, rmd.cdb_length, queue_group,
3028         encryption_info_ptr, true, false);
3029 }
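
/*
 * Summary of the bypass path above: a request is rerouted directly to a
 * physical drive only if the opcode is a plain READ/WRITE, the RAID
 * level (and its write limits) allow it, the whole transfer maps onto a
 * single row/column of the RAID map, the resulting disk block count
 * fits in 16 bits and, for encrypted volumes, the length does not
 * exceed max_transfer_encrypted. Anything else returns
 * PQI_RAID_BYPASS_INELIGIBLE and is sent down the normal RAID path by
 * the caller.
 */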
3030 
3031 #define PQI_STATUS_IDLE     0x0
3032 
3033 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1
3034 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2
3035 
3036 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET     0x0
3037 #define PQI_DEVICE_STATE_STATUS_AVAILABLE       0x1
3038 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY        0x2
3039 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY     0x3
3040 #define PQI_DEVICE_STATE_ERROR              0x4
3041 
3042 #define PQI_MODE_READY_TIMEOUT_SECS     30
3043 #define PQI_MODE_READY_POLL_INTERVAL_MSECS  1
3044 
3045 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
3046 {
3047     struct pqi_device_registers __iomem *pqi_registers;
3048     unsigned long timeout;
3049     u64 signature;
3050     u8 status;
3051 
3052     pqi_registers = ctrl_info->pqi_registers;
3053     timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
3054 
3055     while (1) {
3056         signature = readq(&pqi_registers->signature);
3057         if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
3058             sizeof(signature)) == 0)
3059             break;
3060         if (time_after(jiffies, timeout)) {
3061             dev_err(&ctrl_info->pci_dev->dev,
3062                 "timed out waiting for PQI signature\n");
3063             return -ETIMEDOUT;
3064         }
3065         msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3066     }
3067 
3068     while (1) {
3069         status = readb(&pqi_registers->function_and_status_code);
3070         if (status == PQI_STATUS_IDLE)
3071             break;
3072         if (time_after(jiffies, timeout)) {
3073             dev_err(&ctrl_info->pci_dev->dev,
3074                 "timed out waiting for PQI IDLE\n");
3075             return -ETIMEDOUT;
3076         }
3077         msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3078     }
3079 
3080     while (1) {
3081         if (readl(&pqi_registers->device_status) ==
3082             PQI_DEVICE_STATE_ALL_REGISTERS_READY)
3083             break;
3084         if (time_after(jiffies, timeout)) {
3085             dev_err(&ctrl_info->pci_dev->dev,
3086                 "timed out waiting for PQI all registers ready\n");
3087             return -ETIMEDOUT;
3088         }
3089         msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3090     }
3091 
3092     return 0;
3093 }
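
/*
 * pqi_wait_for_pqi_mode_ready() above polls three conditions in order,
 * all against the same overall timeout: the PQI signature appears in
 * the device registers, the function/status code reports
 * PQI_STATUS_IDLE, and the device state reaches
 * PQI_DEVICE_STATE_ALL_REGISTERS_READY. Each poll loop sleeps
 * PQI_MODE_READY_POLL_INTERVAL_MSECS between reads.
 */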
3094 
3095 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
3096 {
3097     struct pqi_scsi_dev *device;
3098 
3099     device = io_request->scmd->device->hostdata;
3100     device->raid_bypass_enabled = false;
3101     device->aio_enabled = false;
3102 }
3103 
3104 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
3105 {
3106     struct pqi_ctrl_info *ctrl_info;
3107     struct pqi_scsi_dev *device;
3108 
3109     device = sdev->hostdata;
3110     if (device->device_offline)
3111         return;
3112 
3113     device->device_offline = true;
3114     ctrl_info = shost_to_hba(sdev->host);
3115     pqi_schedule_rescan_worker(ctrl_info);
3116     dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
3117         path, ctrl_info->scsi_host->host_no, device->bus,
3118         device->target, device->lun);
3119 }
3120 
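/*
 * Summary of the function below: translate a RAID-path error IU into SCSI
 * midlayer status. The PQI data_out_result is mapped to a host byte
 * (underflow sets the residual and may become DID_SOFT_ERROR, aborts become
 * DID_ABORT, timeouts DID_TIME_OUT, everything else DID_ERROR), any sense
 * data is copied into scmd->sense_buffer, and a HARDWARE ERROR sense with
 * ASC 0x3e / ASCQ 0x1 ("logical unit failure") takes the device offline.
 */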
3121 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
3122 {
3123     u8 scsi_status;
3124     u8 host_byte;
3125     struct scsi_cmnd *scmd;
3126     struct pqi_raid_error_info *error_info;
3127     size_t sense_data_length;
3128     int residual_count;
3129     int xfer_count;
3130     struct scsi_sense_hdr sshdr;
3131 
3132     scmd = io_request->scmd;
3133     if (!scmd)
3134         return;
3135 
3136     error_info = io_request->error_info;
3137     scsi_status = error_info->status;
3138     host_byte = DID_OK;
3139 
3140     switch (error_info->data_out_result) {
3141     case PQI_DATA_IN_OUT_GOOD:
3142         break;
3143     case PQI_DATA_IN_OUT_UNDERFLOW:
3144         xfer_count =
3145             get_unaligned_le32(&error_info->data_out_transferred);
3146         residual_count = scsi_bufflen(scmd) - xfer_count;
3147         scsi_set_resid(scmd, residual_count);
3148         if (xfer_count < scmd->underflow)
3149             host_byte = DID_SOFT_ERROR;
3150         break;
3151     case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
3152     case PQI_DATA_IN_OUT_ABORTED:
3153         host_byte = DID_ABORT;
3154         break;
3155     case PQI_DATA_IN_OUT_TIMEOUT:
3156         host_byte = DID_TIME_OUT;
3157         break;
3158     case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
3159     case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
3160     case PQI_DATA_IN_OUT_BUFFER_ERROR:
3161     case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
3162     case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
3163     case PQI_DATA_IN_OUT_ERROR:
3164     case PQI_DATA_IN_OUT_HARDWARE_ERROR:
3165     case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
3166     case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
3167     case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
3168     case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
3169     case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
3170     case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
3171     case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
3172     case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
3173     case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
3174     default:
3175         host_byte = DID_ERROR;
3176         break;
3177     }
3178 
3179     sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
3180     if (sense_data_length == 0)
3181         sense_data_length =
3182             get_unaligned_le16(&error_info->response_data_length);
3183     if (sense_data_length) {
3184         if (sense_data_length > sizeof(error_info->data))
3185             sense_data_length = sizeof(error_info->data);
3186 
3187         if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3188             scsi_normalize_sense(error_info->data,
3189                 sense_data_length, &sshdr) &&
3190                 sshdr.sense_key == HARDWARE_ERROR &&
3191                 sshdr.asc == 0x3e) {
3192             struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
3193             struct pqi_scsi_dev *device = scmd->device->hostdata;
3194 
3195             switch (sshdr.ascq) {
3196             case 0x1: /* LOGICAL UNIT FAILURE */
3197                 if (printk_ratelimit())
3198                     scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
3199                         ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3200                 pqi_take_device_offline(scmd->device, "RAID");
3201                 host_byte = DID_NO_CONNECT;
3202                 break;
3203 
3204             default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
3205                 if (printk_ratelimit())
3206                     scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
3207                         sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3208                 break;
3209             }
3210         }
3211 
3212         if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3213             sense_data_length = SCSI_SENSE_BUFFERSIZE;
3214         memcpy(scmd->sense_buffer, error_info->data,
3215             sense_data_length);
3216     }
3217 
3218     scmd->result = scsi_status;
3219     set_host_byte(scmd, host_byte);
3220 }
3221 
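/*
 * Summary of the function below: translate an AIO-path error IU. Most
 * service responses map directly to a SAM status; the notable case is
 * PQI_AIO_STATUS_AIO_PATH_DISABLED, where the bypass path is turned off
 * and, for non-multipath devices, the request status is set to -EAGAIN
 * instead of failing the command outright. If the device was taken offline
 * here and the firmware supplied no sense data, a "logical unit failure"
 * sense (3e/01) is synthesized.
 */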
3222 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
3223 {
3224     u8 scsi_status;
3225     u8 host_byte;
3226     struct scsi_cmnd *scmd;
3227     struct pqi_aio_error_info *error_info;
3228     size_t sense_data_length;
3229     int residual_count;
3230     int xfer_count;
3231     bool device_offline;
3232     struct pqi_scsi_dev *device;
3233 
3234     scmd = io_request->scmd;
3235     error_info = io_request->error_info;
3236     host_byte = DID_OK;
3237     sense_data_length = 0;
3238     device_offline = false;
3239     device = scmd->device->hostdata;
3240 
3241     switch (error_info->service_response) {
3242     case PQI_AIO_SERV_RESPONSE_COMPLETE:
3243         scsi_status = error_info->status;
3244         break;
3245     case PQI_AIO_SERV_RESPONSE_FAILURE:
3246         switch (error_info->status) {
3247         case PQI_AIO_STATUS_IO_ABORTED:
3248             scsi_status = SAM_STAT_TASK_ABORTED;
3249             break;
3250         case PQI_AIO_STATUS_UNDERRUN:
3251             scsi_status = SAM_STAT_GOOD;
3252             residual_count = get_unaligned_le32(
3253                         &error_info->residual_count);
3254             scsi_set_resid(scmd, residual_count);
3255             xfer_count = scsi_bufflen(scmd) - residual_count;
3256             if (xfer_count < scmd->underflow)
3257                 host_byte = DID_SOFT_ERROR;
3258             break;
3259         case PQI_AIO_STATUS_OVERRUN:
3260             scsi_status = SAM_STAT_GOOD;
3261             break;
3262         case PQI_AIO_STATUS_AIO_PATH_DISABLED:
3263             pqi_aio_path_disabled(io_request);
3264             if (pqi_is_multipath_device(device)) {
3265                 pqi_device_remove_start(device);
3266                 host_byte = DID_NO_CONNECT;
3267                 scsi_status = SAM_STAT_CHECK_CONDITION;
3268             } else {
3269                 scsi_status = SAM_STAT_GOOD;
3270                 io_request->status = -EAGAIN;
3271             }
3272             break;
3273         case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
3274         case PQI_AIO_STATUS_INVALID_DEVICE:
3275             if (!io_request->raid_bypass) {
3276                 device_offline = true;
3277                 pqi_take_device_offline(scmd->device, "AIO");
3278                 host_byte = DID_NO_CONNECT;
3279             }
3280             scsi_status = SAM_STAT_CHECK_CONDITION;
3281             break;
3282         case PQI_AIO_STATUS_IO_ERROR:
3283         default:
3284             scsi_status = SAM_STAT_CHECK_CONDITION;
3285             break;
3286         }
3287         break;
3288     case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
3289     case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
3290         scsi_status = SAM_STAT_GOOD;
3291         break;
3292     case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
3293     case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
3294     default:
3295         scsi_status = SAM_STAT_CHECK_CONDITION;
3296         break;
3297     }
3298 
3299     if (error_info->data_present) {
3300         sense_data_length =
3301             get_unaligned_le16(&error_info->data_length);
3302         if (sense_data_length) {
3303             if (sense_data_length > sizeof(error_info->data))
3304                 sense_data_length = sizeof(error_info->data);
3305             if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3306                 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3307             memcpy(scmd->sense_buffer, error_info->data,
3308                 sense_data_length);
3309         }
3310     }
3311 
3312     if (device_offline && sense_data_length == 0)
3313         scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
3314 
3315     scmd->result = scsi_status;
3316     set_host_byte(scmd, host_byte);
3317 }
3318 
3319 static void pqi_process_io_error(unsigned int iu_type,
3320     struct pqi_io_request *io_request)
3321 {
3322     switch (iu_type) {
3323     case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3324         pqi_process_raid_io_error(io_request);
3325         break;
3326     case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3327         pqi_process_aio_io_error(io_request);
3328         break;
3329     }
3330 }
3331 
3332 static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
3333     struct pqi_task_management_response *response)
3334 {
3335     int rc;
3336 
3337     switch (response->response_code) {
3338     case SOP_TMF_COMPLETE:
3339     case SOP_TMF_FUNCTION_SUCCEEDED:
3340         rc = 0;
3341         break;
3342     case SOP_TMF_REJECTED:
3343         rc = -EAGAIN;
3344         break;
3345     case SOP_RC_INCORRECT_LOGICAL_UNIT:
3346         rc = -ENODEV;
3347         break;
3348     default:
3349         rc = -EIO;
3350         break;
3351     }
3352 
3353     if (rc)
3354         dev_err(&ctrl_info->pci_dev->dev,
3355             "Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
3356 
3357     return rc;
3358 }
3359 
3360 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info,
3361     enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
3362 {
3363     pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason);
3364 }
3365 
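/*
 * Summary of the function below: drain one queue group's outbound (response)
 * queue. Responses are consumed from the driver's copy of the consumer index
 * up to the producer index read from the controller; each one is matched to
 * an outstanding pqi_io_request by request_id and dispatched on its IU type.
 * A producer index or request ID outside the valid range takes the
 * controller offline and returns -1. The updated consumer index is written
 * back to the controller only once, after the loop, and only if at least
 * one response was processed.
 */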
3366 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
3367 {
3368     int num_responses;
3369     pqi_index_t oq_pi;
3370     pqi_index_t oq_ci;
3371     struct pqi_io_request *io_request;
3372     struct pqi_io_response *response;
3373     u16 request_id;
3374 
3375     num_responses = 0;
3376     oq_ci = queue_group->oq_ci_copy;
3377 
3378     while (1) {
3379         oq_pi = readl(queue_group->oq_pi);
3380         if (oq_pi >= ctrl_info->num_elements_per_oq) {
3381             pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE);
3382             dev_err(&ctrl_info->pci_dev->dev,
3383                 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3384                 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
3385             return -1;
3386         }
3387         if (oq_pi == oq_ci)
3388             break;
3389 
3390         num_responses++;
3391         response = queue_group->oq_element_array +
3392             (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3393 
3394         request_id = get_unaligned_le16(&response->request_id);
3395         if (request_id >= ctrl_info->max_io_slots) {
3396             pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID);
3397             dev_err(&ctrl_info->pci_dev->dev,
3398                 "request ID in response (%u) out of range (0-%u): producer index: %u  consumer index: %u\n",
3399                 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
3400             return -1;
3401         }
3402 
3403         io_request = &ctrl_info->io_request_pool[request_id];
3404         if (atomic_read(&io_request->refcount) == 0) {
3405             pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID);
3406             dev_err(&ctrl_info->pci_dev->dev,
3407                 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u  consumer index: %u\n",
3408                 request_id, oq_pi, oq_ci);
3409             return -1;
3410         }
3411 
3412         switch (response->header.iu_type) {
3413         case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
3414         case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
3415             if (io_request->scmd)
3416                 io_request->scmd->result = 0;
3417             fallthrough;
3418         case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
3419             break;
3420         case PQI_RESPONSE_IU_VENDOR_GENERAL:
3421             io_request->status =
3422                 get_unaligned_le16(
3423                 &((struct pqi_vendor_general_response *)response)->status);
3424             break;
3425         case PQI_RESPONSE_IU_TASK_MANAGEMENT:
3426             io_request->status = pqi_interpret_task_management_response(ctrl_info,
3427                 (void *)response);
3428             break;
3429         case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
3430             pqi_aio_path_disabled(io_request);
3431             io_request->status = -EAGAIN;
3432             break;
3433         case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3434         case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3435             io_request->error_info = ctrl_info->error_buffer +
3436                 (get_unaligned_le16(&response->error_index) *
3437                 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
3438             pqi_process_io_error(response->header.iu_type, io_request);
3439             break;
3440         default:
3441             pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE);
3442             dev_err(&ctrl_info->pci_dev->dev,
3443                 "unexpected IU type: 0x%x: producer index: %u  consumer index: %u\n",
3444                 response->header.iu_type, oq_pi, oq_ci);
3445             return -1;
3446         }
3447 
3448         io_request->io_complete_callback(io_request, io_request->context);
3449 
3450         /*
3451          * Note that the I/O request structure CANNOT BE TOUCHED after
3452          * returning from the I/O completion callback!
3453          */
3454         oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
3455     }
3456 
3457     if (num_responses) {
3458         queue_group->oq_ci_copy = oq_ci;
3459         writel(oq_ci, queue_group->oq_ci);
3460     }
3461 
3462     return num_responses;
3463 }
3464 
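/*
 * Summary of the function below: number of free elements in a circular queue
 * with producer index pi and consumer index ci. One element is always left
 * unused so that a full queue can be distinguished from an empty one
 * (pi == ci means empty).
 *
 * Example: with 8 elements, pi = 2 and ci = 6, then 8 - 6 + 2 = 4 elements
 * are in use, leaving 8 - 4 - 1 = 3 usable slots.
 */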
3465 static inline unsigned int pqi_num_elements_free(unsigned int pi,
3466     unsigned int ci, unsigned int elements_in_queue)
3467 {
3468     unsigned int num_elements_used;
3469 
3470     if (pi >= ci)
3471         num_elements_used = pi - ci;
3472     else
3473         num_elements_used = elements_in_queue - ci + pi;
3474 
3475     return elements_in_queue - num_elements_used - 1;
3476 }
3477 
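/*
 * Summary of the function below: send an event acknowledgement IU on the
 * RAID path of the default queue group. The loop busy-waits, re-taking the
 * submit lock on each pass, until at least one inbound queue element is
 * free, bailing out only if the controller goes offline; the IU is then
 * copied into the queue and the producer index doorbell is written while
 * the lock is still held.
 */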
3478 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
3479     struct pqi_event_acknowledge_request *iu, size_t iu_length)
3480 {
3481     pqi_index_t iq_pi;
3482     pqi_index_t iq_ci;
3483     unsigned long flags;
3484     void *next_element;
3485     struct pqi_queue_group *queue_group;
3486 
3487     queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3488     put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3489 
3490     while (1) {
3491         spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3492 
3493         iq_pi = queue_group->iq_pi_copy[RAID_PATH];
3494         iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
3495 
3496         if (pqi_num_elements_free(iq_pi, iq_ci,
3497             ctrl_info->num_elements_per_iq))
3498             break;
3499 
3500         spin_unlock_irqrestore(
3501             &queue_group->submit_lock[RAID_PATH], flags);
3502 
3503         if (pqi_ctrl_offline(ctrl_info))
3504             return;
3505     }
3506 
3507     next_element = queue_group->iq_element_array[RAID_PATH] +
3508         (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3509 
3510     memcpy(next_element, iu, iu_length);
3511 
3512     iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
3513     queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3514 
3515     /*
3516      * This write notifies the controller that an IU is available to be
3517      * processed.
3518      */
3519     writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3520 
3521     spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
3522 }
3523 
3524 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3525     struct pqi_event *event)
3526 {
3527     struct pqi_event_acknowledge_request request;
3528 
3529     memset(&request, 0, sizeof(request));
3530 
3531     request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3532     put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3533         &request.header.iu_length);
3534     request.event_type = event->event_type;
3535     put_unaligned_le16(event->event_id, &request.event_id);
3536     put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
3537 
3538     pqi_send_event_ack(ctrl_info, &request, sizeof(request));
3539 }
3540 
3541 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS      30
3542 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS    1
3543 
3544 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3545     struct pqi_ctrl_info *ctrl_info)
3546 {
3547     u8 status;
3548     unsigned long timeout;
3549 
3550     timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies;
3551 
3552     while (1) {
3553         status = pqi_read_soft_reset_status(ctrl_info);
3554         if (status & PQI_SOFT_RESET_INITIATE)
3555             return RESET_INITIATE_DRIVER;
3556 
3557         if (status & PQI_SOFT_RESET_ABORT)
3558             return RESET_ABORT;
3559 
3560         if (!sis_is_firmware_running(ctrl_info))
3561             return RESET_NORESPONSE;
3562 
3563         if (time_after(jiffies, timeout)) {
3564             dev_warn(&ctrl_info->pci_dev->dev,
3565                 "timed out waiting for soft reset status\n");
3566             return RESET_TIMEDOUT;
3567         }
3568 
3569         ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3570     }
3571 }
3572 
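/*
 * Summary of the function below: finish an Online Firmware Activation by
 * resetting the controller. If the soft reset handshake is supported, the
 * reset status is polled first; otherwise the firmware is assumed to
 * initiate the reset itself. Driver-initiated and timed-out resets fall
 * through into the firmware-initiated path, which drops back to SIS mode
 * and restarts the controller; an abort unwinds the OFA state, and any
 * other outcome takes the controller offline.
 */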
3573 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
3574 {
3575     int rc;
3576     unsigned int delay_secs;
3577     enum pqi_soft_reset_status reset_status;
3578 
3579     if (ctrl_info->soft_reset_handshake_supported)
3580         reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
3581     else
3582         reset_status = RESET_INITIATE_FIRMWARE;
3583 
3584     delay_secs = PQI_POST_RESET_DELAY_SECS;
3585 
3586     switch (reset_status) {
3587     case RESET_TIMEDOUT:
3588         delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
3589         fallthrough;
3590     case RESET_INITIATE_DRIVER:
3591         dev_info(&ctrl_info->pci_dev->dev,
3592                 "Online Firmware Activation: resetting controller\n");
3593         sis_soft_reset(ctrl_info);
3594         fallthrough;
3595     case RESET_INITIATE_FIRMWARE:
3596         ctrl_info->pqi_mode_enabled = false;
3597         pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
3598         rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
3599         pqi_ofa_free_host_buffer(ctrl_info);
3600         pqi_ctrl_ofa_done(ctrl_info);
3601         dev_info(&ctrl_info->pci_dev->dev,
3602                 "Online Firmware Activation: %s\n",
3603                 rc == 0 ? "SUCCESS" : "FAILED");
3604         break;
3605     case RESET_ABORT:
3606         dev_info(&ctrl_info->pci_dev->dev,
3607                 "Online Firmware Activation ABORTED\n");
3608         if (ctrl_info->soft_reset_handshake_supported)
3609             pqi_clear_soft_reset_status(ctrl_info);
3610         pqi_ofa_free_host_buffer(ctrl_info);
3611         pqi_ctrl_ofa_done(ctrl_info);
3612         pqi_ofa_ctrl_unquiesce(ctrl_info);
3613         break;
3614     case RESET_NORESPONSE:
3615         fallthrough;
3616     default:
3617         dev_err(&ctrl_info->pci_dev->dev,
3618             "unexpected Online Firmware Activation reset status: 0x%x\n",
3619             reset_status);
3620         pqi_ofa_free_host_buffer(ctrl_info);
3621         pqi_ctrl_ofa_done(ctrl_info);
3622         pqi_ofa_ctrl_unquiesce(ctrl_info);
3623         pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
3624         break;
3625     }
3626 }
3627 
3628 static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
3629 {
3630     struct pqi_ctrl_info *ctrl_info;
3631 
3632     ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
3633 
3634     pqi_ctrl_ofa_start(ctrl_info);
3635     pqi_ofa_setup_host_buffer(ctrl_info);
3636     pqi_ofa_host_memory_update(ctrl_info);
3637 }
3638 
3639 static void pqi_ofa_quiesce_worker(struct work_struct *work)
3640 {
3641     struct pqi_ctrl_info *ctrl_info;
3642     struct pqi_event *event;
3643 
3644     ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
3645 
3646     event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
3647 
3648     pqi_ofa_ctrl_quiesce(ctrl_info);
3649     pqi_acknowledge_event(ctrl_info, event);
3650     pqi_process_soft_reset(ctrl_info);
3651 }
3652 
3653 static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3654     struct pqi_event *event)
3655 {
3656     bool ack_event;
3657 
3658     ack_event = true;
3659 
3660     switch (event->event_id) {
3661     case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3662         dev_info(&ctrl_info->pci_dev->dev,
3663             "received Online Firmware Activation memory allocation request\n");
3664         schedule_work(&ctrl_info->ofa_memory_alloc_work);
3665         break;
3666     case PQI_EVENT_OFA_QUIESCE:
3667         dev_info(&ctrl_info->pci_dev->dev,
3668             "received Online Firmware Activation quiesce request\n");
3669         schedule_work(&ctrl_info->ofa_quiesce_work);
3670         ack_event = false;
3671         break;
3672     case PQI_EVENT_OFA_CANCELED:
3673         dev_info(&ctrl_info->pci_dev->dev,
3674             "received Online Firmware Activation cancel request: reason: %u\n",
3675             ctrl_info->ofa_cancel_reason);
3676         pqi_ofa_free_host_buffer(ctrl_info);
3677         pqi_ctrl_ofa_done(ctrl_info);
3678         break;
3679     default:
3680         dev_err(&ctrl_info->pci_dev->dev,
3681             "received unknown Online Firmware Activation request: event ID: %u\n",
3682             event->event_id);
3683         break;
3684     }
3685 
3686     return ack_event;
3687 }
3688 
3689 static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
3690 {
3691     unsigned long flags;
3692     struct pqi_scsi_dev *device;
3693 
3694     spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
3695 
3696     list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
3697         if (device->raid_bypass_enabled)
3698             device->raid_bypass_enabled = false;
3699 
3700     spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
3701 }
3702 
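/*
 * Summary of the function below: deferred event handling. Each pending
 * event is either handed to the OFA state machine or acknowledged directly;
 * non-OFA events also schedule a delayed rescan (5 seconds), with
 * logical-device and AIO-state-change events additionally flagging a
 * logical volume rescan or disabling RAID bypass.
 */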
3703 static void pqi_event_worker(struct work_struct *work)
3704 {
3705     unsigned int i;
3706     bool rescan_needed;
3707     struct pqi_ctrl_info *ctrl_info;
3708     struct pqi_event *event;
3709     bool ack_event;
3710 
3711     ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3712 
3713     pqi_ctrl_busy(ctrl_info);
3714     pqi_wait_if_ctrl_blocked(ctrl_info);
3715     if (pqi_ctrl_offline(ctrl_info))
3716         goto out;
3717 
3718     rescan_needed = false;
3719     event = ctrl_info->events;
3720     for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
3721         if (event->pending) {
3722             event->pending = false;
3723             if (event->event_type == PQI_EVENT_TYPE_OFA) {
3724                 ack_event = pqi_ofa_process_event(ctrl_info, event);
3725             } else {
3726                 ack_event = true;
3727                 rescan_needed = true;
3728                 if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
3729                     ctrl_info->logical_volume_rescan_needed = true;
3730                 else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
3731                     pqi_disable_raid_bypass(ctrl_info);
3732             }
3733             if (ack_event)
3734                 pqi_acknowledge_event(ctrl_info, event);
3735         }
3736         event++;
3737     }
3738 
3739 #define PQI_RESCAN_WORK_FOR_EVENT_DELAY     (5 * HZ)
3740 
3741     if (rescan_needed)
3742         pqi_schedule_rescan_worker_with_delay(ctrl_info,
3743             PQI_RESCAN_WORK_FOR_EVENT_DELAY);
3744 
3745 out:
3746     pqi_ctrl_unbusy(ctrl_info);
3747 }
3748 
3749 #define PQI_HEARTBEAT_TIMER_INTERVAL    (10 * HZ)
3750 
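/*
 * Summary of the function below: heartbeat check, re-armed every 10 seconds.
 * The controller is declared dead (and taken offline with PQI_NO_HEARTBEAT)
 * only if both the driver's interrupt count and the firmware heartbeat
 * counter are unchanged since the previous timer tick.
 */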
3751 static void pqi_heartbeat_timer_handler(struct timer_list *t)
3752 {
3753     int num_interrupts;
3754     u32 heartbeat_count;
3755     struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
3756 
3757     pqi_check_ctrl_health(ctrl_info);
3758     if (pqi_ctrl_offline(ctrl_info))
3759         return;
3760 
3761     num_interrupts = atomic_read(&ctrl_info->num_interrupts);
3762     heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
3763 
3764     if (num_interrupts == ctrl_info->previous_num_interrupts) {
3765         if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3766             dev_err(&ctrl_info->pci_dev->dev,
3767                 "no heartbeat detected - last heartbeat count: %u\n",
3768                 heartbeat_count);
3769             pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT);
3770             return;
3771         }
3772     } else {
3773         ctrl_info->previous_num_interrupts = num_interrupts;
3774     }
3775 
3776     ctrl_info->previous_heartbeat_count = heartbeat_count;
3777     mod_timer(&ctrl_info->heartbeat_timer,
3778         jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3779 }
3780 
3781 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3782 {
3783     if (!ctrl_info->heartbeat_counter)
3784         return;
3785 
3786     ctrl_info->previous_num_interrupts =
3787         atomic_read(&ctrl_info->num_interrupts);
3788     ctrl_info->previous_heartbeat_count =
3789         pqi_read_heartbeat_counter(ctrl_info);
3790 
3791     ctrl_info->heartbeat_timer.expires =
3792         jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
3793     add_timer(&ctrl_info->heartbeat_timer);
3794 }
3795 
3796 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3797 {
3798     del_timer_sync(&ctrl_info->heartbeat_timer);
3799 }
3800 
3801 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
3802     struct pqi_event *event, struct pqi_event_response *response)
3803 {
3804     switch (event->event_id) {
3805     case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3806         ctrl_info->ofa_bytes_requested =
3807             get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
3808         break;
3809     case PQI_EVENT_OFA_CANCELED:
3810         ctrl_info->ofa_cancel_reason =
3811             get_unaligned_le16(&response->data.ofa_cancelled.reason);
3812         break;
3813     }
3814 }
3815 
3816 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3817 {
3818     int num_events;
3819     pqi_index_t oq_pi;
3820     pqi_index_t oq_ci;
3821     struct pqi_event_queue *event_queue;
3822     struct pqi_event_response *response;
3823     struct pqi_event *event;
3824     int event_index;
3825 
3826     event_queue = &ctrl_info->event_queue;
3827     num_events = 0;
3828     oq_ci = event_queue->oq_ci_copy;
3829 
3830     while (1) {
3831         oq_pi = readl(event_queue->oq_pi);
3832         if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
3833             pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE);
3834             dev_err(&ctrl_info->pci_dev->dev,
3835                 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3836                 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
3837             return -1;
3838         }
3839 
3840         if (oq_pi == oq_ci)
3841             break;
3842 
3843         num_events++;
3844         response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3845 
3846         event_index = pqi_event_type_to_event_index(response->event_type);
3847 
3848         if (event_index >= 0 && response->request_acknowledge) {
3849             event = &ctrl_info->events[event_index];
3850             event->pending = true;
3851             event->event_type = response->event_type;
3852             event->event_id = get_unaligned_le16(&response->event_id);
3853             event->additional_event_id =
3854                 get_unaligned_le32(&response->additional_event_id);
3855             if (event->event_type == PQI_EVENT_TYPE_OFA)
3856                 pqi_ofa_capture_event_payload(ctrl_info, event, response);
3857         }
3858 
3859         oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3860     }
3861 
3862     if (num_events) {
3863         event_queue->oq_ci_copy = oq_ci;
3864         writel(oq_ci, event_queue->oq_ci);
3865         schedule_work(&ctrl_info->event_work);
3866     }
3867 
3868     return num_events;
3869 }
3870 
3871 #define PQI_LEGACY_INTX_MASK    0x1
3872 
3873 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
3874 {
3875     u32 intx_mask;
3876     struct pqi_device_registers __iomem *pqi_registers;
3877     volatile void __iomem *register_addr;
3878 
3879     pqi_registers = ctrl_info->pqi_registers;
3880 
3881     if (enable_intx)
3882         register_addr = &pqi_registers->legacy_intx_mask_clear;
3883     else
3884         register_addr = &pqi_registers->legacy_intx_mask_set;
3885 
3886     intx_mask = readl(register_addr);
3887     intx_mask |= PQI_LEGACY_INTX_MASK;
3888     writel(intx_mask, register_addr);
3889 }
3890 
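/*
 * Summary of the function below: transition between MSI-X, legacy INTx, and
 * no-interrupt modes. Leaving INTx always masks the legacy interrupt first;
 * dropping from MSI-X to IRQ_MODE_NONE needs no register writes at all.
 */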
3891 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3892     enum pqi_irq_mode new_mode)
3893 {
3894     switch (ctrl_info->irq_mode) {
3895     case IRQ_MODE_MSIX:
3896         switch (new_mode) {
3897         case IRQ_MODE_MSIX:
3898             break;
3899         case IRQ_MODE_INTX:
3900             pqi_configure_legacy_intx(ctrl_info, true);
3901             sis_enable_intx(ctrl_info);
3902             break;
3903         case IRQ_MODE_NONE:
3904             break;
3905         }
3906         break;
3907     case IRQ_MODE_INTX:
3908         switch (new_mode) {
3909         case IRQ_MODE_MSIX:
3910             pqi_configure_legacy_intx(ctrl_info, false);
3911             sis_enable_msix(ctrl_info);
3912             break;
3913         case IRQ_MODE_INTX:
3914             break;
3915         case IRQ_MODE_NONE:
3916             pqi_configure_legacy_intx(ctrl_info, false);
3917             break;
3918         }
3919         break;
3920     case IRQ_MODE_NONE:
3921         switch (new_mode) {
3922         case IRQ_MODE_MSIX:
3923             sis_enable_msix(ctrl_info);
3924             break;
3925         case IRQ_MODE_INTX:
3926             pqi_configure_legacy_intx(ctrl_info, true);
3927             sis_enable_intx(ctrl_info);
3928             break;
3929         case IRQ_MODE_NONE:
3930             break;
3931         }
3932         break;
3933     }
3934 
3935     ctrl_info->irq_mode = new_mode;
3936 }
3937 
3938 #define PQI_LEGACY_INTX_PENDING     0x1
3939 
3940 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3941 {
3942     bool valid_irq;
3943     u32 intx_status;
3944 
3945     switch (ctrl_info->irq_mode) {
3946     case IRQ_MODE_MSIX:
3947         valid_irq = true;
3948         break;
3949     case IRQ_MODE_INTX:
3950         intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
3951         if (intx_status & PQI_LEGACY_INTX_PENDING)
3952             valid_irq = true;
3953         else
3954             valid_irq = false;
3955         break;
3956     case IRQ_MODE_NONE:
3957     default:
3958         valid_irq = false;
3959         break;
3960     }
3961 
3962     return valid_irq;
3963 }
3964 
3965 static irqreturn_t pqi_irq_handler(int irq, void *data)
3966 {
3967     struct pqi_ctrl_info *ctrl_info;
3968     struct pqi_queue_group *queue_group;
3969     int num_io_responses_handled;
3970     int num_events_handled;
3971 
3972     queue_group = data;
3973     ctrl_info = queue_group->ctrl_info;
3974 
3975     if (!pqi_is_valid_irq(ctrl_info))
3976         return IRQ_NONE;
3977 
3978     num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3979     if (num_io_responses_handled < 0)
3980         goto out;
3981 
3982     if (irq == ctrl_info->event_irq) {
3983         num_events_handled = pqi_process_event_intr(ctrl_info);
3984         if (num_events_handled < 0)
3985             goto out;
3986     } else {
3987         num_events_handled = 0;
3988     }
3989 
3990     if (num_io_responses_handled + num_events_handled > 0)
3991         atomic_inc(&ctrl_info->num_interrupts);
3992 
3993     pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3994     pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3995 
3996 out:
3997     return IRQ_HANDLED;
3998 }
3999 
4000 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
4001 {
4002     struct pci_dev *pci_dev = ctrl_info->pci_dev;
4003     int i;
4004     int rc;
4005 
4006     ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
4007 
4008     for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
4009         rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
4010             DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
4011         if (rc) {
4012             dev_err(&pci_dev->dev,
4013                 "irq %u init failed with error %d\n",
4014                 pci_irq_vector(pci_dev, i), rc);
4015             return rc;
4016         }
4017         ctrl_info->num_msix_vectors_initialized++;
4018     }
4019 
4020     return 0;
4021 }
4022 
4023 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
4024 {
4025     int i;
4026 
4027     for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
4028         free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
4029             &ctrl_info->queue_groups[i]);
4030 
4031     ctrl_info->num_msix_vectors_initialized = 0;
4032 }
4033 
4034 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4035 {
4036     int num_vectors_enabled;
4037     unsigned int flags = PCI_IRQ_MSIX;
4038 
4039     if (!pqi_disable_managed_interrupts)
4040         flags |= PCI_IRQ_AFFINITY;
4041 
4042     num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
4043             PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
4044             flags);
4045     if (num_vectors_enabled < 0) {
4046         dev_err(&ctrl_info->pci_dev->dev,
4047             "MSI-X init failed with error %d\n",
4048             num_vectors_enabled);
4049         return num_vectors_enabled;
4050     }
4051 
4052     ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
4053     ctrl_info->irq_mode = IRQ_MODE_MSIX;
4054     return 0;
4055 }
4056 
4057 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4058 {
4059     if (ctrl_info->num_msix_vectors_enabled) {
4060         pci_free_irq_vectors(ctrl_info->pci_dev);
4061         ctrl_info->num_msix_vectors_enabled = 0;
4062     }
4063 }
4064 
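/*
 * Summary of the function below: allocate one DMA-coherent region for all
 * operational queues. A first pass over a NULL "aligned_pointer" computes
 * the total length needed, including per-array alignment padding, for two
 * inbound element arrays (RAID + AIO) and one outbound element array per
 * queue group, the event queue elements, and the queue index words. The
 * rest of the function carves the real allocation up in the same order and
 * records both CPU and bus addresses for each piece.
 */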
4065 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
4066 {
4067     unsigned int i;
4068     size_t alloc_length;
4069     size_t element_array_length_per_iq;
4070     size_t element_array_length_per_oq;
4071     void *element_array;
4072     void __iomem *next_queue_index;
4073     void *aligned_pointer;
4074     unsigned int num_inbound_queues;
4075     unsigned int num_outbound_queues;
4076     unsigned int num_queue_indexes;
4077     struct pqi_queue_group *queue_group;
4078 
4079     element_array_length_per_iq =
4080         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
4081         ctrl_info->num_elements_per_iq;
4082     element_array_length_per_oq =
4083         PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
4084         ctrl_info->num_elements_per_oq;
4085     num_inbound_queues = ctrl_info->num_queue_groups * 2;
4086     num_outbound_queues = ctrl_info->num_queue_groups;
4087     num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
4088 
4089     aligned_pointer = NULL;
4090 
4091     for (i = 0; i < num_inbound_queues; i++) {
4092         aligned_pointer = PTR_ALIGN(aligned_pointer,
4093             PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4094         aligned_pointer += element_array_length_per_iq;
4095     }
4096 
4097     for (i = 0; i < num_outbound_queues; i++) {
4098         aligned_pointer = PTR_ALIGN(aligned_pointer,
4099             PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4100         aligned_pointer += element_array_length_per_oq;
4101     }
4102 
4103     aligned_pointer = PTR_ALIGN(aligned_pointer,
4104         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4105     aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4106         PQI_EVENT_OQ_ELEMENT_LENGTH;
4107 
4108     for (i = 0; i < num_queue_indexes; i++) {
4109         aligned_pointer = PTR_ALIGN(aligned_pointer,
4110             PQI_OPERATIONAL_INDEX_ALIGNMENT);
4111         aligned_pointer += sizeof(pqi_index_t);
4112     }
4113 
4114     alloc_length = (size_t)aligned_pointer +
4115         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4116 
4117     alloc_length += PQI_EXTRA_SGL_MEMORY;
4118 
4119     ctrl_info->queue_memory_base =
4120         dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4121                    &ctrl_info->queue_memory_base_dma_handle,
4122                    GFP_KERNEL);
4123 
4124     if (!ctrl_info->queue_memory_base)
4125         return -ENOMEM;
4126 
4127     ctrl_info->queue_memory_length = alloc_length;
4128 
4129     element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
4130         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4131 
4132     for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4133         queue_group = &ctrl_info->queue_groups[i];
4134         queue_group->iq_element_array[RAID_PATH] = element_array;
4135         queue_group->iq_element_array_bus_addr[RAID_PATH] =
4136             ctrl_info->queue_memory_base_dma_handle +
4137                 (element_array - ctrl_info->queue_memory_base);
4138         element_array += element_array_length_per_iq;
4139         element_array = PTR_ALIGN(element_array,
4140             PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4141         queue_group->iq_element_array[AIO_PATH] = element_array;
4142         queue_group->iq_element_array_bus_addr[AIO_PATH] =
4143             ctrl_info->queue_memory_base_dma_handle +
4144             (element_array - ctrl_info->queue_memory_base);
4145         element_array += element_array_length_per_iq;
4146         element_array = PTR_ALIGN(element_array,
4147             PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4148     }
4149 
4150     for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4151         queue_group = &ctrl_info->queue_groups[i];
4152         queue_group->oq_element_array = element_array;
4153         queue_group->oq_element_array_bus_addr =
4154             ctrl_info->queue_memory_base_dma_handle +
4155             (element_array - ctrl_info->queue_memory_base);
4156         element_array += element_array_length_per_oq;
4157         element_array = PTR_ALIGN(element_array,
4158             PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4159     }
4160 
4161     ctrl_info->event_queue.oq_element_array = element_array;
4162     ctrl_info->event_queue.oq_element_array_bus_addr =
4163         ctrl_info->queue_memory_base_dma_handle +
4164         (element_array - ctrl_info->queue_memory_base);
4165     element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4166         PQI_EVENT_OQ_ELEMENT_LENGTH;
4167 
4168     next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
4169         PQI_OPERATIONAL_INDEX_ALIGNMENT);
4170 
4171     for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4172         queue_group = &ctrl_info->queue_groups[i];
4173         queue_group->iq_ci[RAID_PATH] = next_queue_index;
4174         queue_group->iq_ci_bus_addr[RAID_PATH] =
4175             ctrl_info->queue_memory_base_dma_handle +
4176             (next_queue_index -
4177             (void __iomem *)ctrl_info->queue_memory_base);
4178         next_queue_index += sizeof(pqi_index_t);
4179         next_queue_index = PTR_ALIGN(next_queue_index,
4180             PQI_OPERATIONAL_INDEX_ALIGNMENT);
4181         queue_group->iq_ci[AIO_PATH] = next_queue_index;
4182         queue_group->iq_ci_bus_addr[AIO_PATH] =
4183             ctrl_info->queue_memory_base_dma_handle +
4184             (next_queue_index -
4185             (void __iomem *)ctrl_info->queue_memory_base);
4186         next_queue_index += sizeof(pqi_index_t);
4187         next_queue_index = PTR_ALIGN(next_queue_index,
4188             PQI_OPERATIONAL_INDEX_ALIGNMENT);
4189         queue_group->oq_pi = next_queue_index;
4190         queue_group->oq_pi_bus_addr =
4191             ctrl_info->queue_memory_base_dma_handle +
4192             (next_queue_index -
4193             (void __iomem *)ctrl_info->queue_memory_base);
4194         next_queue_index += sizeof(pqi_index_t);
4195         next_queue_index = PTR_ALIGN(next_queue_index,
4196             PQI_OPERATIONAL_INDEX_ALIGNMENT);
4197     }
4198 
4199     ctrl_info->event_queue.oq_pi = next_queue_index;
4200     ctrl_info->event_queue.oq_pi_bus_addr =
4201         ctrl_info->queue_memory_base_dma_handle +
4202         (next_queue_index -
4203         (void __iomem *)ctrl_info->queue_memory_base);
4204 
4205     return 0;
4206 }
4207 
4208 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
4209 {
4210     unsigned int i;
4211     u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4212     u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4213 
4214     /*
4215      * Initialize the backpointers to the controller structure in
4216      * each operational queue group structure.
4217      */
4218     for (i = 0; i < ctrl_info->num_queue_groups; i++)
4219         ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
4220 
4221     /*
4222      * Assign IDs to all operational queues.  Note that the IDs
4223      * assigned to operational IQs are independent of the IDs
4224      * assigned to operational OQs.
4225      */
4226     ctrl_info->event_queue.oq_id = next_oq_id++;
4227     for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4228         ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
4229         ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
4230         ctrl_info->queue_groups[i].oq_id = next_oq_id++;
4231     }
4232 
4233     /*
4234      * Assign MSI-X table entry indexes to all queues.  Note that the
4235      * interrupt for the event queue is shared with the first queue group.
4236      */
4237     ctrl_info->event_queue.int_msg_num = 0;
4238     for (i = 0; i < ctrl_info->num_queue_groups; i++)
4239         ctrl_info->queue_groups[i].int_msg_num = i;
4240 
4241     for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4242         spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
4243         spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
4244         INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
4245         INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
4246     }
4247 }
4248 
4249 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
4250 {
4251     size_t alloc_length;
4252     struct pqi_admin_queues_aligned *admin_queues_aligned;
4253     struct pqi_admin_queues *admin_queues;
4254 
4255     alloc_length = sizeof(struct pqi_admin_queues_aligned) +
4256         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4257 
4258     ctrl_info->admin_queue_memory_base =
4259         dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4260                    &ctrl_info->admin_queue_memory_base_dma_handle,
4261                    GFP_KERNEL);
4262 
4263     if (!ctrl_info->admin_queue_memory_base)
4264         return -ENOMEM;
4265 
4266     ctrl_info->admin_queue_memory_length = alloc_length;
4267 
4268     admin_queues = &ctrl_info->admin_queues;
4269     admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
4270         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4271     admin_queues->iq_element_array =
4272         &admin_queues_aligned->iq_element_array;
4273     admin_queues->oq_element_array =
4274         &admin_queues_aligned->oq_element_array;
4275     admin_queues->iq_ci =
4276         (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
4277     admin_queues->oq_pi =
4278         (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
4279 
4280     admin_queues->iq_element_array_bus_addr =
4281         ctrl_info->admin_queue_memory_base_dma_handle +
4282         (admin_queues->iq_element_array -
4283         ctrl_info->admin_queue_memory_base);
4284     admin_queues->oq_element_array_bus_addr =
4285         ctrl_info->admin_queue_memory_base_dma_handle +
4286         (admin_queues->oq_element_array -
4287         ctrl_info->admin_queue_memory_base);
4288     admin_queues->iq_ci_bus_addr =
4289         ctrl_info->admin_queue_memory_base_dma_handle +
4290         ((void __iomem *)admin_queues->iq_ci -
4291         (void __iomem *)ctrl_info->admin_queue_memory_base);
4292     admin_queues->oq_pi_bus_addr =
4293         ctrl_info->admin_queue_memory_base_dma_handle +
4294         ((void __iomem *)admin_queues->oq_pi -
4295         (void __iomem *)ctrl_info->admin_queue_memory_base);
4296 
4297     return 0;
4298 }
4299 
4300 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES      HZ
4301 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS  1
4302 
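/*
 * Summary of the function below: create the admin queue pair. The element
 * array and index addresses are programmed into the PQI device registers,
 * the "create admin queue pair" function code is written, and the status
 * register is polled (1 ms interval, 1-second budget) until it returns to
 * IDLE. The admin doorbell offsets are read only after that point, since
 * they are not valid until the create operation has completed.
 */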
4303 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
4304 {
4305     struct pqi_device_registers __iomem *pqi_registers;
4306     struct pqi_admin_queues *admin_queues;
4307     unsigned long timeout;
4308     u8 status;
4309     u32 reg;
4310 
4311     pqi_registers = ctrl_info->pqi_registers;
4312     admin_queues = &ctrl_info->admin_queues;
4313 
4314     writeq((u64)admin_queues->iq_element_array_bus_addr,
4315         &pqi_registers->admin_iq_element_array_addr);
4316     writeq((u64)admin_queues->oq_element_array_bus_addr,
4317         &pqi_registers->admin_oq_element_array_addr);
4318     writeq((u64)admin_queues->iq_ci_bus_addr,
4319         &pqi_registers->admin_iq_ci_addr);
4320     writeq((u64)admin_queues->oq_pi_bus_addr,
4321         &pqi_registers->admin_oq_pi_addr);
4322 
4323     reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
4324         (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
4325         (admin_queues->int_msg_num << 16);
4326     writel(reg, &pqi_registers->admin_iq_num_elements);
4327 
4328     writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
4329         &pqi_registers->function_and_status_code);
4330 
4331     timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
4332     while (1) {
4333         msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
4334         status = readb(&pqi_registers->function_and_status_code);
4335         if (status == PQI_STATUS_IDLE)
4336             break;
4337         if (time_after(jiffies, timeout))
4338             return -ETIMEDOUT;
4339     }
4340 
4341     /*
4342      * The offset registers are not initialized to the correct
4343      * offsets until *after* the create admin queue pair command
4344      * completes successfully.
4345      */
4346     admin_queues->iq_pi = ctrl_info->iomem_base +
4347         PQI_DEVICE_REGISTERS_OFFSET +
4348         readq(&pqi_registers->admin_iq_pi_offset);
4349     admin_queues->oq_ci = ctrl_info->iomem_base +
4350         PQI_DEVICE_REGISTERS_OFFSET +
4351         readq(&pqi_registers->admin_oq_ci_offset);
4352 
4353     return 0;
4354 }
4355 
4356 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
4357     struct pqi_general_admin_request *request)
4358 {
4359     struct pqi_admin_queues *admin_queues;
4360     void *next_element;
4361     pqi_index_t iq_pi;
4362 
4363     admin_queues = &ctrl_info->admin_queues;
4364     iq_pi = admin_queues->iq_pi_copy;
4365 
4366     next_element = admin_queues->iq_element_array +
4367         (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
4368 
4369     memcpy(next_element, request, sizeof(*request));
4370 
4371     iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
4372     admin_queues->iq_pi_copy = iq_pi;
4373 
4374     /*
4375      * This write notifies the controller that an IU is available to be
4376      * processed.
4377      */
4378     writel(iq_pi, admin_queues->iq_pi);
4379 }
4380 
4381 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS  60
4382 
4383 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
4384     struct pqi_general_admin_response *response)
4385 {
4386     struct pqi_admin_queues *admin_queues;
4387     pqi_index_t oq_pi;
4388     pqi_index_t oq_ci;
4389     unsigned long timeout;
4390 
4391     admin_queues = &ctrl_info->admin_queues;
4392     oq_ci = admin_queues->oq_ci_copy;
4393 
4394     timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
4395 
4396     while (1) {
4397         oq_pi = readl(admin_queues->oq_pi);
4398         if (oq_pi != oq_ci)
4399             break;
4400         if (time_after(jiffies, timeout)) {
4401             dev_err(&ctrl_info->pci_dev->dev,
4402                 "timed out waiting for admin response\n");
4403             return -ETIMEDOUT;
4404         }
4405         if (!sis_is_firmware_running(ctrl_info))
4406             return -ENXIO;
4407         usleep_range(1000, 2000);
4408     }
4409 
4410     memcpy(response, admin_queues->oq_element_array +
4411         (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
4412 
4413     oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
4414     admin_queues->oq_ci_copy = oq_ci;
4415     writel(oq_ci, admin_queues->oq_ci);
4416 
4417     return 0;
4418 }
4419 
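/*
 * Summary of the function below: move queued requests for one I/O path into
 * that path's inbound queue. Under the per-path submit lock, each request on
 * the list is copied into the element array if enough free elements remain;
 * an IU that would run past the end of the array is split into two
 * memcpy()s, with the tail wrapping to element 0. The producer index
 * doorbell is written once, at the end, and only if something was actually
 * submitted. Requests that do not fit are left on the list for a later call.
 */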
4420 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
4421     struct pqi_queue_group *queue_group, enum pqi_io_path path,
4422     struct pqi_io_request *io_request)
4423 {
4424     struct pqi_io_request *next;
4425     void *next_element;
4426     pqi_index_t iq_pi;
4427     pqi_index_t iq_ci;
4428     size_t iu_length;
4429     unsigned long flags;
4430     unsigned int num_elements_needed;
4431     unsigned int num_elements_to_end_of_queue;
4432     size_t copy_count;
4433     struct pqi_iu_header *request;
4434 
4435     spin_lock_irqsave(&queue_group->submit_lock[path], flags);
4436 
4437     if (io_request) {
4438         io_request->queue_group = queue_group;
4439         list_add_tail(&io_request->request_list_entry,
4440             &queue_group->request_list[path]);
4441     }
4442 
4443     iq_pi = queue_group->iq_pi_copy[path];
4444 
4445     list_for_each_entry_safe(io_request, next,
4446         &queue_group->request_list[path], request_list_entry) {
4447 
4448         request = io_request->iu;
4449 
4450         iu_length = get_unaligned_le16(&request->iu_length) +
4451             PQI_REQUEST_HEADER_LENGTH;
4452         num_elements_needed =
4453             DIV_ROUND_UP(iu_length,
4454                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4455 
4456         iq_ci = readl(queue_group->iq_ci[path]);
4457 
4458         if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
4459             ctrl_info->num_elements_per_iq))
4460             break;
4461 
4462         put_unaligned_le16(queue_group->oq_id,
4463             &request->response_queue_id);
4464 
4465         next_element = queue_group->iq_element_array[path] +
4466             (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4467 
4468         num_elements_to_end_of_queue =
4469             ctrl_info->num_elements_per_iq - iq_pi;
4470 
4471         if (num_elements_needed <= num_elements_to_end_of_queue) {
4472             memcpy(next_element, request, iu_length);
4473         } else {
4474             copy_count = num_elements_to_end_of_queue *
4475                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4476             memcpy(next_element, request, copy_count);
4477             memcpy(queue_group->iq_element_array[path],
4478                 (u8 *)request + copy_count,
4479                 iu_length - copy_count);
4480         }
4481 
4482         iq_pi = (iq_pi + num_elements_needed) %
4483             ctrl_info->num_elements_per_iq;
4484 
4485         list_del(&io_request->request_list_entry);
4486     }
4487 
4488     if (iq_pi != queue_group->iq_pi_copy[path]) {
4489         queue_group->iq_pi_copy[path] = iq_pi;
4490         /*
4491          * This write notifies the controller that one or more IUs are
4492          * available to be processed.
4493          */
4494         writel(iq_pi, queue_group->iq_pi[path]);
4495     }
4496 
4497     spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
4498 }
4499 
4500 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS     10
4501 
4502 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
4503     struct completion *wait)
4504 {
4505     int rc;
4506 
4507     while (1) {
4508         if (wait_for_completion_io_timeout(wait,
4509             PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
4510             rc = 0;
4511             break;
4512         }
4513 
4514         pqi_check_ctrl_health(ctrl_info);
4515         if (pqi_ctrl_offline(ctrl_info)) {
4516             rc = -ENXIO;
4517             break;
4518         }
4519     }
4520 
4521     return rc;
4522 }
4523 
4524 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
4525     void *context)
4526 {
4527     struct completion *waiting = context;
4528 
4529     complete(waiting);
4530 }
4531 
4532 static int pqi_process_raid_io_error_synchronous(
4533     struct pqi_raid_error_info *error_info)
4534 {
4535     int rc = -EIO;
4536 
4537     switch (error_info->data_out_result) {
4538     case PQI_DATA_IN_OUT_GOOD:
4539         if (error_info->status == SAM_STAT_GOOD)
4540             rc = 0;
4541         break;
4542     case PQI_DATA_IN_OUT_UNDERFLOW:
4543         if (error_info->status == SAM_STAT_GOOD ||
4544             error_info->status == SAM_STAT_CHECK_CONDITION)
4545             rc = 0;
4546         break;
4547     case PQI_DATA_IN_OUT_ABORTED:
4548         rc = PQI_CMD_STATUS_ABORTED;
4549         break;
4550     }
4551 
4552     return rc;
4553 }
4554 
4555 static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
4556 {
4557     return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
4558 }
4559 
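/*
 * Summary of the function below: submit a RAID-path request and wait for it
 * to complete. Submissions are serialized with sync_request_sem
 * (interruptibly if the caller passes PQI_SYNC_FLAGS_INTERRUPTABLE),
 * blockable requests first wait out any controller quiesce, and completion
 * is signalled through an on-stack completion by
 * pqi_raid_synchronous_complete(). If the caller did not supply an
 * error_info buffer, any error IU is folded into the return code by
 * pqi_process_raid_io_error_synchronous().
 */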
4560 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4561     struct pqi_iu_header *request, unsigned int flags,
4562     struct pqi_raid_error_info *error_info)
4563 {
4564     int rc = 0;
4565     struct pqi_io_request *io_request;
4566     size_t iu_length;
4567     DECLARE_COMPLETION_ONSTACK(wait);
4568 
4569     if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4570         if (down_interruptible(&ctrl_info->sync_request_sem))
4571             return -ERESTARTSYS;
4572     } else {
4573         down(&ctrl_info->sync_request_sem);
4574     }
4575 
4576     pqi_ctrl_busy(ctrl_info);
4577     /*
4578      * Wait for other admin queue updates such as:
4579      * config table changes, OFA memory updates, ...
4580      */
4581     if (pqi_is_blockable_request(request))
4582         pqi_wait_if_ctrl_blocked(ctrl_info);
4583 
4584     if (pqi_ctrl_offline(ctrl_info)) {
4585         rc = -ENXIO;
4586         goto out;
4587     }
4588 
4589     io_request = pqi_alloc_io_request(ctrl_info);
4590 
4591     put_unaligned_le16(io_request->index,
4592         &(((struct pqi_raid_path_request *)request)->request_id));
4593 
4594     if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4595         ((struct pqi_raid_path_request *)request)->error_index =
4596             ((struct pqi_raid_path_request *)request)->request_id;
4597 
4598     iu_length = get_unaligned_le16(&request->iu_length) +
4599         PQI_REQUEST_HEADER_LENGTH;
4600     memcpy(io_request->iu, request, iu_length);
4601 
4602     io_request->io_complete_callback = pqi_raid_synchronous_complete;
4603     io_request->context = &wait;
4604 
4605     pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4606         io_request);
4607 
4608     pqi_wait_for_completion_io(ctrl_info, &wait);
4609 
4610     if (error_info) {
4611         if (io_request->error_info)
4612             memcpy(error_info, io_request->error_info, sizeof(*error_info));
4613         else
4614             memset(error_info, 0, sizeof(*error_info));
4615     } else if (rc == 0 && io_request->error_info) {
4616         rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
4617     }
4618 
4619     pqi_free_io_request(io_request);
4620 
4621 out:
4622     pqi_ctrl_unbusy(ctrl_info);
4623     up(&ctrl_info->sync_request_sem);
4624 
4625     return rc;
4626 }
4627 
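/*
 * Sanity-check a general admin response: the IU type, IU length, echoed
 * function code, and status must all match what was expected.
 */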
4628 static int pqi_validate_admin_response(
4629     struct pqi_general_admin_response *response, u8 expected_function_code)
4630 {
4631     if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4632         return -EINVAL;
4633 
4634     if (get_unaligned_le16(&response->header.iu_length) !=
4635         PQI_GENERAL_ADMIN_IU_LENGTH)
4636         return -EINVAL;
4637 
4638     if (response->function_code != expected_function_code)
4639         return -EINVAL;
4640 
4641     if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4642         return -EINVAL;
4643 
4644     return 0;
4645 }
4646 
4647 static int pqi_submit_admin_request_synchronous(
4648     struct pqi_ctrl_info *ctrl_info,
4649     struct pqi_general_admin_request *request,
4650     struct pqi_general_admin_response *response)
4651 {
4652     int rc;
4653 
4654     pqi_submit_admin_request(ctrl_info, request);
4655 
4656     rc = pqi_poll_for_admin_response(ctrl_info, response);
4657 
4658     if (rc == 0)
4659         rc = pqi_validate_admin_response(response, request->function_code);
4660 
4661     return rc;
4662 }
4663 
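/*
 * Issue the REPORT DEVICE CAPABILITY admin command and cache the queue
 * limits (queue counts, elements per queue, element lengths in bytes) and
 * the SOP IU layer limits (max. inbound IU length, spanning support) in
 * ctrl_info.
 */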
4664 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4665 {
4666     int rc;
4667     struct pqi_general_admin_request request;
4668     struct pqi_general_admin_response response;
4669     struct pqi_device_capability *capability;
4670     struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4671 
4672     capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4673     if (!capability)
4674         return -ENOMEM;
4675 
4676     memset(&request, 0, sizeof(request));
4677 
4678     request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4679     put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4680         &request.header.iu_length);
4681     request.function_code =
4682         PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4683     put_unaligned_le32(sizeof(*capability),
4684         &request.data.report_device_capability.buffer_length);
4685 
4686     rc = pqi_map_single(ctrl_info->pci_dev,
4687         &request.data.report_device_capability.sg_descriptor,
4688         capability, sizeof(*capability),
4689         DMA_FROM_DEVICE);
4690     if (rc)
4691         goto out;
4692 
4693     rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
4694 
4695     pqi_pci_unmap(ctrl_info->pci_dev,
4696         &request.data.report_device_capability.sg_descriptor, 1,
4697         DMA_FROM_DEVICE);
4698 
4699     if (rc)
4700         goto out;
4701 
4702     if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4703         rc = -EIO;
4704         goto out;
4705     }
4706 
4707     ctrl_info->max_inbound_queues =
4708         get_unaligned_le16(&capability->max_inbound_queues);
4709     ctrl_info->max_elements_per_iq =
4710         get_unaligned_le16(&capability->max_elements_per_iq);
4711     ctrl_info->max_iq_element_length =
4712         get_unaligned_le16(&capability->max_iq_element_length)
4713         * 16;
4714     ctrl_info->max_outbound_queues =
4715         get_unaligned_le16(&capability->max_outbound_queues);
4716     ctrl_info->max_elements_per_oq =
4717         get_unaligned_le16(&capability->max_elements_per_oq);
4718     ctrl_info->max_oq_element_length =
4719         get_unaligned_le16(&capability->max_oq_element_length)
4720         * 16;
4721 
4722     sop_iu_layer_descriptor =
4723         &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4724 
4725     ctrl_info->max_inbound_iu_length_per_firmware =
4726         get_unaligned_le16(
4727             &sop_iu_layer_descriptor->max_inbound_iu_length);
4728     ctrl_info->inbound_spanning_supported =
4729         sop_iu_layer_descriptor->inbound_spanning_supported;
4730     ctrl_info->outbound_spanning_supported =
4731         sop_iu_layer_descriptor->outbound_spanning_supported;
4732 
4733 out:
4734     kfree(capability);
4735 
4736     return rc;
4737 }
4738 
4739 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4740 {
4741     if (ctrl_info->max_iq_element_length <
4742         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4743         dev_err(&ctrl_info->pci_dev->dev,
4744             "max. inbound queue element length of %d is less than the required length of %d\n",
4745             ctrl_info->max_iq_element_length,
4746             PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4747         return -EINVAL;
4748     }
4749 
4750     if (ctrl_info->max_oq_element_length <
4751         PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4752         dev_err(&ctrl_info->pci_dev->dev,
4753             "max. outbound queue element length of %d is less than the required length of %d\n",
4754             ctrl_info->max_oq_element_length,
4755             PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4756         return -EINVAL;
4757     }
4758 
4759     if (ctrl_info->max_inbound_iu_length_per_firmware <
4760         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4761         dev_err(&ctrl_info->pci_dev->dev,
4762             "max. inbound IU length of %u is less than the min. required length of %d\n",
4763             ctrl_info->max_inbound_iu_length_per_firmware,
4764             PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4765         return -EINVAL;
4766     }
4767 
4768     if (!ctrl_info->inbound_spanning_supported) {
4769         dev_err(&ctrl_info->pci_dev->dev,
4770             "the controller does not support inbound spanning\n");
4771         return -EINVAL;
4772     }
4773 
4774     if (ctrl_info->outbound_spanning_supported) {
4775         dev_err(&ctrl_info->pci_dev->dev,
4776             "the controller supports outbound spanning but this driver does not\n");
4777         return -EINVAL;
4778     }
4779 
4780     return 0;
4781 }
4782 
4783 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4784 {
4785     int rc;
4786     struct pqi_event_queue *event_queue;
4787     struct pqi_general_admin_request request;
4788     struct pqi_general_admin_response response;
4789 
4790     event_queue = &ctrl_info->event_queue;
4791 
4792     /*
4793      * Create an OQ (Outbound Queue - device to host queue) dedicated
4794      * to events.
4795      */
4796     memset(&request, 0, sizeof(request));
4797     request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4798     put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4799         &request.header.iu_length);
4800     request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4801     put_unaligned_le16(event_queue->oq_id,
4802         &request.data.create_operational_oq.queue_id);
4803     put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4804         &request.data.create_operational_oq.element_array_addr);
4805     put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4806         &request.data.create_operational_oq.pi_addr);
4807     put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4808         &request.data.create_operational_oq.num_elements);
4809     put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4810         &request.data.create_operational_oq.element_length);
4811     request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4812     put_unaligned_le16(event_queue->int_msg_num,
4813         &request.data.create_operational_oq.int_msg_num);
4814 
4815     rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4816         &response);
4817     if (rc)
4818         return rc;
4819 
4820     event_queue->oq_ci = ctrl_info->iomem_base +
4821         PQI_DEVICE_REGISTERS_OFFSET +
4822         get_unaligned_le64(
4823             &response.data.create_operational_oq.oq_ci_offset);
4824 
4825     return 0;
4826 }
4827 
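/*
 * Create one queue group: an inbound queue for the RAID path, an inbound
 * queue for the AIO path (marked as such via a CHANGE IQ PROPERTY
 * request), and a single outbound queue.  The PI/CI register offsets
 * returned by the firmware are converted into addresses relative to
 * iomem_base.
 */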
4828 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4829     unsigned int group_number)
4830 {
4831     int rc;
4832     struct pqi_queue_group *queue_group;
4833     struct pqi_general_admin_request request;
4834     struct pqi_general_admin_response response;
4835 
4836     queue_group = &ctrl_info->queue_groups[group_number];
4837 
4838     /*
4839      * Create IQ (Inbound Queue - host to device queue) for
4840      * RAID path.
4841      */
4842     memset(&request, 0, sizeof(request));
4843     request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4844     put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4845         &request.header.iu_length);
4846     request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4847     put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4848         &request.data.create_operational_iq.queue_id);
4849     put_unaligned_le64(
4850         (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4851         &request.data.create_operational_iq.element_array_addr);
4852     put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4853         &request.data.create_operational_iq.ci_addr);
4854     put_unaligned_le16(ctrl_info->num_elements_per_iq,
4855         &request.data.create_operational_iq.num_elements);
4856     put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4857         &request.data.create_operational_iq.element_length);
4858     request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4859 
4860     rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4861         &response);
4862     if (rc) {
4863         dev_err(&ctrl_info->pci_dev->dev,
4864             "error creating inbound RAID queue\n");
4865         return rc;
4866     }
4867 
4868     queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4869         PQI_DEVICE_REGISTERS_OFFSET +
4870         get_unaligned_le64(
4871             &response.data.create_operational_iq.iq_pi_offset);
4872 
4873     /*
4874      * Create IQ (Inbound Queue - host to device queue) for
4875      * Advanced I/O (AIO) path.
4876      */
4877     memset(&request, 0, sizeof(request));
4878     request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4879     put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4880         &request.header.iu_length);
4881     request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4882     put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4883         &request.data.create_operational_iq.queue_id);
4884     put_unaligned_le64(
4885         (u64)queue_group->iq_element_array_bus_addr[AIO_PATH],
4886         &request.data.create_operational_iq.element_array_addr);
4887     put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4888         &request.data.create_operational_iq.ci_addr);
4889     put_unaligned_le16(ctrl_info->num_elements_per_iq,
4890         &request.data.create_operational_iq.num_elements);
4891     put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4892         &request.data.create_operational_iq.element_length);
4893     request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4894 
4895     rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4896         &response);
4897     if (rc) {
4898         dev_err(&ctrl_info->pci_dev->dev,
4899             "error creating inbound AIO queue\n");
4900         return rc;
4901     }
4902 
4903     queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4904         PQI_DEVICE_REGISTERS_OFFSET +
4905         get_unaligned_le64(
4906             &response.data.create_operational_iq.iq_pi_offset);
4907 
4908     /*
4909      * Designate the 2nd IQ as the AIO path.  By default, all IQs are
4910      * assumed to be for RAID path I/O unless we change the queue's
4911      * property.
4912      */
4913     memset(&request, 0, sizeof(request));
4914     request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4915     put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4916         &request.header.iu_length);
4917     request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4918     put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4919         &request.data.change_operational_iq_properties.queue_id);
4920     put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4921         &request.data.change_operational_iq_properties.vendor_specific);
4922 
4923     rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4924         &response);
4925     if (rc) {
4926         dev_err(&ctrl_info->pci_dev->dev,
4927             "error changing queue property\n");
4928         return rc;
4929     }
4930 
4931     /*
4932      * Create OQ (Outbound Queue - device to host queue).
4933      */
4934     memset(&request, 0, sizeof(request));
4935     request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4936     put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4937         &request.header.iu_length);
4938     request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4939     put_unaligned_le16(queue_group->oq_id,
4940         &request.data.create_operational_oq.queue_id);
4941     put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4942         &request.data.create_operational_oq.element_array_addr);
4943     put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4944         &request.data.create_operational_oq.pi_addr);
4945     put_unaligned_le16(ctrl_info->num_elements_per_oq,
4946         &request.data.create_operational_oq.num_elements);
4947     put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4948         &request.data.create_operational_oq.element_length);
4949     request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4950     put_unaligned_le16(queue_group->int_msg_num,
4951         &request.data.create_operational_oq.int_msg_num);
4952 
4953     rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4954         &response);
4955     if (rc) {
4956         dev_err(&ctrl_info->pci_dev->dev,
4957             "error creating outbound queue\n");
4958         return rc;
4959     }
4960 
4961     queue_group->oq_ci = ctrl_info->iomem_base +
4962         PQI_DEVICE_REGISTERS_OFFSET +
4963         get_unaligned_le64(
4964             &response.data.create_operational_oq.oq_ci_offset);
4965 
4966     return 0;
4967 }
4968 
4969 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4970 {
4971     int rc;
4972     unsigned int i;
4973 
4974     rc = pqi_create_event_queue(ctrl_info);
4975     if (rc) {
4976         dev_err(&ctrl_info->pci_dev->dev,
4977             "error creating event queue\n");
4978         return rc;
4979     }
4980 
4981     for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4982         rc = pqi_create_queue_group(ctrl_info, i);
4983         if (rc) {
4984             dev_err(&ctrl_info->pci_dev->dev,
4985                 "error creating queue group number %u/%u\n",
4986                 i, ctrl_info->num_queue_groups);
4987             return rc;
4988         }
4989     }
4990 
4991     return 0;
4992 }
4993 
4994 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH   \
4995     struct_size((struct pqi_event_config *)0, descriptors, PQI_MAX_EVENT_DESCRIPTORS)
4996 
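/*
 * Read the controller's event configuration, point each supported event
 * type at the driver's event queue (or at OQ ID 0 to disable it), and
 * write the result back with a SET VENDOR EVENT CONFIG request.
 */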
4997 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4998     bool enable_events)
4999 {
5000     int rc;
5001     unsigned int i;
5002     struct pqi_event_config *event_config;
5003     struct pqi_event_descriptor *event_descriptor;
5004     struct pqi_general_management_request request;
5005 
5006     event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5007         GFP_KERNEL);
5008     if (!event_config)
5009         return -ENOMEM;
5010 
5011     memset(&request, 0, sizeof(request));
5012 
5013     request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
5014     put_unaligned_le16(offsetof(struct pqi_general_management_request,
5015         data.report_event_configuration.sg_descriptors[1]) -
5016         PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5017     put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5018         &request.data.report_event_configuration.buffer_length);
5019 
5020     rc = pqi_map_single(ctrl_info->pci_dev,
5021         request.data.report_event_configuration.sg_descriptors,
5022         event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5023         DMA_FROM_DEVICE);
5024     if (rc)
5025         goto out;
5026 
5027     rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5028 
5029     pqi_pci_unmap(ctrl_info->pci_dev,
5030         request.data.report_event_configuration.sg_descriptors, 1,
5031         DMA_FROM_DEVICE);
5032 
5033     if (rc)
5034         goto out;
5035 
5036     for (i = 0; i < event_config->num_event_descriptors; i++) {
5037         event_descriptor = &event_config->descriptors[i];
5038         if (enable_events &&
5039             pqi_is_supported_event(event_descriptor->event_type))
5040                 put_unaligned_le16(ctrl_info->event_queue.oq_id,
5041                     &event_descriptor->oq_id);
5042         else
5043             put_unaligned_le16(0, &event_descriptor->oq_id);
5044     }
5045 
5046     memset(&request, 0, sizeof(request));
5047 
5048     request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
5049     put_unaligned_le16(offsetof(struct pqi_general_management_request,
5050         data.report_event_configuration.sg_descriptors[1]) -
5051         PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5052     put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5053         &request.data.report_event_configuration.buffer_length);
5054 
5055     rc = pqi_map_single(ctrl_info->pci_dev,
5056         request.data.report_event_configuration.sg_descriptors,
5057         event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5058         DMA_TO_DEVICE);
5059     if (rc)
5060         goto out;
5061 
5062     rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5063 
5064     pqi_pci_unmap(ctrl_info->pci_dev,
5065         request.data.report_event_configuration.sg_descriptors, 1,
5066         DMA_TO_DEVICE);
5067 
5068 out:
5069     kfree(event_config);
5070 
5071     return rc;
5072 }
5073 
5074 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
5075 {
5076     return pqi_configure_events(ctrl_info, true);
5077 }
5078 
5079 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
5080 {
5081     unsigned int i;
5082     struct device *dev;
5083     size_t sg_chain_buffer_length;
5084     struct pqi_io_request *io_request;
5085 
5086     if (!ctrl_info->io_request_pool)
5087         return;
5088 
5089     dev = &ctrl_info->pci_dev->dev;
5090     sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5091     io_request = ctrl_info->io_request_pool;
5092 
5093     for (i = 0; i < ctrl_info->max_io_slots; i++) {
5094         kfree(io_request->iu);
5095         if (!io_request->sg_chain_buffer)
5096             break;
5097         dma_free_coherent(dev, sg_chain_buffer_length,
5098             io_request->sg_chain_buffer,
5099             io_request->sg_chain_buffer_dma_handle);
5100         io_request++;
5101     }
5102 
5103     kfree(ctrl_info->io_request_pool);
5104     ctrl_info->io_request_pool = NULL;
5105 }
5106 
5107 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
5108 {
5109     ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
5110                      ctrl_info->error_buffer_length,
5111                      &ctrl_info->error_buffer_dma_handle,
5112                      GFP_KERNEL);
5113     if (!ctrl_info->error_buffer)
5114         return -ENOMEM;
5115 
5116     return 0;
5117 }
5118 
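/*
 * Allocate the I/O request pool: one pqi_io_request per I/O slot, each
 * with a kmalloc'ed IU buffer and a DMA-coherent scatter-gather chain
 * buffer.  On any failure, everything allocated so far is released via
 * pqi_free_all_io_requests().
 */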
5119 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
5120 {
5121     unsigned int i;
5122     void *sg_chain_buffer;
5123     size_t sg_chain_buffer_length;
5124     dma_addr_t sg_chain_buffer_dma_handle;
5125     struct device *dev;
5126     struct pqi_io_request *io_request;
5127 
5128     ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
5129         sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
5130 
5131     if (!ctrl_info->io_request_pool) {
5132         dev_err(&ctrl_info->pci_dev->dev,
5133             "failed to allocate I/O request pool\n");
5134         goto error;
5135     }
5136 
5137     dev = &ctrl_info->pci_dev->dev;
5138     sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5139     io_request = ctrl_info->io_request_pool;
5140 
5141     for (i = 0; i < ctrl_info->max_io_slots; i++) {
5142         io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
5143 
5144         if (!io_request->iu) {
5145             dev_err(&ctrl_info->pci_dev->dev,
5146                 "failed to allocate IU buffers\n");
5147             goto error;
5148         }
5149 
5150         sg_chain_buffer = dma_alloc_coherent(dev,
5151             sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
5152             GFP_KERNEL);
5153 
5154         if (!sg_chain_buffer) {
5155             dev_err(&ctrl_info->pci_dev->dev,
5156                 "failed to allocate PQI scatter-gather chain buffers\n");
5157             goto error;
5158         }
5159 
5160         io_request->index = i;
5161         io_request->sg_chain_buffer = sg_chain_buffer;
5162         io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
5163         io_request++;
5164     }
5165 
5166     return 0;
5167 
5168 error:
5169     pqi_free_all_io_requests(ctrl_info);
5170 
5171     return -ENOMEM;
5172 }
5173 
5174 /*
5175  * Calculate required resources that are sized based on max. outstanding
5176  * requests and max. transfer size.
5177  */
5178 
5179 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
5180 {
5181     u32 max_transfer_size;
5182     u32 max_sg_entries;
5183 
5184     ctrl_info->scsi_ml_can_queue =
5185         ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
5186     ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
5187 
5188     ctrl_info->error_buffer_length =
5189         ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
5190 
5191     if (reset_devices)
5192         max_transfer_size = min(ctrl_info->max_transfer_size,
5193             PQI_MAX_TRANSFER_SIZE_KDUMP);
5194     else
5195         max_transfer_size = min(ctrl_info->max_transfer_size,
5196             PQI_MAX_TRANSFER_SIZE);
5197 
5198     max_sg_entries = max_transfer_size / PAGE_SIZE;
5199 
5200     /* +1 to cover when the buffer is not page-aligned. */
5201     max_sg_entries++;
5202 
5203     max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
5204 
5205     max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
5206 
5207     ctrl_info->sg_chain_buffer_length =
5208         (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
5209         PQI_EXTRA_SGL_MEMORY;
5210     ctrl_info->sg_tablesize = max_sg_entries;
5211     ctrl_info->max_sectors = max_transfer_size / 512;
5212 }
5213 
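/*
 * Size the operational queues.  The number of queue groups is capped by
 * the online CPU count, the available MSI-X vectors, and the controller's
 * inbound/outbound queue limits (a kdump boot uses a single group); the
 * per-queue element counts are then derived from the negotiated max.
 * inbound IU length.
 */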
5214 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
5215 {
5216     int num_queue_groups;
5217     u16 num_elements_per_iq;
5218     u16 num_elements_per_oq;
5219 
5220     if (reset_devices) {
5221         num_queue_groups = 1;
5222     } else {
5223         int num_cpus;
5224         int max_queue_groups;
5225 
5226         max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
5227             ctrl_info->max_outbound_queues - 1);
5228         max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
5229 
5230         num_cpus = num_online_cpus();
5231         num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
5232         num_queue_groups = min(num_queue_groups, max_queue_groups);
5233     }
5234 
5235     ctrl_info->num_queue_groups = num_queue_groups;
5236     ctrl_info->max_hw_queue_index = num_queue_groups - 1;
5237 
5238     /*
5239      * Make sure that the max. inbound IU length is an exact multiple
5240      * of our inbound element length.
5241      */
5242     ctrl_info->max_inbound_iu_length =
5243         (ctrl_info->max_inbound_iu_length_per_firmware /
5244         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
5245         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
5246 
5247     num_elements_per_iq =
5248         (ctrl_info->max_inbound_iu_length /
5249         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
5250 
5251     /* Add one because one element in each queue is unusable. */
5252     num_elements_per_iq++;
5253 
5254     num_elements_per_iq = min(num_elements_per_iq,
5255         ctrl_info->max_elements_per_iq);
5256 
5257     num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
5258     num_elements_per_oq = min(num_elements_per_oq,
5259         ctrl_info->max_elements_per_oq);
5260 
5261     ctrl_info->num_elements_per_iq = num_elements_per_iq;
5262     ctrl_info->num_elements_per_oq = num_elements_per_oq;
5263 
5264     ctrl_info->max_sg_per_iu =
5265         ((ctrl_info->max_inbound_iu_length -
5266         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5267         sizeof(struct pqi_sg_descriptor)) +
5268         PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
5269 
5270     ctrl_info->max_sg_per_r56_iu =
5271         ((ctrl_info->max_inbound_iu_length -
5272         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5273         sizeof(struct pqi_sg_descriptor)) +
5274         PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
5275 }
5276 
5277 static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
5278     struct scatterlist *sg)
5279 {
5280     u64 address = (u64)sg_dma_address(sg);
5281     unsigned int length = sg_dma_len(sg);
5282 
5283     put_unaligned_le64(address, &sg_descriptor->address);
5284     put_unaligned_le32(length, &sg_descriptor->length);
5285     put_unaligned_le32(0, &sg_descriptor->flags);
5286 }
5287 
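/*
 * Fill in the embedded SG descriptors for a request.  If the mapped
 * segments do not all fit in the IU, the last embedded descriptor becomes
 * a CISS_SG_CHAIN pointer to the io_request's chain buffer and the
 * remaining descriptors are written there.  The final descriptor is
 * flagged with CISS_SG_LAST, and the number of descriptors consumed in
 * the IU is returned.
 */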
5288 static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
5289     struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
5290     int max_sg_per_iu, bool *chained)
5291 {
5292     int i;
5293     unsigned int num_sg_in_iu;
5294 
5295     *chained = false;
5296     i = 0;
5297     num_sg_in_iu = 0;
5298     max_sg_per_iu--;    /* Subtract 1 to leave room for chain marker. */
5299 
5300     while (1) {
5301         pqi_set_sg_descriptor(sg_descriptor, sg);
5302         if (!*chained)
5303             num_sg_in_iu++;
5304         i++;
5305         if (i == sg_count)
5306             break;
5307         sg_descriptor++;
5308         if (i == max_sg_per_iu) {
5309             put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
5310                 &sg_descriptor->address);
5311             put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
5312                 &sg_descriptor->length);
5313             put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
5314             *chained = true;
5315             num_sg_in_iu++;
5316             sg_descriptor = io_request->sg_chain_buffer;
5317         }
5318         sg = sg_next(sg);
5319     }
5320 
5321     put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
5322 
5323     return num_sg_in_iu;
5324 }
5325 
5326 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
5327     struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
5328     struct pqi_io_request *io_request)
5329 {
5330     u16 iu_length;
5331     int sg_count;
5332     bool chained;
5333     unsigned int num_sg_in_iu;
5334     struct scatterlist *sg;
5335     struct pqi_sg_descriptor *sg_descriptor;
5336 
5337     sg_count = scsi_dma_map(scmd);
5338     if (sg_count < 0)
5339         return sg_count;
5340 
5341     iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5342         PQI_REQUEST_HEADER_LENGTH;
5343 
5344     if (sg_count == 0)
5345         goto out;
5346 
5347     sg = scsi_sglist(scmd);
5348     sg_descriptor = request->sg_descriptors;
5349 
5350     num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5351         ctrl_info->max_sg_per_iu, &chained);
5352 
5353     request->partial = chained;
5354     iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5355 
5356 out:
5357     put_unaligned_le16(iu_length, &request->header.iu_length);
5358 
5359     return 0;
5360 }
5361 
5362 static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
5363     struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
5364     struct pqi_io_request *io_request)
5365 {
5366     u16 iu_length;
5367     int sg_count;
5368     bool chained;
5369     unsigned int num_sg_in_iu;
5370     struct scatterlist *sg;
5371     struct pqi_sg_descriptor *sg_descriptor;
5372 
5373     sg_count = scsi_dma_map(scmd);
5374     if (sg_count < 0)
5375         return sg_count;
5376 
5377     iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
5378         PQI_REQUEST_HEADER_LENGTH;
5379     num_sg_in_iu = 0;
5380 
5381     if (sg_count == 0)
5382         goto out;
5383 
5384     sg = scsi_sglist(scmd);
5385     sg_descriptor = request->sg_descriptors;
5386 
5387     num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5388         ctrl_info->max_sg_per_iu, &chained);
5389 
5390     request->partial = chained;
5391     iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5392 
5393 out:
5394     put_unaligned_le16(iu_length, &request->header.iu_length);
5395     request->num_sg_descriptors = num_sg_in_iu;
5396 
5397     return 0;
5398 }
5399 
5400 static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
5401     struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
5402     struct pqi_io_request *io_request)
5403 {
5404     u16 iu_length;
5405     int sg_count;
5406     bool chained;
5407     unsigned int num_sg_in_iu;
5408     struct scatterlist *sg;
5409     struct pqi_sg_descriptor *sg_descriptor;
5410 
5411     sg_count = scsi_dma_map(scmd);
5412     if (sg_count < 0)
5413         return sg_count;
5414 
5415     iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
5416         PQI_REQUEST_HEADER_LENGTH;
5417     num_sg_in_iu = 0;
5418 
5419     if (sg_count != 0) {
5420         sg = scsi_sglist(scmd);
5421         sg_descriptor = request->sg_descriptors;
5422 
5423         num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5424             ctrl_info->max_sg_per_r56_iu, &chained);
5425 
5426         request->partial = chained;
5427         iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5428     }
5429 
5430     put_unaligned_le16(iu_length, &request->header.iu_length);
5431     request->num_sg_descriptors = num_sg_in_iu;
5432 
5433     return 0;
5434 }
5435 
5436 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
5437     struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
5438     struct pqi_io_request *io_request)
5439 {
5440     u16 iu_length;
5441     int sg_count;
5442     bool chained;
5443     unsigned int num_sg_in_iu;
5444     struct scatterlist *sg;
5445     struct pqi_sg_descriptor *sg_descriptor;
5446 
5447     sg_count = scsi_dma_map(scmd);
5448     if (sg_count < 0)
5449         return sg_count;
5450 
5451     iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
5452         PQI_REQUEST_HEADER_LENGTH;
5453     num_sg_in_iu = 0;
5454 
5455     if (sg_count == 0)
5456         goto out;
5457 
5458     sg = scsi_sglist(scmd);
5459     sg_descriptor = request->sg_descriptors;
5460 
5461     num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5462         ctrl_info->max_sg_per_iu, &chained);
5463 
5464     request->partial = chained;
5465     iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5466 
5467 out:
5468     put_unaligned_le16(iu_length, &request->header.iu_length);
5469     request->num_sg_descriptors = num_sg_in_iu;
5470 
5471     return 0;
5472 }
5473 
5474 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
5475     void *context)
5476 {
5477     struct scsi_cmnd *scmd;
5478 
5479     scmd = io_request->scmd;
5480     pqi_free_io_request(io_request);
5481     scsi_dma_unmap(scmd);
5482     pqi_scsi_done(scmd);
5483 }
5484 
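/*
 * Fill in a RAID path IU for a SCSI command (CDB, LUN, data direction,
 * SG list) and hand it to pqi_start_io().  Returns SCSI_MLQUEUE_HOST_BUSY
 * if the SG list cannot be built.
 */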
5485 static int pqi_raid_submit_scsi_cmd_with_io_request(
5486     struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
5487     struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5488     struct pqi_queue_group *queue_group)
5489 {
5490     int rc;
5491     size_t cdb_length;
5492     struct pqi_raid_path_request *request;
5493 
5494     io_request->io_complete_callback = pqi_raid_io_complete;
5495     io_request->scmd = scmd;
5496 
5497     request = io_request->iu;
5498     memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
5499 
5500     request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5501     put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5502     request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5503     put_unaligned_le16(io_request->index, &request->request_id);
5504     request->error_index = request->request_id;
5505     memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
5506     request->ml_device_lun_number = (u8)scmd->device->lun;
5507 
5508     cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
5509     memcpy(request->cdb, scmd->cmnd, cdb_length);
5510 
5511     switch (cdb_length) {
5512     case 6:
5513     case 10:
5514     case 12:
5515     case 16:
5516         request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5517         break;
5518     case 20:
5519         request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
5520         break;
5521     case 24:
5522         request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
5523         break;
5524     case 28:
5525         request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
5526         break;
5527     case 32:
5528     default:
5529         request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
5530         break;
5531     }
5532 
5533     switch (scmd->sc_data_direction) {
5534     case DMA_FROM_DEVICE:
5535         request->data_direction = SOP_READ_FLAG;
5536         break;
5537     case DMA_TO_DEVICE:
5538         request->data_direction = SOP_WRITE_FLAG;
5539         break;
5540     case DMA_NONE:
5541         request->data_direction = SOP_NO_DIRECTION_FLAG;
5542         break;
5543     case DMA_BIDIRECTIONAL:
5544         request->data_direction = SOP_BIDIRECTIONAL;
5545         break;
5546     default:
5547         dev_err(&ctrl_info->pci_dev->dev,
5548             "unknown data direction: %d\n",
5549             scmd->sc_data_direction);
5550         break;
5551     }
5552 
5553     rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5554     if (rc) {
5555         pqi_free_io_request(io_request);
5556         return SCSI_MLQUEUE_HOST_BUSY;
5557     }
5558 
5559     pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5560 
5561     return 0;
5562 }
5563 
5564 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5565     struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5566     struct pqi_queue_group *queue_group)
5567 {
5568     struct pqi_io_request *io_request;
5569 
5570     io_request = pqi_alloc_io_request(ctrl_info);
5571 
5572     return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5573         device, scmd, queue_group);
5574 }
5575 
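/*
 * Decide whether a failed RAID-bypass request should be retried: only if
 * it actually went through the bypass, did not complete with SAM_STAT_GOOD
 * or DID_NO_CONNECT, and both the device and the controller are still
 * usable.
 */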
5576 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5577 {
5578     struct scsi_cmnd *scmd;
5579     struct pqi_scsi_dev *device;
5580     struct pqi_ctrl_info *ctrl_info;
5581 
5582     if (!io_request->raid_bypass)
5583         return false;
5584 
5585     scmd = io_request->scmd;
5586     if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5587         return false;
5588     if (host_byte(scmd->result) == DID_NO_CONNECT)
5589         return false;
5590 
5591     device = scmd->device->hostdata;
5592     if (pqi_device_offline(device) || pqi_device_in_remove(device))
5593         return false;
5594 
5595     ctrl_info = shost_to_hba(scmd->device->host);
5596     if (pqi_ctrl_offline(ctrl_info))
5597         return false;
5598 
5599     return true;
5600 }
5601 
5602 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5603     void *context)
5604 {
5605     struct scsi_cmnd *scmd;
5606 
5607     scmd = io_request->scmd;
5608     scsi_dma_unmap(scmd);
5609     if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
5610         set_host_byte(scmd, DID_IMM_RETRY);
5611         pqi_cmd_priv(scmd)->this_residual++;
5612     }
5613 
5614     pqi_free_io_request(io_request);
5615     pqi_scsi_done(scmd);
5616 }
5617 
5618 static inline bool pqi_is_io_high_priority(struct pqi_ctrl_info *ctrl_info,
5619     struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
5620 {
5621     bool io_high_prio;
5622     int priority_class;
5623 
5624     io_high_prio = false;
5625 
5626     if (device->ncq_prio_enable) {
5627         priority_class =
5628             IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
5629         if (priority_class == IOPRIO_CLASS_RT) {
5630             /* Set NCQ priority for read/write commands. */
5631             switch (scmd->cmnd[0]) {
5632             case WRITE_16:
5633             case READ_16:
5634             case WRITE_12:
5635             case READ_12:
5636             case WRITE_10:
5637             case READ_10:
5638             case WRITE_6:
5639             case READ_6:
5640                 io_high_prio = true;
5641                 break;
5642             }
5643         }
5644     }
5645 
5646     return io_high_prio;
5647 }
5648 
5649 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5650     struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5651     struct pqi_queue_group *queue_group)
5652 {
5653     bool io_high_prio;
5654 
5655     io_high_prio = pqi_is_io_high_priority(ctrl_info, device, scmd);
5656 
5657     return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
5658         scmd->cmnd, scmd->cmd_len, queue_group, NULL,
5659         false, io_high_prio);
5660 }
5661 
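/*
 * Build and start an AIO path request from the supplied AIO (nexus)
 * handle, CDB, and optional per-request encryption tweaks.  Note the
 * data_direction encoding: a host write (DMA_TO_DEVICE) maps to
 * SOP_READ_FLAG and a host read (DMA_FROM_DEVICE) to SOP_WRITE_FLAG.
 */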
5662 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5663     struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5664     unsigned int cdb_length, struct pqi_queue_group *queue_group,
5665     struct pqi_encryption_info *encryption_info, bool raid_bypass,
5666     bool io_high_prio)
5667 {
5668     int rc;
5669     struct pqi_io_request *io_request;
5670     struct pqi_aio_path_request *request;
5671     struct pqi_scsi_dev *device;
5672 
5673     device = scmd->device->hostdata;
5674     io_request = pqi_alloc_io_request(ctrl_info);
5675     io_request->io_complete_callback = pqi_aio_io_complete;
5676     io_request->scmd = scmd;
5677     io_request->raid_bypass = raid_bypass;
5678 
5679     request = io_request->iu;
5680     memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors));
5681 
5682     request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5683     put_unaligned_le32(aio_handle, &request->nexus_id);
5684     put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5685     request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5686     request->command_priority = io_high_prio;
5687     put_unaligned_le16(io_request->index, &request->request_id);
5688     request->error_index = request->request_id;
5689     if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
5690         put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number);
5691     if (cdb_length > sizeof(request->cdb))
5692         cdb_length = sizeof(request->cdb);
5693     request->cdb_length = cdb_length;
5694     memcpy(request->cdb, cdb, cdb_length);
5695 
5696     switch (scmd->sc_data_direction) {
5697     case DMA_TO_DEVICE:
5698         request->data_direction = SOP_READ_FLAG;
5699         break;
5700     case DMA_FROM_DEVICE:
5701         request->data_direction = SOP_WRITE_FLAG;
5702         break;
5703     case DMA_NONE:
5704         request->data_direction = SOP_NO_DIRECTION_FLAG;
5705         break;
5706     case DMA_BIDIRECTIONAL:
5707         request->data_direction = SOP_BIDIRECTIONAL;
5708         break;
5709     default:
5710         dev_err(&ctrl_info->pci_dev->dev,
5711             "unknown data direction: %d\n",
5712             scmd->sc_data_direction);
5713         break;
5714     }
5715 
5716     if (encryption_info) {
5717         request->encryption_enable = true;
5718         put_unaligned_le16(encryption_info->data_encryption_key_index,
5719             &request->data_encryption_key_index);
5720         put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5721             &request->encrypt_tweak_lower);
5722         put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5723             &request->encrypt_tweak_upper);
5724     }
5725 
5726     rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5727     if (rc) {
5728         pqi_free_io_request(io_request);
5729         return SCSI_MLQUEUE_HOST_BUSY;
5730     }
5731 
5732     pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5733 
5734     return 0;
5735 }
5736 
5737 static  int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
5738     struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5739     struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5740     struct pqi_scsi_dev_raid_map_data *rmd)
5741 {
5742     int rc;
5743     struct pqi_io_request *io_request;
5744     struct pqi_aio_r1_path_request *r1_request;
5745 
5746     io_request = pqi_alloc_io_request(ctrl_info);
5747     io_request->io_complete_callback = pqi_aio_io_complete;
5748     io_request->scmd = scmd;
5749     io_request->raid_bypass = true;
5750 
5751     r1_request = io_request->iu;
5752     memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
5753 
5754     r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
5755     put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
5756     r1_request->num_drives = rmd->num_it_nexus_entries;
5757     put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
5758     put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
5759     if (rmd->num_it_nexus_entries == 3)
5760         put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
5761 
5762     put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
5763     r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5764     put_unaligned_le16(io_request->index, &r1_request->request_id);
5765     r1_request->error_index = r1_request->request_id;
5766     if (rmd->cdb_length > sizeof(r1_request->cdb))
5767         rmd->cdb_length = sizeof(r1_request->cdb);
5768     r1_request->cdb_length = rmd->cdb_length;
5769     memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
5770 
5771     /* The direction is always write; a host write maps to SOP_READ_FLAG. */
5772     r1_request->data_direction = SOP_READ_FLAG;
5773 
5774     if (encryption_info) {
5775         r1_request->encryption_enable = true;
5776         put_unaligned_le16(encryption_info->data_encryption_key_index,
5777                 &r1_request->data_encryption_key_index);
5778         put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5779                 &r1_request->encrypt_tweak_lower);
5780         put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5781                 &r1_request->encrypt_tweak_upper);
5782     }
5783 
5784     rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
5785     if (rc) {
5786         pqi_free_io_request(io_request);
5787         return SCSI_MLQUEUE_HOST_BUSY;
5788     }
5789 
5790     pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5791 
5792     return 0;
5793 }
5794 
5795 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
5796     struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5797     struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5798     struct pqi_scsi_dev_raid_map_data *rmd)
5799 {
5800     int rc;
5801     struct pqi_io_request *io_request;
5802     struct pqi_aio_r56_path_request *r56_request;
5803 
5804     io_request = pqi_alloc_io_request(ctrl_info);
5805     io_request->io_complete_callback = pqi_aio_io_complete;
5806     io_request->scmd = scmd;
5807     io_request->raid_bypass = true;
5808 
5809     r56_request = io_request->iu;
5810     memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
5811 
5812     if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
5813         r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
5814     else
5815         r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
5816 
5817     put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
5818     put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
5819     put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
5820     if (rmd->raid_level == SA_RAID_6) {
5821         put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
5822         r56_request->xor_multiplier = rmd->xor_mult;
5823     }
5824     put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
5825     r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5826     put_unaligned_le64(rmd->row, &r56_request->row);
5827 
5828     put_unaligned_le16(io_request->index, &r56_request->request_id);
5829     r56_request->error_index = r56_request->request_id;
5830 
5831     if (rmd->cdb_length > sizeof(r56_request->cdb))
5832         rmd->cdb_length = sizeof(r56_request->cdb);
5833     r56_request->cdb_length = rmd->cdb_length;
5834     memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
5835 
5836     /* The direction is always write; a host write maps to SOP_READ_FLAG. */
5837     r56_request->data_direction = SOP_READ_FLAG;
5838 
5839     if (encryption_info) {
5840         r56_request->encryption_enable = true;
5841         put_unaligned_le16(encryption_info->data_encryption_key_index,
5842                 &r56_request->data_encryption_key_index);
5843         put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5844                 &r56_request->encrypt_tweak_lower);
5845         put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5846                 &r56_request->encrypt_tweak_upper);
5847     }
5848 
5849     rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
5850     if (rc) {
5851         pqi_free_io_request(io_request);
5852         return SCSI_MLQUEUE_HOST_BUSY;
5853     }
5854 
5855     pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5856 
5857     return 0;
5858 }
5859 
5860 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5861     struct scsi_cmnd *scmd)
5862 {
5863     u16 hw_queue;
5864 
5865     hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
5866     if (hw_queue > ctrl_info->max_hw_queue_index)
5867         hw_queue = 0;
5868 
5869     return hw_queue;
5870 }
5871 
5872 static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
5873 {
5874     if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
5875         return false;
5876 
5877     return pqi_cmd_priv(scmd)->this_residual == 0;
5878 }
5879 
5880 /*
5881  * This function gets called just before we hand the completed SCSI request
5882  * back to the SML.
5883  */
5884 
5885 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5886 {
5887     struct pqi_scsi_dev *device;
5888 
5889     if (!scmd->device) {
5890         set_host_byte(scmd, DID_NO_CONNECT);
5891         return;
5892     }
5893 
5894     device = scmd->device->hostdata;
5895     if (!device) {
5896         set_host_byte(scmd, DID_NO_CONNECT);
5897         return;
5898     }
5899 
5900     atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
5901 }
5902 
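/*
 * Detect sequential write streams on RAID 5/6 volumes.  Each LUN tracks
 * NUM_STREAMS_PER_LUN {next_lba, last_accessed} entries; a write that is
 * adjacent to or inside a tracked stream returns true, which steers the
 * request down the RAID path instead of the AIO bypass.  Otherwise the
 * least recently used entry is replaced with the new stream and false is
 * returned.
 */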
5903 static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
5904     struct scsi_cmnd *scmd)
5905 {
5906     u32 oldest_jiffies;
5907     u8 lru_index;
5908     int i;
5909     int rc;
5910     struct pqi_scsi_dev *device;
5911     struct pqi_stream_data *pqi_stream_data;
5912     struct pqi_scsi_dev_raid_map_data rmd;
5913 
5914     if (!ctrl_info->enable_stream_detection)
5915         return false;
5916 
5917     rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
5918     if (rc)
5919         return false;
5920 
5921     /* Check writes only. */
5922     if (!rmd.is_write)
5923         return false;
5924 
5925     device = scmd->device->hostdata;
5926 
5927     /* Check for RAID 5/6 streams. */
5928     if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
5929         return false;
5930 
5931     /*
5932      * If the controller does not support AIO RAID{5,6} writes, the
5933      * requests must be sent down the non-AIO path.
5934      */
5935     if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
5936         (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
5937         return true;
5938 
5939     lru_index = 0;
5940     oldest_jiffies = INT_MAX;
5941     for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
5942         pqi_stream_data = &device->stream_data[i];
5943         /*
5944          * Check whether this request is adjacent to, or falls within,
5945          * the previous request.
5946          */
5947         if ((pqi_stream_data->next_lba &&
5948             rmd.first_block >= pqi_stream_data->next_lba) &&
5949             rmd.first_block <= pqi_stream_data->next_lba +
5950                 rmd.block_cnt) {
5951             pqi_stream_data->next_lba = rmd.first_block +
5952                 rmd.block_cnt;
5953             pqi_stream_data->last_accessed = jiffies;
5954             return true;
5955         }
5956 
5957         /* Unused entry. */
5958         if (pqi_stream_data->last_accessed == 0) {
5959             lru_index = i;
5960             break;
5961         }
5962 
5963         /* Find entry with oldest last accessed time. */
5964         if (pqi_stream_data->last_accessed <= oldest_jiffies) {
5965             oldest_jiffies = pqi_stream_data->last_accessed;
5966             lru_index = i;
5967         }
5968     }
5969 
5970     /* Set LRU entry. */
5971     pqi_stream_data = &device->stream_data[lru_index];
5972     pqi_stream_data->last_accessed = jiffies;
5973     pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
5974 
5975     return false;
5976 }
5977 
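/*
 * queuecommand entry point.  Logical volumes take the RAID bypass (AIO)
 * path when the device is bypass-enabled, the request is bypass-eligible,
 * and it is not part of a parity write stream; otherwise they use the
 * RAID path.  Physical devices use the AIO path whenever aio_enabled is
 * set.
 */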
5978 static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
5979 {
5980     int rc;
5981     struct pqi_ctrl_info *ctrl_info;
5982     struct pqi_scsi_dev *device;
5983     u16 hw_queue;
5984     struct pqi_queue_group *queue_group;
5985     bool raid_bypassed;
5986 
5987     device = scmd->device->hostdata;
5988 
5989     if (!device) {
5990         set_host_byte(scmd, DID_NO_CONNECT);
5991         pqi_scsi_done(scmd);
5992         return 0;
5993     }
5994 
5995     atomic_inc(&device->scsi_cmds_outstanding[scmd->device->lun]);
5996 
5997     ctrl_info = shost_to_hba(shost);
5998 
5999     if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
6000         set_host_byte(scmd, DID_NO_CONNECT);
6001         pqi_scsi_done(scmd);
6002         return 0;
6003     }
6004 
6005     if (pqi_ctrl_blocked(ctrl_info)) {
6006         rc = SCSI_MLQUEUE_HOST_BUSY;
6007         goto out;
6008     }
6009 
6010     /*
6011      * This is necessary because the SML doesn't zero out this field during
6012      * error recovery.
6013      */
6014     scmd->result = 0;
6015 
6016     hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
6017     queue_group = &ctrl_info->queue_groups[hw_queue];
6018 
6019     if (pqi_is_logical_device(device)) {
6020         raid_bypassed = false;
6021         if (device->raid_bypass_enabled &&
6022             pqi_is_bypass_eligible_request(scmd) &&
6023             !pqi_is_parity_write_stream(ctrl_info, scmd)) {
6024             rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6025             if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
6026                 raid_bypassed = true;
6027                 atomic_inc(&device->raid_bypass_cnt);
6028             }
6029         }
6030         if (!raid_bypassed)
6031             rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6032     } else {
6033         if (device->aio_enabled)
6034             rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6035         else
6036             rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6037     }
6038 
6039 out:
6040     if (rc)
6041         atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
6042 
6043     return rc;
6044 }
6045 
6046 static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info)
6047 {
6048     unsigned int i;
6049     unsigned int path;
6050     unsigned long flags;
6051     unsigned int queued_io_count;
6052     struct pqi_queue_group *queue_group;
6053     struct pqi_io_request *io_request;
6054 
6055     queued_io_count = 0;
6056 
6057     for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6058         queue_group = &ctrl_info->queue_groups[i];
6059         for (path = 0; path < 2; path++) {
6060             spin_lock_irqsave(&queue_group->submit_lock[path], flags);
6061             list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry)
6062                 queued_io_count++;
6063             spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
6064         }
6065     }
6066 
6067     return queued_io_count;
6068 }
6069 
6070 static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info)
6071 {
6072     unsigned int i;
6073     unsigned int path;
6074     unsigned int nonempty_inbound_queue_count;
6075     struct pqi_queue_group *queue_group;
6076     pqi_index_t iq_pi;
6077     pqi_index_t iq_ci;
6078 
6079     nonempty_inbound_queue_count = 0;
6080 
6081     for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6082         queue_group = &ctrl_info->queue_groups[i];
6083         for (path = 0; path < 2; path++) {
6084             iq_pi = queue_group->iq_pi_copy[path];
6085             iq_ci = readl(queue_group->iq_ci[path]);
6086             if (iq_ci != iq_pi)
6087                 nonempty_inbound_queue_count++;
6088         }
6089     }
6090 
6091     return nonempty_inbound_queue_count;
6092 }
6093 
6094 #define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS    10
6095 
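/*
 * Poll until no I/O remains queued in the driver and every inbound queue
 * has been consumed by the controller (PI == CI), warning every
 * PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS seconds while waiting.
 */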
6096 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
6097 {
6098     unsigned long start_jiffies;
6099     unsigned long warning_timeout;
6100     unsigned int queued_io_count;
6101     unsigned int nonempty_inbound_queue_count;
6102     bool displayed_warning;
6103 
6104     displayed_warning = false;
6105     start_jiffies = jiffies;
6106     warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6107 
6108     while (1) {
6109         queued_io_count = pqi_queued_io_count(ctrl_info);
6110         nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info);
6111         if (queued_io_count == 0 && nonempty_inbound_queue_count == 0)
6112             break;
6113         pqi_check_ctrl_health(ctrl_info);
6114         if (pqi_ctrl_offline(ctrl_info))
6115             return -ENXIO;
6116         if (time_after(jiffies, warning_timeout)) {
6117             dev_warn(&ctrl_info->pci_dev->dev,
6118                 "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
6119                 jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
6120             displayed_warning = true;
6121             warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6122         }
6123         usleep_range(1000, 2000);
6124     }
6125 
6126     if (displayed_warning)
6127         dev_warn(&ctrl_info->pci_dev->dev,
6128             "queued I/O drained after waiting for %u seconds\n",
6129             jiffies_to_msecs(jiffies - start_jiffies) / 1000);
6130 
6131     return 0;
6132 }
6133 
6134 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
6135     struct pqi_scsi_dev *device)
6136 {
6137     unsigned int i;
6138     unsigned int path;
6139     struct pqi_queue_group *queue_group;
6140     unsigned long flags;
6141     struct pqi_io_request *io_request;
6142     struct pqi_io_request *next;
6143     struct scsi_cmnd *scmd;
6144     struct pqi_scsi_dev *scsi_device;
6145 
6146     for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6147         queue_group = &ctrl_info->queue_groups[i];
6148 
6149         for (path = 0; path < 2; path++) {
6150             spin_lock_irqsave(
6151                 &queue_group->submit_lock[path], flags);
6152 
6153             list_for_each_entry_safe(io_request, next,
6154                 &queue_group->request_list[path],
6155                 request_list_entry) {
6156 
6157                 scmd = io_request->scmd;
6158                 if (!scmd)
6159                     continue;
6160 
6161                 scsi_device = scmd->device->hostdata;
6162                 if (scsi_device != device)
6163                     continue;
6164 
6165                 list_del(&io_request->request_list_entry);
6166                 set_host_byte(scmd, DID_RESET);
6167                 pqi_free_io_request(io_request);
6168                 scsi_dma_unmap(scmd);
6169                 pqi_scsi_done(scmd);
6170             }
6171 
6172             spin_unlock_irqrestore(
6173                 &queue_group->submit_lock[path], flags);
6174         }
6175     }
6176 }
6177 
6178 #define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10
6179 
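/*
 * Wait for the per-LUN outstanding command count to drop to zero, warning
 * every PQI_PENDING_IO_WARNING_TIMEOUT_SECS and giving up with -ETIMEDOUT
 * once timeout_msecs have elapsed. Controller health is only checked when
 * the controller is not being removed gracefully.
 */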
6180 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
6181     struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs)
6182 {
6183     int cmds_outstanding;
6184     unsigned long start_jiffies;
6185     unsigned long warning_timeout;
6186     unsigned long msecs_waiting;
6187 
6188     start_jiffies = jiffies;
6189     warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6190 
6191     while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) {
6192         if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) {
6193             pqi_check_ctrl_health(ctrl_info);
6194             if (pqi_ctrl_offline(ctrl_info))
6195                 return -ENXIO;
6196         }
6197         msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
6198         if (msecs_waiting >= timeout_msecs) {
6199             dev_err(&ctrl_info->pci_dev->dev,
6200                 "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
6201                 ctrl_info->scsi_host->host_no, device->bus, device->target,
6202                 lun, msecs_waiting / 1000, cmds_outstanding);
6203             return -ETIMEDOUT;
6204         }
6205         if (time_after(jiffies, warning_timeout)) {
6206             dev_warn(&ctrl_info->pci_dev->dev,
6207                 "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
6208                 ctrl_info->scsi_host->host_no, device->bus, device->target,
6209                 lun, msecs_waiting / 1000, cmds_outstanding);
6210             warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6211         }
6212         usleep_range(1000, 2000);
6213     }
6214 
6215     return 0;
6216 }
6217 
6218 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
6219     void *context)
6220 {
6221     struct completion *waiting = context;
6222 
6223     complete(waiting);
6224 }
6225 
6226 #define PQI_LUN_RESET_POLL_COMPLETION_SECS  10
6227 
6228 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
6229     struct pqi_scsi_dev *device, u8 lun, struct completion *wait)
6230 {
6231     int rc;
6232     unsigned int wait_secs;
6233     int cmds_outstanding;
6234 
6235     wait_secs = 0;
6236 
6237     while (1) {
6238         if (wait_for_completion_io_timeout(wait,
6239             PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) {
6240             rc = 0;
6241             break;
6242         }
6243 
6244         pqi_check_ctrl_health(ctrl_info);
6245         if (pqi_ctrl_offline(ctrl_info)) {
6246             rc = -ENXIO;
6247             break;
6248         }
6249 
6250         wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
6251         cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]);
6252         dev_warn(&ctrl_info->pci_dev->dev,
6253             "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
6254             ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding);
6255     }
6256 
6257     return rc;
6258 }
6259 
6260 #define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30
6261 
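/*
 * Build a SOP LUN RESET task management IU for the device addressed by
 * scmd, submit it on the RAID path of the default queue group, and wait
 * for the firmware to complete it.
 */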
6262 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
6263 {
6264     int rc;
6265     struct pqi_io_request *io_request;
6266     DECLARE_COMPLETION_ONSTACK(wait);
6267     struct pqi_task_management_request *request;
6268     struct pqi_scsi_dev *device;
6269 
6270     device = scmd->device->hostdata;
6271     io_request = pqi_alloc_io_request(ctrl_info);
6272     io_request->io_complete_callback = pqi_lun_reset_complete;
6273     io_request->context = &wait;
6274 
6275     request = io_request->iu;
6276     memset(request, 0, sizeof(*request));
6277 
6278     request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
6279     put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
6280         &request->header.iu_length);
6281     put_unaligned_le16(io_request->index, &request->request_id);
6282     memcpy(request->lun_number, device->scsi3addr,
6283         sizeof(request->lun_number));
6284     if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
6285         request->ml_device_lun_number = (u8)scmd->device->lun;
6286     request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
6287     if (ctrl_info->tmf_iu_timeout_supported)
6288         put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
6289 
6290     pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
6291         io_request);
6292 
6293     rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, (u8)scmd->device->lun, &wait);
6294     if (rc == 0)
6295         rc = io_request->status;
6296 
6297     pqi_free_io_request(io_request);
6298 
6299     return rc;
6300 }
6301 
6302 #define PQI_LUN_RESET_RETRIES               3
6303 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS      (10 * 1000)
6304 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS      (10 * 60 * 1000)
6305 #define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS   (2 * 60 * 1000)
6306 
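/*
 * Issue a LUN reset, retrying up to PQI_LUN_RESET_RETRIES times with a
 * PQI_LUN_RESET_RETRY_INTERVAL_MSECS delay between attempts, then wait for
 * outstanding I/O to drain. The drain timeout is shorter when the reset
 * itself failed. Returns SUCCESS or FAILED for the SCSI error handler.
 */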
6307 static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
6308 {
6309     int reset_rc;
6310     int wait_rc;
6311     unsigned int retries;
6312     unsigned long timeout_msecs;
6313     struct pqi_scsi_dev *device;
6314 
6315     device = scmd->device->hostdata;
6316     for (retries = 0;;) {
6317         reset_rc = pqi_lun_reset(ctrl_info, scmd);
6318         if (reset_rc == 0 || reset_rc == -ENODEV || ++retries > PQI_LUN_RESET_RETRIES)
6319             break;
6320         msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
6321     }
6322 
6323     timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
6324         PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
6325 
6326     wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, scmd->device->lun, timeout_msecs);
6327     if (wait_rc && reset_rc == 0)
6328         reset_rc = wait_rc;
6329 
6330     return reset_rc == 0 ? SUCCESS : FAILED;
6331 }
6332 
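/*
 * Device reset sequence: block and quiesce new requests, fail any I/O still
 * queued in the driver for the device, drain the inbound queues, then issue
 * the LUN reset with retries before unblocking requests.
 */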
6333 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
6334 {
6335     int rc;
6336     struct pqi_scsi_dev *device;
6337 
6338     device = scmd->device->hostdata;
6339     pqi_ctrl_block_requests(ctrl_info);
6340     pqi_ctrl_wait_until_quiesced(ctrl_info);
6341     pqi_fail_io_queued_for_device(ctrl_info, device);
6342     rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
6343     if (rc)
6344         rc = FAILED;
6345     else
6346         rc = pqi_lun_reset_with_retries(ctrl_info, scmd);
6347     pqi_ctrl_unblock_requests(ctrl_info);
6348 
6349     return rc;
6350 }
6351 
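/* SCSI error-handler entry point; LUN resets are serialized by lun_reset_mutex. */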
6352 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
6353 {
6354     int rc;
6355     struct Scsi_Host *shost;
6356     struct pqi_ctrl_info *ctrl_info;
6357     struct pqi_scsi_dev *device;
6358 
6359     shost = scmd->device->host;
6360     ctrl_info = shost_to_hba(shost);
6361     device = scmd->device->hostdata;
6362 
6363     mutex_lock(&ctrl_info->lun_reset_mutex);
6364 
6365     dev_err(&ctrl_info->pci_dev->dev,
6366         "resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
6367         shost->host_no,
6368         device->bus, device->target, (u32)scmd->device->lun,
6369         scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
6370 
6371     pqi_check_ctrl_health(ctrl_info);
6372     if (pqi_ctrl_offline(ctrl_info))
6373         rc = FAILED;
6374     else
6375         rc = pqi_device_reset(ctrl_info, scmd);
6376 
6377     dev_err(&ctrl_info->pci_dev->dev,
6378         "reset of scsi %d:%d:%d:%d: %s\n",
6379         shost->host_no, device->bus, device->target, (u32)scmd->device->lun,
6380         rc == SUCCESS ? "SUCCESS" : "FAILED");
6381 
6382     mutex_unlock(&ctrl_info->lun_reset_mutex);
6383 
6384     return rc;
6385 }
6386 
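/*
 * Match the newly allocated scsi_device to the driver's device list: by SAS
 * rphy for devices on the physical bus, otherwise by bus/target/lun. Applies
 * the advertised queue depth and disables WRITE SAME where required.
 */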
6387 static int pqi_slave_alloc(struct scsi_device *sdev)
6388 {
6389     struct pqi_scsi_dev *device;
6390     unsigned long flags;
6391     struct pqi_ctrl_info *ctrl_info;
6392     struct scsi_target *starget;
6393     struct sas_rphy *rphy;
6394 
6395     ctrl_info = shost_to_hba(sdev->host);
6396 
6397     spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6398 
6399     if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
6400         starget = scsi_target(sdev);
6401         rphy = target_to_rphy(starget);
6402         device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
6403         if (device) {
6404             if (device->target_lun_valid) {
6405                 device->ignore_device = true;
6406             } else {
6407                 device->target = sdev_id(sdev);
6408                 device->lun = sdev->lun;
6409                 device->target_lun_valid = true;
6410             }
6411         }
6412     } else {
6413         device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
6414             sdev_id(sdev), sdev->lun);
6415     }
6416 
6417     if (device) {
6418         sdev->hostdata = device;
6419         device->sdev = sdev;
6420         if (device->queue_depth) {
6421             device->advertised_queue_depth = device->queue_depth;
6422             scsi_change_queue_depth(sdev,
6423                 device->advertised_queue_depth);
6424         }
6425         if (pqi_is_logical_device(device)) {
6426             pqi_disable_write_same(sdev);
6427         } else {
6428             sdev->allow_restart = 1;
6429             if (device->device_type == SA_DEVICE_TYPE_NVME)
6430                 pqi_disable_write_same(sdev);
6431         }
6432     }
6433 
6434     spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6435 
6436     return 0;
6437 }
6438 
6439 static int pqi_map_queues(struct Scsi_Host *shost)
6440 {
6441     struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6442 
6443     return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
6444                     ctrl_info->pci_dev, 0);
6445 }
6446 
6447 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
6448 {
6449     return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
6450 }
6451 
6452 static int pqi_slave_configure(struct scsi_device *sdev)
6453 {
6454     int rc = 0;
6455     struct pqi_scsi_dev *device;
6456 
6457     device = sdev->hostdata;
6458     device->devtype = sdev->type;
6459 
6460     if (pqi_is_tape_changer_device(device) && device->ignore_device) {
6461         rc = -ENXIO;
6462         device->ignore_device = false;
6463     }
6464 
6465     return rc;
6466 }
6467 
6468 static void pqi_slave_destroy(struct scsi_device *sdev)
6469 {
6470     struct pqi_ctrl_info *ctrl_info;
6471     struct pqi_scsi_dev *device;
6472     int mutex_acquired;
6473     unsigned long flags;
6474 
6475     ctrl_info = shost_to_hba(sdev->host);
6476 
6477     mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
6478     if (!mutex_acquired)
6479         return;
6480 
6481     device = sdev->hostdata;
6482     if (!device) {
6483         mutex_unlock(&ctrl_info->scan_mutex);
6484         return;
6485     }
6486 
6487     spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6488     list_del(&device->scsi_device_list_entry);
6489     spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6490 
6491     mutex_unlock(&ctrl_info->scan_mutex);
6492 
6493     pqi_dev_info(ctrl_info, "removed", device);
6494     pqi_free_device(device);
6495 }
6496 
6497 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6498 {
6499     struct pci_dev *pci_dev;
6500     u32 subsystem_vendor;
6501     u32 subsystem_device;
6502     cciss_pci_info_struct pciinfo;
6503 
6504     if (!arg)
6505         return -EINVAL;
6506 
6507     pci_dev = ctrl_info->pci_dev;
6508 
6509     pciinfo.domain = pci_domain_nr(pci_dev->bus);
6510     pciinfo.bus = pci_dev->bus->number;
6511     pciinfo.dev_fn = pci_dev->devfn;
6512     subsystem_vendor = pci_dev->subsystem_vendor;
6513     subsystem_device = pci_dev->subsystem_device;
6514     pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
6515 
6516     if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
6517         return -EFAULT;
6518 
6519     return 0;
6520 }
6521 
6522 static int pqi_getdrivver_ioctl(void __user *arg)
6523 {
6524     u32 version;
6525 
6526     if (!arg)
6527         return -EINVAL;
6528 
6529     version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
6530         (DRIVER_RELEASE << 16) | DRIVER_REVISION;
6531 
6532     if (copy_to_user(arg, &version, sizeof(version)))
6533         return -EFAULT;
6534 
6535     return 0;
6536 }
6537 
6538 struct ciss_error_info {
6539     u8  scsi_status;
6540     int command_status;
6541     size_t  sense_data_length;
6542 };
6543 
6544 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
6545     struct ciss_error_info *ciss_error_info)
6546 {
6547     int ciss_cmd_status;
6548     size_t sense_data_length;
6549 
6550     switch (pqi_error_info->data_out_result) {
6551     case PQI_DATA_IN_OUT_GOOD:
6552         ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
6553         break;
6554     case PQI_DATA_IN_OUT_UNDERFLOW:
6555         ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
6556         break;
6557     case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
6558         ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
6559         break;
6560     case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
6561     case PQI_DATA_IN_OUT_BUFFER_ERROR:
6562     case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
6563     case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
6564     case PQI_DATA_IN_OUT_ERROR:
6565         ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
6566         break;
6567     case PQI_DATA_IN_OUT_HARDWARE_ERROR:
6568     case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
6569     case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
6570     case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
6571     case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
6572     case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
6573     case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
6574     case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
6575     case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
6576     case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
6577         ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
6578         break;
6579     case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
6580         ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
6581         break;
6582     case PQI_DATA_IN_OUT_ABORTED:
6583         ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
6584         break;
6585     case PQI_DATA_IN_OUT_TIMEOUT:
6586         ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
6587         break;
6588     default:
6589         ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
6590         break;
6591     }
6592 
6593     sense_data_length =
6594         get_unaligned_le16(&pqi_error_info->sense_data_length);
6595     if (sense_data_length == 0)
6596         sense_data_length =
6597         get_unaligned_le16(&pqi_error_info->response_data_length);
6598     if (sense_data_length)
6599         if (sense_data_length > sizeof(pqi_error_info->data))
6600             sense_data_length = sizeof(pqi_error_info->data);
6601 
6602     ciss_error_info->scsi_status = pqi_error_info->status;
6603     ciss_error_info->command_status = ciss_cmd_status;
6604     ciss_error_info->sense_data_length = sense_data_length;
6605 }
6606 
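/*
 * CCISS_PASSTHRU: copy the user's IOCTL_Command_struct, stage the data in a
 * kernel bounce buffer, build a RAID path request around the supplied CDB,
 * submit it synchronously, then translate the PQI error info back into the
 * CISS error format expected by userspace.
 */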
6607 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6608 {
6609     int rc;
6610     char *kernel_buffer = NULL;
6611     u16 iu_length;
6612     size_t sense_data_length;
6613     IOCTL_Command_struct iocommand;
6614     struct pqi_raid_path_request request;
6615     struct pqi_raid_error_info pqi_error_info;
6616     struct ciss_error_info ciss_error_info;
6617 
6618     if (pqi_ctrl_offline(ctrl_info))
6619         return -ENXIO;
6620     if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
6621         return -EBUSY;
6622     if (!arg)
6623         return -EINVAL;
6624     if (!capable(CAP_SYS_RAWIO))
6625         return -EPERM;
6626     if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
6627         return -EFAULT;
6628     if (iocommand.buf_size < 1 &&
6629         iocommand.Request.Type.Direction != XFER_NONE)
6630         return -EINVAL;
6631     if (iocommand.Request.CDBLen > sizeof(request.cdb))
6632         return -EINVAL;
6633     if (iocommand.Request.Type.Type != TYPE_CMD)
6634         return -EINVAL;
6635 
6636     switch (iocommand.Request.Type.Direction) {
6637     case XFER_NONE:
6638     case XFER_WRITE:
6639     case XFER_READ:
6640     case XFER_READ | XFER_WRITE:
6641         break;
6642     default:
6643         return -EINVAL;
6644     }
6645 
6646     if (iocommand.buf_size > 0) {
6647         kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
6648         if (!kernel_buffer)
6649             return -ENOMEM;
6650         if (iocommand.Request.Type.Direction & XFER_WRITE) {
6651             if (copy_from_user(kernel_buffer, iocommand.buf,
6652                 iocommand.buf_size)) {
6653                 rc = -EFAULT;
6654                 goto out;
6655             }
6656         } else {
6657             memset(kernel_buffer, 0, iocommand.buf_size);
6658         }
6659     }
6660 
6661     memset(&request, 0, sizeof(request));
6662 
6663     request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6664     iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6665         PQI_REQUEST_HEADER_LENGTH;
6666     memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6667         sizeof(request.lun_number));
6668     memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6669     request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6670 
6671     switch (iocommand.Request.Type.Direction) {
6672     case XFER_NONE:
6673         request.data_direction = SOP_NO_DIRECTION_FLAG;
6674         break;
6675     case XFER_WRITE:
6676         request.data_direction = SOP_WRITE_FLAG;
6677         break;
6678     case XFER_READ:
6679         request.data_direction = SOP_READ_FLAG;
6680         break;
6681     case XFER_READ | XFER_WRITE:
6682         request.data_direction = SOP_BIDIRECTIONAL;
6683         break;
6684     }
6685 
6686     request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6687 
6688     if (iocommand.buf_size > 0) {
6689         put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6690 
6691         rc = pqi_map_single(ctrl_info->pci_dev,
6692             &request.sg_descriptors[0], kernel_buffer,
6693             iocommand.buf_size, DMA_BIDIRECTIONAL);
6694         if (rc)
6695             goto out;
6696 
6697         iu_length += sizeof(request.sg_descriptors[0]);
6698     }
6699 
6700     put_unaligned_le16(iu_length, &request.header.iu_length);
6701 
6702     if (ctrl_info->raid_iu_timeout_supported)
6703         put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6704 
6705     rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6706         PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
6707 
6708     if (iocommand.buf_size > 0)
6709         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6710             DMA_BIDIRECTIONAL);
6711 
6712     memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6713 
6714     if (rc == 0) {
6715         pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6716         iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6717         iocommand.error_info.CommandStatus =
6718             ciss_error_info.command_status;
6719         sense_data_length = ciss_error_info.sense_data_length;
6720         if (sense_data_length) {
6721             if (sense_data_length >
6722                 sizeof(iocommand.error_info.SenseInfo))
6723                 sense_data_length =
6724                     sizeof(iocommand.error_info.SenseInfo);
6725             memcpy(iocommand.error_info.SenseInfo,
6726                 pqi_error_info.data, sense_data_length);
6727             iocommand.error_info.SenseLen = sense_data_length;
6728         }
6729     }
6730 
6731     if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6732         rc = -EFAULT;
6733         goto out;
6734     }
6735 
6736     if (rc == 0 && iocommand.buf_size > 0 &&
6737         (iocommand.Request.Type.Direction & XFER_READ)) {
6738         if (copy_to_user(iocommand.buf, kernel_buffer,
6739             iocommand.buf_size)) {
6740             rc = -EFAULT;
6741         }
6742     }
6743 
6744 out:
6745     kfree(kernel_buffer);
6746 
6747     return rc;
6748 }
6749 
6750 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6751              void __user *arg)
6752 {
6753     int rc;
6754     struct pqi_ctrl_info *ctrl_info;
6755 
6756     ctrl_info = shost_to_hba(sdev->host);
6757 
6758     switch (cmd) {
6759     case CCISS_DEREGDISK:
6760     case CCISS_REGNEWDISK:
6761     case CCISS_REGNEWD:
6762         rc = pqi_scan_scsi_devices(ctrl_info);
6763         break;
6764     case CCISS_GETPCIINFO:
6765         rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6766         break;
6767     case CCISS_GETDRIVVER:
6768         rc = pqi_getdrivver_ioctl(arg);
6769         break;
6770     case CCISS_PASSTHRU:
6771         rc = pqi_passthru_ioctl(ctrl_info, arg);
6772         break;
6773     default:
6774         rc = -EINVAL;
6775         break;
6776     }
6777 
6778     return rc;
6779 }
6780 
6781 static ssize_t pqi_firmware_version_show(struct device *dev,
6782     struct device_attribute *attr, char *buffer)
6783 {
6784     struct Scsi_Host *shost;
6785     struct pqi_ctrl_info *ctrl_info;
6786 
6787     shost = class_to_shost(dev);
6788     ctrl_info = shost_to_hba(shost);
6789 
6790     return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6791 }
6792 
6793 static ssize_t pqi_driver_version_show(struct device *dev,
6794     struct device_attribute *attr, char *buffer)
6795 {
6796     return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
6797 }
6798 
6799 static ssize_t pqi_serial_number_show(struct device *dev,
6800     struct device_attribute *attr, char *buffer)
6801 {
6802     struct Scsi_Host *shost;
6803     struct pqi_ctrl_info *ctrl_info;
6804 
6805     shost = class_to_shost(dev);
6806     ctrl_info = shost_to_hba(shost);
6807 
6808     return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6809 }
6810 
6811 static ssize_t pqi_model_show(struct device *dev,
6812     struct device_attribute *attr, char *buffer)
6813 {
6814     struct Scsi_Host *shost;
6815     struct pqi_ctrl_info *ctrl_info;
6816 
6817     shost = class_to_shost(dev);
6818     ctrl_info = shost_to_hba(shost);
6819 
6820     return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6821 }
6822 
6823 static ssize_t pqi_vendor_show(struct device *dev,
6824     struct device_attribute *attr, char *buffer)
6825 {
6826     struct Scsi_Host *shost;
6827     struct pqi_ctrl_info *ctrl_info;
6828 
6829     shost = class_to_shost(dev);
6830     ctrl_info = shost_to_hba(shost);
6831 
6832     return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6833 }
6834 
6835 static ssize_t pqi_host_rescan_store(struct device *dev,
6836     struct device_attribute *attr, const char *buffer, size_t count)
6837 {
6838     struct Scsi_Host *shost = class_to_shost(dev);
6839 
6840     pqi_scan_start(shost);
6841 
6842     return count;
6843 }
6844 
6845 static ssize_t pqi_lockup_action_show(struct device *dev,
6846     struct device_attribute *attr, char *buffer)
6847 {
6848     int count = 0;
6849     unsigned int i;
6850 
6851     for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6852         if (pqi_lockup_actions[i].action == pqi_lockup_action)
6853             count += scnprintf(buffer + count, PAGE_SIZE - count,
6854                 "[%s] ", pqi_lockup_actions[i].name);
6855         else
6856             count += scnprintf(buffer + count, PAGE_SIZE - count,
6857                 "%s ", pqi_lockup_actions[i].name);
6858     }
6859 
6860     count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
6861 
6862     return count;
6863 }
6864 
6865 static ssize_t pqi_lockup_action_store(struct device *dev,
6866     struct device_attribute *attr, const char *buffer, size_t count)
6867 {
6868     unsigned int i;
6869     char *action_name;
6870     char action_name_buffer[32];
6871 
6872     strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6873     action_name = strstrip(action_name_buffer);
6874 
6875     for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6876         if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
6877             pqi_lockup_action = pqi_lockup_actions[i].action;
6878             return count;
6879         }
6880     }
6881 
6882     return -EINVAL;
6883 }
6884 
6885 static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
6886     struct device_attribute *attr, char *buffer)
6887 {
6888     struct Scsi_Host *shost = class_to_shost(dev);
6889     struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6890 
6891     return scnprintf(buffer, 10, "%x\n",
6892             ctrl_info->enable_stream_detection);
6893 }
6894 
6895 static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
6896     struct device_attribute *attr, const char *buffer, size_t count)
6897 {
6898     struct Scsi_Host *shost = class_to_shost(dev);
6899     struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6900     u8 set_stream_detection = 0;
6901 
6902     if (kstrtou8(buffer, 0, &set_stream_detection))
6903         return -EINVAL;
6904 
6905     if (set_stream_detection > 0)
6906         set_stream_detection = 1;
6907 
6908     ctrl_info->enable_stream_detection = set_stream_detection;
6909 
6910     return count;
6911 }
6912 
6913 static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
6914     struct device_attribute *attr, char *buffer)
6915 {
6916     struct Scsi_Host *shost = class_to_shost(dev);
6917     struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6918 
6919     return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
6920 }
6921 
6922 static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
6923     struct device_attribute *attr, const char *buffer, size_t count)
6924 {
6925     struct Scsi_Host *shost = class_to_shost(dev);
6926     struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6927     u8 set_r5_writes = 0;
6928 
6929     if (kstrtou8(buffer, 0, &set_r5_writes))
6930         return -EINVAL;
6931 
6932     if (set_r5_writes > 0)
6933         set_r5_writes = 1;
6934 
6935     ctrl_info->enable_r5_writes = set_r5_writes;
6936 
6937     return count;
6938 }
6939 
6940 static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
6941     struct device_attribute *attr, char *buffer)
6942 {
6943     struct Scsi_Host *shost = class_to_shost(dev);
6944     struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6945 
6946     return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
6947 }
6948 
6949 static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
6950     struct device_attribute *attr, const char *buffer, size_t count)
6951 {
6952     struct Scsi_Host *shost = class_to_shost(dev);
6953     struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6954     u8 set_r6_writes = 0;
6955 
6956     if (kstrtou8(buffer, 0, &set_r6_writes))
6957         return -EINVAL;
6958 
6959     if (set_r6_writes > 0)
6960         set_r6_writes = 1;
6961 
6962     ctrl_info->enable_r6_writes = set_r6_writes;
6963 
6964     return count;
6965 }
6966 
6967 static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
6968 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
6969 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
6970 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
6971 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
6972 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
6973 static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
6974     pqi_lockup_action_store);
6975 static DEVICE_ATTR(enable_stream_detection, 0644,
6976     pqi_host_enable_stream_detection_show,
6977     pqi_host_enable_stream_detection_store);
6978 static DEVICE_ATTR(enable_r5_writes, 0644,
6979     pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
6980 static DEVICE_ATTR(enable_r6_writes, 0644,
6981     pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
6982 
6983 static struct attribute *pqi_shost_attrs[] = {
6984     &dev_attr_driver_version.attr,
6985     &dev_attr_firmware_version.attr,
6986     &dev_attr_model.attr,
6987     &dev_attr_serial_number.attr,
6988     &dev_attr_vendor.attr,
6989     &dev_attr_rescan.attr,
6990     &dev_attr_lockup_action.attr,
6991     &dev_attr_enable_stream_detection.attr,
6992     &dev_attr_enable_r5_writes.attr,
6993     &dev_attr_enable_r6_writes.attr,
6994     NULL
6995 };
6996 
6997 ATTRIBUTE_GROUPS(pqi_shost);
6998 
6999 static ssize_t pqi_unique_id_show(struct device *dev,
7000     struct device_attribute *attr, char *buffer)
7001 {
7002     struct pqi_ctrl_info *ctrl_info;
7003     struct scsi_device *sdev;
7004     struct pqi_scsi_dev *device;
7005     unsigned long flags;
7006     u8 unique_id[16];
7007 
7008     sdev = to_scsi_device(dev);
7009     ctrl_info = shost_to_hba(sdev->host);
7010 
7011     if (pqi_ctrl_offline(ctrl_info))
7012         return -ENODEV;
7013 
7014     spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7015 
7016     device = sdev->hostdata;
7017     if (!device) {
7018         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7019         return -ENODEV;
7020     }
7021 
7022     if (device->is_physical_device)
7023         memcpy(unique_id, device->wwid, sizeof(device->wwid));
7024     else
7025         memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
7026 
7027     spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7028 
7029     return scnprintf(buffer, PAGE_SIZE,
7030         "%02X%02X%02X%02X%02X%02X%02X%02X"
7031         "%02X%02X%02X%02X%02X%02X%02X%02X\n",
7032         unique_id[0], unique_id[1], unique_id[2], unique_id[3],
7033         unique_id[4], unique_id[5], unique_id[6], unique_id[7],
7034         unique_id[8], unique_id[9], unique_id[10], unique_id[11],
7035         unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
7036 }
7037 
7038 static ssize_t pqi_lunid_show(struct device *dev,
7039     struct device_attribute *attr, char *buffer)
7040 {
7041     struct pqi_ctrl_info *ctrl_info;
7042     struct scsi_device *sdev;
7043     struct pqi_scsi_dev *device;
7044     unsigned long flags;
7045     u8 lunid[8];
7046 
7047     sdev = to_scsi_device(dev);
7048     ctrl_info = shost_to_hba(sdev->host);
7049 
7050     if (pqi_ctrl_offline(ctrl_info))
7051         return -ENODEV;
7052 
7053     spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7054 
7055     device = sdev->hostdata;
7056     if (!device) {
7057         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7058         return -ENODEV;
7059     }
7060 
7061     memcpy(lunid, device->scsi3addr, sizeof(lunid));
7062 
7063     spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7064 
7065     return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
7066 }
7067 
7068 #define MAX_PATHS   8
7069 
7070 static ssize_t pqi_path_info_show(struct device *dev,
7071     struct device_attribute *attr, char *buf)
7072 {
7073     struct pqi_ctrl_info *ctrl_info;
7074     struct scsi_device *sdev;
7075     struct pqi_scsi_dev *device;
7076     unsigned long flags;
7077     int i;
7078     int output_len = 0;
7079     u8 box;
7080     u8 bay;
7081     u8 path_map_index;
7082     char *active;
7083     u8 phys_connector[2];
7084 
7085     sdev = to_scsi_device(dev);
7086     ctrl_info = shost_to_hba(sdev->host);
7087 
7088     if (pqi_ctrl_offline(ctrl_info))
7089         return -ENODEV;
7090 
7091     spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7092 
7093     device = sdev->hostdata;
7094     if (!device) {
7095         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7096         return -ENODEV;
7097     }
7098 
7099     bay = device->bay;
7100     for (i = 0; i < MAX_PATHS; i++) {
7101         path_map_index = 1 << i;
7102         if (i == device->active_path_index)
7103             active = "Active";
7104         else if (device->path_map & path_map_index)
7105             active = "Inactive";
7106         else
7107             continue;
7108 
7109         output_len += scnprintf(buf + output_len,
7110                     PAGE_SIZE - output_len,
7111                     "[%d:%d:%d:%d] %20.20s ",
7112                     ctrl_info->scsi_host->host_no,
7113                     device->bus, device->target,
7114                     device->lun,
7115                     scsi_device_type(device->devtype));
7116 
7117         if (device->devtype == TYPE_RAID ||
7118             pqi_is_logical_device(device))
7119             goto end_buffer;
7120 
7121         memcpy(&phys_connector, &device->phys_connector[i],
7122             sizeof(phys_connector));
7123         if (phys_connector[0] < '0')
7124             phys_connector[0] = '0';
7125         if (phys_connector[1] < '0')
7126             phys_connector[1] = '0';
7127 
7128         output_len += scnprintf(buf + output_len,
7129                     PAGE_SIZE - output_len,
7130                     "PORT: %.2s ", phys_connector);
7131 
7132         box = device->box[i];
7133         if (box != 0 && box != 0xFF)
7134             output_len += scnprintf(buf + output_len,
7135                         PAGE_SIZE - output_len,
7136                         "BOX: %hhu ", box);
7137 
7138         if ((device->devtype == TYPE_DISK ||
7139             device->devtype == TYPE_ZBC) &&
7140             pqi_expose_device(device))
7141             output_len += scnprintf(buf + output_len,
7142                         PAGE_SIZE - output_len,
7143                         "BAY: %hhu ", bay);
7144 
7145 end_buffer:
7146         output_len += scnprintf(buf + output_len,
7147                     PAGE_SIZE - output_len,
7148                     "%s\n", active);
7149     }
7150 
7151     spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7152 
7153     return output_len;
7154 }
7155 
7156 static ssize_t pqi_sas_address_show(struct device *dev,
7157     struct device_attribute *attr, char *buffer)
7158 {
7159     struct pqi_ctrl_info *ctrl_info;
7160     struct scsi_device *sdev;
7161     struct pqi_scsi_dev *device;
7162     unsigned long flags;
7163     u64 sas_address;
7164 
7165     sdev = to_scsi_device(dev);
7166     ctrl_info = shost_to_hba(sdev->host);
7167 
7168     if (pqi_ctrl_offline(ctrl_info))
7169         return -ENODEV;
7170 
7171     spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7172 
7173     device = sdev->hostdata;
7174     if (!device) {
7175         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7176         return -ENODEV;
7177     }
7178 
7179     sas_address = device->sas_address;
7180 
7181     spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7182 
7183     return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
7184 }
7185 
7186 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
7187     struct device_attribute *attr, char *buffer)
7188 {
7189     struct pqi_ctrl_info *ctrl_info;
7190     struct scsi_device *sdev;
7191     struct pqi_scsi_dev *device;
7192     unsigned long flags;
7193 
7194     sdev = to_scsi_device(dev);
7195     ctrl_info = shost_to_hba(sdev->host);
7196 
7197     if (pqi_ctrl_offline(ctrl_info))
7198         return -ENODEV;
7199 
7200     spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7201 
7202     device = sdev->hostdata;
7203     if (!device) {
7204         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7205         return -ENODEV;
7206     }
7207 
7208     buffer[0] = device->raid_bypass_enabled ? '1' : '0';
7209     buffer[1] = '\n';
7210     buffer[2] = '\0';
7211 
7212     spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7213 
7214     return 2;
7215 }
7216 
7217 static ssize_t pqi_raid_level_show(struct device *dev,
7218     struct device_attribute *attr, char *buffer)
7219 {
7220     struct pqi_ctrl_info *ctrl_info;
7221     struct scsi_device *sdev;
7222     struct pqi_scsi_dev *device;
7223     unsigned long flags;
7224     char *raid_level;
7225 
7226     sdev = to_scsi_device(dev);
7227     ctrl_info = shost_to_hba(sdev->host);
7228 
7229     if (pqi_ctrl_offline(ctrl_info))
7230         return -ENODEV;
7231 
7232     spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7233 
7234     device = sdev->hostdata;
7235     if (!device) {
7236         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7237         return -ENODEV;
7238     }
7239 
7240     if (pqi_is_logical_device(device))
7241         raid_level = pqi_raid_level_to_string(device->raid_level);
7242     else
7243         raid_level = "N/A";
7244 
7245     spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7246 
7247     return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
7248 }
7249 
7250 static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
7251     struct device_attribute *attr, char *buffer)
7252 {
7253     struct pqi_ctrl_info *ctrl_info;
7254     struct scsi_device *sdev;
7255     struct pqi_scsi_dev *device;
7256     unsigned long flags;
7257     int raid_bypass_cnt;
7258 
7259     sdev = to_scsi_device(dev);
7260     ctrl_info = shost_to_hba(sdev->host);
7261 
7262     if (pqi_ctrl_offline(ctrl_info))
7263         return -ENODEV;
7264 
7265     spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7266 
7267     device = sdev->hostdata;
7268     if (!device) {
7269         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7270         return -ENODEV;
7271     }
7272 
7273     raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);
7274 
7275     spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7276 
7277     return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
7278 }
7279 
7280 static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
7281         struct device_attribute *attr, char *buf)
7282 {
7283     struct pqi_ctrl_info *ctrl_info;
7284     struct scsi_device *sdev;
7285     struct pqi_scsi_dev *device;
7286     unsigned long flags;
7287     int output_len = 0;
7288 
7289     sdev = to_scsi_device(dev);
7290     ctrl_info = shost_to_hba(sdev->host);
7291 
7292     if (pqi_ctrl_offline(ctrl_info))
7293         return -ENODEV;
7294 
7295     spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7296 
7297     device = sdev->hostdata;
7298     if (!device) {
7299         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7300         return -ENODEV;
7301     }
7302 
7303     output_len = snprintf(buf, PAGE_SIZE, "%d\n",
7304                 device->ncq_prio_enable);
7305     spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7306 
7307     return output_len;
7308 }
7309 
7310 static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
7311             struct device_attribute *attr,
7312             const char *buf, size_t count)
7313 {
7314     struct pqi_ctrl_info *ctrl_info;
7315     struct scsi_device *sdev;
7316     struct pqi_scsi_dev *device;
7317     unsigned long flags;
7318     u8 ncq_prio_enable = 0;
7319 
7320     if (kstrtou8(buf, 0, &ncq_prio_enable))
7321         return -EINVAL;
7322 
7323     sdev = to_scsi_device(dev);
7324     ctrl_info = shost_to_hba(sdev->host);
7325 
7326     spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7327 
7328     device = sdev->hostdata;
7329 
7330     if (!device) {
7331         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7332         return -ENODEV;
7333     }
7334 
7335     if (!device->ncq_prio_support ||
7336         !device->is_physical_device) {
7337         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7338         return -EINVAL;
7339     }
7340 
7341     device->ncq_prio_enable = ncq_prio_enable;
7342 
7343     spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7344 
7345     return  strlen(buf);
7346 }
7347 
7348 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
7349 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
7350 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
7351 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
7352 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
7353 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
7354 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
7355 static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
7356         pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
7357 
7358 static struct attribute *pqi_sdev_attrs[] = {
7359     &dev_attr_lunid.attr,
7360     &dev_attr_unique_id.attr,
7361     &dev_attr_path_info.attr,
7362     &dev_attr_sas_address.attr,
7363     &dev_attr_ssd_smart_path_enabled.attr,
7364     &dev_attr_raid_level.attr,
7365     &dev_attr_raid_bypass_cnt.attr,
7366     &dev_attr_sas_ncq_prio_enable.attr,
7367     NULL
7368 };
7369 
7370 ATTRIBUTE_GROUPS(pqi_sdev);
7371 
7372 static struct scsi_host_template pqi_driver_template = {
7373     .module = THIS_MODULE,
7374     .name = DRIVER_NAME_SHORT,
7375     .proc_name = DRIVER_NAME_SHORT,
7376     .queuecommand = pqi_scsi_queue_command,
7377     .scan_start = pqi_scan_start,
7378     .scan_finished = pqi_scan_finished,
7379     .this_id = -1,
7380     .eh_device_reset_handler = pqi_eh_device_reset_handler,
7381     .ioctl = pqi_ioctl,
7382     .slave_alloc = pqi_slave_alloc,
7383     .slave_configure = pqi_slave_configure,
7384     .slave_destroy = pqi_slave_destroy,
7385     .map_queues = pqi_map_queues,
7386     .sdev_groups = pqi_sdev_groups,
7387     .shost_groups = pqi_shost_groups,
7388     .cmd_size = sizeof(struct pqi_cmd_priv),
7389 };
7390 
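/*
 * Allocate and register the Scsi_Host: host-wide tag set, one hardware queue
 * per queue group, limits taken from the controller capabilities, then attach
 * the SAS transport via pqi_add_sas_host().
 */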
7391 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
7392 {
7393     int rc;
7394     struct Scsi_Host *shost;
7395 
7396     shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
7397     if (!shost) {
7398         dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
7399         return -ENOMEM;
7400     }
7401 
7402     shost->io_port = 0;
7403     shost->n_io_port = 0;
7404     shost->this_id = -1;
7405     shost->max_channel = PQI_MAX_BUS;
7406     shost->max_cmd_len = MAX_COMMAND_SIZE;
7407     shost->max_lun = PQI_MAX_LUNS_PER_DEVICE;
7409     shost->max_id = ~0;
7410     shost->max_sectors = ctrl_info->max_sectors;
7411     shost->can_queue = ctrl_info->scsi_ml_can_queue;
7412     shost->cmd_per_lun = shost->can_queue;
7413     shost->sg_tablesize = ctrl_info->sg_tablesize;
7414     shost->transportt = pqi_sas_transport_template;
7415     shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
7416     shost->unique_id = shost->irq;
7417     shost->nr_hw_queues = ctrl_info->num_queue_groups;
7418     shost->host_tagset = 1;
7419     shost->hostdata[0] = (unsigned long)ctrl_info;
7420 
7421     rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
7422     if (rc) {
7423         dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
7424         goto free_host;
7425     }
7426 
7427     rc = pqi_add_sas_host(shost, ctrl_info);
7428     if (rc) {
7429         dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
7430         goto remove_host;
7431     }
7432 
7433     ctrl_info->scsi_host = shost;
7434 
7435     return 0;
7436 
7437 remove_host:
7438     scsi_remove_host(shost);
7439 free_host:
7440     scsi_host_put(shost);
7441 
7442     return rc;
7443 }
7444 
7445 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
7446 {
7447     struct Scsi_Host *shost;
7448 
7449     pqi_delete_sas_host(ctrl_info);
7450 
7451     shost = ctrl_info->scsi_host;
7452     if (!shost)
7453         return;
7454 
7455     scsi_remove_host(shost);
7456     scsi_host_put(shost);
7457 }
7458 
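/*
 * Poll the PQI device reset register until the reset action reads back as
 * completed. The timeout comes from the controller's max_reset_timeout
 * register, presumably expressed in 100 ms units given the conversion below;
 * firmware death or timeout ends the wait with an error.
 */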
7459 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
7460 {
7461     int rc = 0;
7462     struct pqi_device_registers __iomem *pqi_registers;
7463     unsigned long timeout;
7464     unsigned int timeout_msecs;
7465     union pqi_reset_register reset_reg;
7466 
7467     pqi_registers = ctrl_info->pqi_registers;
7468     timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
7469     timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
7470 
7471     while (1) {
7472         msleep(PQI_RESET_POLL_INTERVAL_MSECS);
7473         reset_reg.all_bits = readl(&pqi_registers->device_reset);
7474         if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
7475             break;
7476         if (!sis_is_firmware_running(ctrl_info)) {
7477             rc = -ENXIO;
7478             break;
7479         }
7480         if (time_after(jiffies, timeout)) {
7481             rc = -ETIMEDOUT;
7482             break;
7483         }
7484     }
7485 
7486     return rc;
7487 }
7488 
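/*
 * Reset the controller's PQI layer: optionally quiesce via the SIS interface
 * first, then write a hard reset request to the device reset register and
 * wait for the firmware to report completion.
 */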
7489 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
7490 {
7491     int rc;
7492     union pqi_reset_register reset_reg;
7493 
7494     if (ctrl_info->pqi_reset_quiesce_supported) {
7495         rc = sis_pqi_reset_quiesce(ctrl_info);
7496         if (rc) {
7497             dev_err(&ctrl_info->pci_dev->dev,
7498                 "PQI reset failed during quiesce with error %d\n", rc);
7499             return rc;
7500         }
7501     }
7502 
7503     reset_reg.all_bits = 0;
7504     reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
7505     reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
7506 
7507     writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
7508 
7509     rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
7510     if (rc)
7511         dev_err(&ctrl_info->pci_dev->dev,
7512             "PQI reset failed with error %d\n", rc);
7513 
7514     return rc;
7515 }
7516 
7517 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
7518 {
7519     int rc;
7520     struct bmic_sense_subsystem_info *sense_info;
7521 
7522     sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
7523     if (!sense_info)
7524         return -ENOMEM;
7525 
7526     rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
7527     if (rc)
7528         goto out;
7529 
7530     memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
7531         sizeof(sense_info->ctrl_serial_number));
7532     ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
7533 
7534 out:
7535     kfree(sense_info);
7536 
7537     return rc;
7538 }
7539 
7540 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
7541 {
7542     int rc;
7543     struct bmic_identify_controller *identify;
7544 
7545     identify = kmalloc(sizeof(*identify), GFP_KERNEL);
7546     if (!identify)
7547         return -ENOMEM;
7548 
7549     rc = pqi_identify_controller(ctrl_info, identify);
7550     if (rc)
7551         goto out;
7552 
7553     if (get_unaligned_le32(&identify->extra_controller_flags) &
7554         BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
7555         memcpy(ctrl_info->firmware_version,
7556             identify->firmware_version_long,
7557             sizeof(identify->firmware_version_long));
7558     } else {
7559         memcpy(ctrl_info->firmware_version,
7560             identify->firmware_version_short,
7561             sizeof(identify->firmware_version_short));
7562         ctrl_info->firmware_version
7563             [sizeof(identify->firmware_version_short)] = '\0';
7564         snprintf(ctrl_info->firmware_version +
7565             strlen(ctrl_info->firmware_version),
7566             sizeof(ctrl_info->firmware_version) -
7567             sizeof(identify->firmware_version_short),
7568             "-%u",
7569             get_unaligned_le16(&identify->firmware_build_number));
7570     }
7571 
7572     memcpy(ctrl_info->model, identify->product_id,
7573         sizeof(identify->product_id));
7574     ctrl_info->model[sizeof(identify->product_id)] = '\0';
7575 
7576     memcpy(ctrl_info->vendor, identify->vendor_id,
7577         sizeof(identify->vendor_id));
7578     ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
7579 
7580     dev_info(&ctrl_info->pci_dev->dev,
7581         "Firmware version: %s\n", ctrl_info->firmware_version);
7582 
7583 out:
7584     kfree(identify);
7585 
7586     return rc;
7587 }
7588 
7589 struct pqi_config_table_section_info {
7590     struct pqi_ctrl_info *ctrl_info;
7591     void        *section;
7592     u32     section_offset;
7593     void __iomem    *section_iomem_addr;
7594 };
7595 
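/*
 * The firmware features config table section holds three byte arrays of
 * num_elements bytes each, laid out back to back after the header: bits the
 * firmware supports, bits the host requests, and bits the firmware has
 * enabled. The helpers below index into those arrays by feature bit number.
 */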
7596 static inline bool pqi_is_firmware_feature_supported(
7597     struct pqi_config_table_firmware_features *firmware_features,
7598     unsigned int bit_position)
7599 {
7600     unsigned int byte_index;
7601 
7602     byte_index = bit_position / BITS_PER_BYTE;
7603 
7604     if (byte_index >= le16_to_cpu(firmware_features->num_elements))
7605         return false;
7606 
7607     return firmware_features->features_supported[byte_index] &
7608         (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7609 }
7610 
7611 static inline bool pqi_is_firmware_feature_enabled(
7612     struct pqi_config_table_firmware_features *firmware_features,
7613     void __iomem *firmware_features_iomem_addr,
7614     unsigned int bit_position)
7615 {
7616     unsigned int byte_index;
7617     u8 __iomem *features_enabled_iomem_addr;
7618 
7619     byte_index = (bit_position / BITS_PER_BYTE) +
7620         (le16_to_cpu(firmware_features->num_elements) * 2);
7621 
7622     features_enabled_iomem_addr = firmware_features_iomem_addr +
7623         offsetof(struct pqi_config_table_firmware_features,
7624             features_supported) + byte_index;
7625 
7626     return *((__force u8 *)features_enabled_iomem_addr) &
7627         (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7628 }
7629 
7630 static inline void pqi_request_firmware_feature(
7631     struct pqi_config_table_firmware_features *firmware_features,
7632     unsigned int bit_position)
7633 {
7634     unsigned int byte_index;
7635 
7636     byte_index = (bit_position / BITS_PER_BYTE) +
7637         le16_to_cpu(firmware_features->num_elements);
7638 
7639     firmware_features->features_supported[byte_index] |=
7640         (1 << (bit_position % BITS_PER_BYTE));
7641 }
7642 
7643 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
7644     u16 first_section, u16 last_section)
7645 {
7646     struct pqi_vendor_general_request request;
7647 
7648     memset(&request, 0, sizeof(request));
7649 
7650     request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7651     put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7652         &request.header.iu_length);
7653     put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
7654         &request.function_code);
7655     put_unaligned_le16(first_section,
7656         &request.data.config_table_update.first_section);
7657     put_unaligned_le16(last_section,
7658         &request.data.config_table_update.last_section);
7659 
7660     return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
7661 }
7662 
7663 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
7664     struct pqi_config_table_firmware_features *firmware_features,
7665     void __iomem *firmware_features_iomem_addr)
7666 {
7667     void *features_requested;
7668     void __iomem *features_requested_iomem_addr;
7669     void __iomem *host_max_known_feature_iomem_addr;
7670 
7671     features_requested = firmware_features->features_supported +
7672         le16_to_cpu(firmware_features->num_elements);
7673 
7674     features_requested_iomem_addr = firmware_features_iomem_addr +
7675         (features_requested - (void *)firmware_features);
7676 
7677     memcpy_toio(features_requested_iomem_addr, features_requested,
7678         le16_to_cpu(firmware_features->num_elements));
7679 
7680     if (pqi_is_firmware_feature_supported(firmware_features,
7681         PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
7682         host_max_known_feature_iomem_addr =
7683             features_requested_iomem_addr +
7684             (le16_to_cpu(firmware_features->num_elements) * 2) +
7685             sizeof(__le16);
7686         writew(PQI_FIRMWARE_FEATURE_MAXIMUM,
7687             host_max_known_feature_iomem_addr);
7688     }
7689 
7690     return pqi_config_table_update(ctrl_info,
7691         PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
7692         PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
7693 }
7694 
7695 struct pqi_firmware_feature {
7696     char        *feature_name;
7697     unsigned int    feature_bit;
7698     bool        supported;
7699     bool        enabled;
7700     void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
7701         struct pqi_firmware_feature *firmware_feature);
7702 };
7703 
7704 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
7705     struct pqi_firmware_feature *firmware_feature)
7706 {
7707     if (!firmware_feature->supported) {
7708         dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
7709             firmware_feature->feature_name);
7710         return;
7711     }
7712 
7713     if (firmware_feature->enabled) {
7714         dev_info(&ctrl_info->pci_dev->dev,
7715             "%s enabled\n", firmware_feature->feature_name);
7716         return;
7717     }
7718 
7719     dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
7720         firmware_feature->feature_name);
7721 }
7722 
7723 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
7724     struct pqi_firmware_feature *firmware_feature)
7725 {
7726     switch (firmware_feature->feature_bit) {
7727     case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
7728         ctrl_info->enable_r1_writes = firmware_feature->enabled;
7729         break;
7730     case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
7731         ctrl_info->enable_r5_writes = firmware_feature->enabled;
7732         break;
7733     case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
7734         ctrl_info->enable_r6_writes = firmware_feature->enabled;
7735         break;
7736     case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
7737         ctrl_info->soft_reset_handshake_supported =
7738             firmware_feature->enabled &&
7739             pqi_read_soft_reset_status(ctrl_info);
7740         break;
7741     case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
7742         ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
7743         break;
7744     case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
7745         ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
7746         break;
7747     case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
7748         ctrl_info->firmware_triage_supported = firmware_feature->enabled;
7749         pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
7750         break;
7751     case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
7752         ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
7753         break;
7754     case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
7755         ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
7756         break;
7757     }
7758 
7759     pqi_firmware_feature_status(ctrl_info, firmware_feature);
7760 }
7761 
7762 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
7763     struct pqi_firmware_feature *firmware_feature)
7764 {
7765     if (firmware_feature->feature_status)
7766         firmware_feature->feature_status(ctrl_info, firmware_feature);
7767 }
7768 
7769 static DEFINE_MUTEX(pqi_firmware_features_mutex);
7770 
7771 static struct pqi_firmware_feature pqi_firmware_features[] = {
7772     {
7773         .feature_name = "Online Firmware Activation",
7774         .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
7775         .feature_status = pqi_firmware_feature_status,
7776     },
7777     {
7778         .feature_name = "Serial Management Protocol",
7779         .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
7780         .feature_status = pqi_firmware_feature_status,
7781     },
7782     {
7783         .feature_name = "Maximum Known Feature",
7784         .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
7785         .feature_status = pqi_firmware_feature_status,
7786     },
7787     {
7788         .feature_name = "RAID 0 Read Bypass",
7789         .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
7790         .feature_status = pqi_firmware_feature_status,
7791     },
7792     {
7793         .feature_name = "RAID 1 Read Bypass",
7794         .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
7795         .feature_status = pqi_firmware_feature_status,
7796     },
7797     {
7798         .feature_name = "RAID 5 Read Bypass",
7799         .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
7800         .feature_status = pqi_firmware_feature_status,
7801     },
7802     {
7803         .feature_name = "RAID 6 Read Bypass",
7804         .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
7805         .feature_status = pqi_firmware_feature_status,
7806     },
7807     {
7808         .feature_name = "RAID 0 Write Bypass",
7809         .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
7810         .feature_status = pqi_firmware_feature_status,
7811     },
7812     {
7813         .feature_name = "RAID 1 Write Bypass",
7814         .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
7815         .feature_status = pqi_ctrl_update_feature_flags,
7816     },
7817     {
7818         .feature_name = "RAID 5 Write Bypass",
7819         .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
7820         .feature_status = pqi_ctrl_update_feature_flags,
7821     },
7822     {
7823         .feature_name = "RAID 6 Write Bypass",
7824         .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
7825         .feature_status = pqi_ctrl_update_feature_flags,
7826     },
7827     {
7828         .feature_name = "New Soft Reset Handshake",
7829         .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
7830         .feature_status = pqi_ctrl_update_feature_flags,
7831     },
7832     {
7833         .feature_name = "RAID IU Timeout",
7834         .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
7835         .feature_status = pqi_ctrl_update_feature_flags,
7836     },
7837     {
7838         .feature_name = "TMF IU Timeout",
7839         .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
7840         .feature_status = pqi_ctrl_update_feature_flags,
7841     },
7842     {
7843         .feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
7844         .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
7845         .feature_status = pqi_firmware_feature_status,
7846     },
7847     {
7848         .feature_name = "Firmware Triage",
7849         .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
7850         .feature_status = pqi_ctrl_update_feature_flags,
7851     },
7852     {
7853         .feature_name = "RPL Extended Formats 4 and 5",
7854         .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
7855         .feature_status = pqi_ctrl_update_feature_flags,
7856     },
7857     {
7858         .feature_name = "Multi-LUN Target",
7859         .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
7860         .feature_status = pqi_ctrl_update_feature_flags,
7861     },
7862 };
7863 
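/*
 * Negotiate firmware features with the controller: mark every feature in
 * pqi_firmware_features[] that the controller advertises as supported,
 * request all supported features, commit the request with a config table
 * update, then read back which features were actually enabled and run each
 * feature's status callback.
 */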
7864 static void pqi_process_firmware_features(
7865     struct pqi_config_table_section_info *section_info)
7866 {
7867     int rc;
7868     struct pqi_ctrl_info *ctrl_info;
7869     struct pqi_config_table_firmware_features *firmware_features;
7870     void __iomem *firmware_features_iomem_addr;
7871     unsigned int i;
7872     unsigned int num_features_supported;
7873 
7874     ctrl_info = section_info->ctrl_info;
7875     firmware_features = section_info->section;
7876     firmware_features_iomem_addr = section_info->section_iomem_addr;
7877 
7878     for (i = 0, num_features_supported = 0;
7879         i < ARRAY_SIZE(pqi_firmware_features); i++) {
7880         if (pqi_is_firmware_feature_supported(firmware_features,
7881             pqi_firmware_features[i].feature_bit)) {
7882             pqi_firmware_features[i].supported = true;
7883             num_features_supported++;
7884         } else {
7885             pqi_firmware_feature_update(ctrl_info,
7886                 &pqi_firmware_features[i]);
7887         }
7888     }
7889 
7890     if (num_features_supported == 0)
7891         return;
7892 
7893     for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7894         if (!pqi_firmware_features[i].supported)
7895             continue;
7896         pqi_request_firmware_feature(firmware_features,
7897             pqi_firmware_features[i].feature_bit);
7898     }
7899 
7900     rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
7901         firmware_features_iomem_addr);
7902     if (rc) {
7903         dev_err(&ctrl_info->pci_dev->dev,
7904             "failed to enable firmware features in PQI configuration table\n");
7905         for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7906             if (!pqi_firmware_features[i].supported)
7907                 continue;
7908             pqi_firmware_feature_update(ctrl_info,
7909                 &pqi_firmware_features[i]);
7910         }
7911         return;
7912     }
7913 
7914     for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7915         if (!pqi_firmware_features[i].supported)
7916             continue;
7917         if (pqi_is_firmware_feature_enabled(firmware_features,
7918             firmware_features_iomem_addr,
7919             pqi_firmware_features[i].feature_bit)) {
7920                 pqi_firmware_features[i].enabled = true;
7921         }
7922         pqi_firmware_feature_update(ctrl_info,
7923             &pqi_firmware_features[i]);
7924     }
7925 }
7926 
7927 static void pqi_init_firmware_features(void)
7928 {
7929     unsigned int i;
7930 
7931     for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7932         pqi_firmware_features[i].supported = false;
7933         pqi_firmware_features[i].enabled = false;
7934     }
7935 }
7936 
7937 static void pqi_process_firmware_features_section(
7938     struct pqi_config_table_section_info *section_info)
7939 {
7940     mutex_lock(&pqi_firmware_features_mutex);
7941     pqi_init_firmware_features();
7942     pqi_process_firmware_features(section_info);
7943     mutex_unlock(&pqi_firmware_features_mutex);
7944 }
7945 
7946 /*
7947  * Reset all controller settings that can be initialized during the processing
7948  * of the PQI Configuration Table.
7949  */
7950 
7951 static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
7952 {
7953     ctrl_info->heartbeat_counter = NULL;
7954     ctrl_info->soft_reset_status = NULL;
7955     ctrl_info->soft_reset_handshake_supported = false;
7956     ctrl_info->enable_r1_writes = false;
7957     ctrl_info->enable_r5_writes = false;
7958     ctrl_info->enable_r6_writes = false;
7959     ctrl_info->raid_iu_timeout_supported = false;
7960     ctrl_info->tmf_iu_timeout_supported = false;
7961     ctrl_info->firmware_triage_supported = false;
7962     ctrl_info->rpl_extended_format_4_5_supported = false;
7963     ctrl_info->multi_lun_device_supported = false;
7964 }
7965 
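/*
 * Copy the PQI configuration table out of controller memory into a
 * temporary buffer and walk its sections, recording iomem pointers for the
 * heartbeat counter and soft reset status and deferring the firmware
 * features section until all other sections have been seen.
 */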
7966 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
7967 {
7968     u32 table_length;
7969     u32 section_offset;
7970     bool firmware_feature_section_present;
7971     void __iomem *table_iomem_addr;
7972     struct pqi_config_table *config_table;
7973     struct pqi_config_table_section_header *section;
7974     struct pqi_config_table_section_info section_info;
7975     struct pqi_config_table_section_info feature_section_info;
7976 
7977     table_length = ctrl_info->config_table_length;
7978     if (table_length == 0)
7979         return 0;
7980 
7981     config_table = kmalloc(table_length, GFP_KERNEL);
7982     if (!config_table) {
7983         dev_err(&ctrl_info->pci_dev->dev,
7984             "failed to allocate memory for PQI configuration table\n");
7985         return -ENOMEM;
7986     }
7987 
7988     /*
7989      * Copy the config table contents from I/O memory space into the
7990      * temporary buffer.
7991      */
7992     table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
7993     memcpy_fromio(config_table, table_iomem_addr, table_length);
7994 
7995     firmware_feature_section_present = false;
7996     section_info.ctrl_info = ctrl_info;
7997     section_offset = get_unaligned_le32(&config_table->first_section_offset);
7998 
7999     while (section_offset) {
8000         section = (void *)config_table + section_offset;
8001 
8002         section_info.section = section;
8003         section_info.section_offset = section_offset;
8004         section_info.section_iomem_addr = table_iomem_addr + section_offset;
8005 
8006         switch (get_unaligned_le16(&section->section_id)) {
8007         case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
8008             firmware_feature_section_present = true;
8009             feature_section_info = section_info;
8010             break;
8011         case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
8012             if (pqi_disable_heartbeat)
8013                 dev_warn(&ctrl_info->pci_dev->dev,
8014                     "heartbeat disabled by module parameter\n");
8015             else
8016                 ctrl_info->heartbeat_counter =
8017                     table_iomem_addr +
8018                     section_offset +
8019                     offsetof(struct pqi_config_table_heartbeat,
8020                         heartbeat_counter);
8021             break;
8022         case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
8023             ctrl_info->soft_reset_status =
8024                 table_iomem_addr +
8025                 section_offset +
8026                 offsetof(struct pqi_config_table_soft_reset,
8027                     soft_reset_status);
8028             break;
8029         }
8030 
8031         section_offset = get_unaligned_le16(&section->next_section_offset);
8032     }
8033 
8034     /*
8035      * We process the firmware feature section after all other sections
8036      * have been processed so that the feature bit callbacks can take
8037      * into account the settings configured by other sections.
8038      */
8039     if (firmware_feature_section_present)
8040         pqi_process_firmware_features_section(&feature_section_info);
8041 
8042     kfree(config_table);
8043 
8044     return 0;
8045 }
8046 
8047 /* Switches the controller from PQI mode back into SIS mode. */
8048 
8049 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
8050 {
8051     int rc;
8052 
8053     pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
8054     rc = pqi_reset(ctrl_info);
8055     if (rc)
8056         return rc;
8057     rc = sis_reenable_sis_mode(ctrl_info);
8058     if (rc) {
8059         dev_err(&ctrl_info->pci_dev->dev,
8060             "re-enabling SIS mode failed with error %d\n", rc);
8061         return rc;
8062     }
8063     pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8064 
8065     return 0;
8066 }
8067 
8068 /*
8069  * If the controller isn't already in SIS mode, this function forces it into
8070  * SIS mode.
8071  */
8072 
8073 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
8074 {
8075     if (!sis_is_firmware_running(ctrl_info))
8076         return -ENXIO;
8077 
8078     if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
8079         return 0;
8080 
8081     if (sis_is_kernel_up(ctrl_info)) {
8082         pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8083         return 0;
8084     }
8085 
8086     return pqi_revert_to_sis_mode(ctrl_info);
8087 }
8088 
8089 static void pqi_perform_lockup_action(void)
8090 {
8091     switch (pqi_lockup_action) {
8092     case PANIC:
8093         panic("FATAL: Smart Family Controller lockup detected");
8094         break;
8095     case REBOOT:
8096         emergency_restart();
8097         break;
8098     case NONE:
8099     default:
8100         break;
8101     }
8102 }
8103 
8104 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
8105 {
8106     int rc;
8107     u32 product_id;
8108 
8109     if (reset_devices) {
8110         if (pqi_is_fw_triage_supported(ctrl_info)) {
8111             rc = sis_wait_for_fw_triage_completion(ctrl_info);
8112             if (rc)
8113                 return rc;
8114         }
8115         sis_soft_reset(ctrl_info);
8116         ssleep(PQI_POST_RESET_DELAY_SECS);
8117     } else {
8118         rc = pqi_force_sis_mode(ctrl_info);
8119         if (rc)
8120             return rc;
8121     }
8122 
8123     /*
8124      * Wait until the controller is ready to start accepting SIS
8125      * commands.
8126      */
8127     rc = sis_wait_for_ctrl_ready(ctrl_info);
8128     if (rc) {
8129         if (reset_devices) {
8130             dev_err(&ctrl_info->pci_dev->dev,
8131                 "kdump init failed with error %d\n", rc);
8132             pqi_lockup_action = REBOOT;
8133             pqi_perform_lockup_action();
8134         }
8135         return rc;
8136     }
8137 
8138     /*
8139      * Get the controller properties. This allows us to determine
8140      * whether the controller supports PQI mode.
8141      */
8142     rc = sis_get_ctrl_properties(ctrl_info);
8143     if (rc) {
8144         dev_err(&ctrl_info->pci_dev->dev,
8145             "error obtaining controller properties\n");
8146         return rc;
8147     }
8148 
8149     rc = sis_get_pqi_capabilities(ctrl_info);
8150     if (rc) {
8151         dev_err(&ctrl_info->pci_dev->dev,
8152             "error obtaining controller capabilities\n");
8153         return rc;
8154     }
8155 
8156     product_id = sis_get_product_id(ctrl_info);
8157     ctrl_info->product_id = (u8)product_id;
8158     ctrl_info->product_revision = (u8)(product_id >> 8);
8159 
8160     if (reset_devices) {
8161         if (ctrl_info->max_outstanding_requests >
8162             PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
8163                 ctrl_info->max_outstanding_requests =
8164                     PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
8165     } else {
8166         if (ctrl_info->max_outstanding_requests >
8167             PQI_MAX_OUTSTANDING_REQUESTS)
8168                 ctrl_info->max_outstanding_requests =
8169                     PQI_MAX_OUTSTANDING_REQUESTS;
8170     }
8171 
8172     pqi_calculate_io_resources(ctrl_info);
8173 
8174     rc = pqi_alloc_error_buffer(ctrl_info);
8175     if (rc) {
8176         dev_err(&ctrl_info->pci_dev->dev,
8177             "failed to allocate PQI error buffer\n");
8178         return rc;
8179     }
8180 
8181     /*
8182      * If the function we are about to call succeeds, the
8183      * controller will transition from legacy SIS mode
8184      * into PQI mode.
8185      */
8186     rc = sis_init_base_struct_addr(ctrl_info);
8187     if (rc) {
8188         dev_err(&ctrl_info->pci_dev->dev,
8189             "error initializing PQI mode\n");
8190         return rc;
8191     }
8192 
8193     /* Wait for the controller to complete the SIS -> PQI transition. */
8194     rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8195     if (rc) {
8196         dev_err(&ctrl_info->pci_dev->dev,
8197             "transition to PQI mode failed\n");
8198         return rc;
8199     }
8200 
8201     /* From here on, we are running in PQI mode. */
8202     ctrl_info->pqi_mode_enabled = true;
8203     pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8204 
8205     rc = pqi_alloc_admin_queues(ctrl_info);
8206     if (rc) {
8207         dev_err(&ctrl_info->pci_dev->dev,
8208             "failed to allocate admin queues\n");
8209         return rc;
8210     }
8211 
8212     rc = pqi_create_admin_queues(ctrl_info);
8213     if (rc) {
8214         dev_err(&ctrl_info->pci_dev->dev,
8215             "error creating admin queues\n");
8216         return rc;
8217     }
8218 
8219     rc = pqi_report_device_capability(ctrl_info);
8220     if (rc) {
8221         dev_err(&ctrl_info->pci_dev->dev,
8222             "obtaining device capability failed\n");
8223         return rc;
8224     }
8225 
8226     rc = pqi_validate_device_capability(ctrl_info);
8227     if (rc)
8228         return rc;
8229 
8230     pqi_calculate_queue_resources(ctrl_info);
8231 
8232     rc = pqi_enable_msix_interrupts(ctrl_info);
8233     if (rc)
8234         return rc;
8235 
8236     if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
8237         ctrl_info->max_msix_vectors =
8238             ctrl_info->num_msix_vectors_enabled;
8239         pqi_calculate_queue_resources(ctrl_info);
8240     }
8241 
8242     rc = pqi_alloc_io_resources(ctrl_info);
8243     if (rc)
8244         return rc;
8245 
8246     rc = pqi_alloc_operational_queues(ctrl_info);
8247     if (rc) {
8248         dev_err(&ctrl_info->pci_dev->dev,
8249             "failed to allocate operational queues\n");
8250         return rc;
8251     }
8252 
8253     pqi_init_operational_queues(ctrl_info);
8254 
8255     rc = pqi_create_queues(ctrl_info);
8256     if (rc)
8257         return rc;
8258 
8259     rc = pqi_request_irqs(ctrl_info);
8260     if (rc)
8261         return rc;
8262 
8263     pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8264 
8265     ctrl_info->controller_online = true;
8266 
8267     rc = pqi_process_config_table(ctrl_info);
8268     if (rc)
8269         return rc;
8270 
8271     pqi_start_heartbeat_timer(ctrl_info);
8272 
8273     if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8274         rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8275         if (rc) { /* Supported features not returned correctly. */
8276             dev_err(&ctrl_info->pci_dev->dev,
8277                 "error obtaining advanced RAID bypass configuration\n");
8278             return rc;
8279         }
8280         ctrl_info->ciss_report_log_flags |=
8281             CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8282     }
8283 
8284     rc = pqi_enable_events(ctrl_info);
8285     if (rc) {
8286         dev_err(&ctrl_info->pci_dev->dev,
8287             "error enabling events\n");
8288         return rc;
8289     }
8290 
8291     /* Register with the SCSI subsystem. */
8292     rc = pqi_register_scsi(ctrl_info);
8293     if (rc)
8294         return rc;
8295 
8296     rc = pqi_get_ctrl_product_details(ctrl_info);
8297     if (rc) {
8298         dev_err(&ctrl_info->pci_dev->dev,
8299             "error obtaining product details\n");
8300         return rc;
8301     }
8302 
8303     rc = pqi_get_ctrl_serial_number(ctrl_info);
8304     if (rc) {
8305         dev_err(&ctrl_info->pci_dev->dev,
8306             "error obtaining ctrl serial number\n");
8307         return rc;
8308     }
8309 
8310     rc = pqi_set_diag_rescan(ctrl_info);
8311     if (rc) {
8312         dev_err(&ctrl_info->pci_dev->dev,
8313             "error enabling multi-lun rescan\n");
8314         return rc;
8315     }
8316 
8317     rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8318     if (rc) {
8319         dev_err(&ctrl_info->pci_dev->dev,
8320             "error updating host wellness\n");
8321         return rc;
8322     }
8323 
8324     pqi_schedule_update_time_worker(ctrl_info);
8325 
8326     pqi_scan_scsi_devices(ctrl_info);
8327 
8328     return 0;
8329 }
8330 
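/*
 * Reset the host-side copies of the admin, operational, and event queue
 * indices and zero the corresponding producer/consumer index registers
 * before the queues are re-created during a resume.
 */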
8331 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
8332 {
8333     unsigned int i;
8334     struct pqi_admin_queues *admin_queues;
8335     struct pqi_event_queue *event_queue;
8336 
8337     admin_queues = &ctrl_info->admin_queues;
8338     admin_queues->iq_pi_copy = 0;
8339     admin_queues->oq_ci_copy = 0;
8340     writel(0, admin_queues->oq_pi);
8341 
8342     for (i = 0; i < ctrl_info->num_queue_groups; i++) {
8343         ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
8344         ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
8345         ctrl_info->queue_groups[i].oq_ci_copy = 0;
8346 
8347         writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
8348         writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
8349         writel(0, ctrl_info->queue_groups[i].oq_pi);
8350     }
8351 
8352     event_queue = &ctrl_info->event_queue;
8353     writel(0, event_queue->oq_pi);
8354     event_queue->oq_ci_copy = 0;
8355 }
8356 
8357 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
8358 {
8359     int rc;
8360 
8361     rc = pqi_force_sis_mode(ctrl_info);
8362     if (rc)
8363         return rc;
8364 
8365     /*
8366      * Wait until the controller is ready to start accepting SIS
8367      * commands.
8368      */
8369     rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
8370     if (rc)
8371         return rc;
8372 
8373     /*
8374      * Get the controller properties. This allows us to determine
8375      * whether the controller supports PQI mode.
8376      */
8377     rc = sis_get_ctrl_properties(ctrl_info);
8378     if (rc) {
8379         dev_err(&ctrl_info->pci_dev->dev,
8380             "error obtaining controller properties\n");
8381         return rc;
8382     }
8383 
8384     rc = sis_get_pqi_capabilities(ctrl_info);
8385     if (rc) {
8386         dev_err(&ctrl_info->pci_dev->dev,
8387             "error obtaining controller capabilities\n");
8388         return rc;
8389     }
8390 
8391     /*
8392      * If the function we are about to call succeeds, the
8393      * controller will transition from legacy SIS mode
8394      * into PQI mode.
8395      */
8396     rc = sis_init_base_struct_addr(ctrl_info);
8397     if (rc) {
8398         dev_err(&ctrl_info->pci_dev->dev,
8399             "error initializing PQI mode\n");
8400         return rc;
8401     }
8402 
8403     /* Wait for the controller to complete the SIS -> PQI transition. */
8404     rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8405     if (rc) {
8406         dev_err(&ctrl_info->pci_dev->dev,
8407             "transition to PQI mode failed\n");
8408         return rc;
8409     }
8410 
8411     /* From here on, we are running in PQI mode. */
8412     ctrl_info->pqi_mode_enabled = true;
8413     pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8414 
8415     pqi_reinit_queues(ctrl_info);
8416 
8417     rc = pqi_create_admin_queues(ctrl_info);
8418     if (rc) {
8419         dev_err(&ctrl_info->pci_dev->dev,
8420             "error creating admin queues\n");
8421         return rc;
8422     }
8423 
8424     rc = pqi_create_queues(ctrl_info);
8425     if (rc)
8426         return rc;
8427 
8428     pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8429 
8430     ctrl_info->controller_online = true;
8431     pqi_ctrl_unblock_requests(ctrl_info);
8432 
8433     pqi_ctrl_reset_config(ctrl_info);
8434 
8435     rc = pqi_process_config_table(ctrl_info);
8436     if (rc)
8437         return rc;
8438 
8439     pqi_start_heartbeat_timer(ctrl_info);
8440 
8441     if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8442         rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8443         if (rc) {
8444             dev_err(&ctrl_info->pci_dev->dev,
8445                 "error obtaining advanced RAID bypass configuration\n");
8446             return rc;
8447         }
8448         ctrl_info->ciss_report_log_flags |=
8449             CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8450     }
8451 
8452     rc = pqi_enable_events(ctrl_info);
8453     if (rc) {
8454         dev_err(&ctrl_info->pci_dev->dev,
8455             "error enabling events\n");
8456         return rc;
8457     }
8458 
8459     rc = pqi_get_ctrl_product_details(ctrl_info);
8460     if (rc) {
8461         dev_err(&ctrl_info->pci_dev->dev,
8462             "error obtaining product details\n");
8463         return rc;
8464     }
8465 
8466     rc = pqi_set_diag_rescan(ctrl_info);
8467     if (rc) {
8468         dev_err(&ctrl_info->pci_dev->dev,
8469             "error enabling multi-lun rescan\n");
8470         return rc;
8471     }
8472 
8473     rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8474     if (rc) {
8475         dev_err(&ctrl_info->pci_dev->dev,
8476             "error updating host wellness\n");
8477         return rc;
8478     }
8479 
8480     if (pqi_ofa_in_progress(ctrl_info))
8481         pqi_ctrl_unblock_scan(ctrl_info);
8482 
8483     pqi_scan_scsi_devices(ctrl_info);
8484 
8485     return 0;
8486 }
8487 
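/* Program the completion timeout range field in the PCIe Device Control 2 register. */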
8488 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
8489 {
8490     int rc;
8491 
8492     rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
8493         PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
8494 
8495     return pcibios_err_to_errno(rc);
8496 }
8497 
8498 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
8499 {
8500     int rc;
8501     u64 mask;
8502 
8503     rc = pci_enable_device(ctrl_info->pci_dev);
8504     if (rc) {
8505         dev_err(&ctrl_info->pci_dev->dev,
8506             "failed to enable PCI device\n");
8507         return rc;
8508     }
8509 
8510     if (sizeof(dma_addr_t) > 4)
8511         mask = DMA_BIT_MASK(64);
8512     else
8513         mask = DMA_BIT_MASK(32);
8514 
8515     rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
8516     if (rc) {
8517         dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
8518         goto disable_device;
8519     }
8520 
8521     rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
8522     if (rc) {
8523         dev_err(&ctrl_info->pci_dev->dev,
8524             "failed to obtain PCI resources\n");
8525         goto disable_device;
8526     }
8527 
8528     ctrl_info->iomem_base = ioremap(pci_resource_start(
8529         ctrl_info->pci_dev, 0),
8530         sizeof(struct pqi_ctrl_registers));
8531     if (!ctrl_info->iomem_base) {
8532         dev_err(&ctrl_info->pci_dev->dev,
8533             "failed to map memory for controller registers\n");
8534         rc = -ENOMEM;
8535         goto release_regions;
8536     }
8537 
8538 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS       0x6
8539 
8540     /* Increase the PCIe completion timeout. */
8541     rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
8542         PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
8543     if (rc) {
8544         dev_err(&ctrl_info->pci_dev->dev,
8545             "failed to set PCIe completion timeout\n");
8546         goto release_regions;
8547     }
8548 
8549     /* Enable bus mastering. */
8550     pci_set_master(ctrl_info->pci_dev);
8551 
8552     ctrl_info->registers = ctrl_info->iomem_base;
8553     ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
8554 
8555     pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
8556 
8557     return 0;
8558 
8559 release_regions:
8560     pci_release_regions(ctrl_info->pci_dev);
8561 disable_device:
8562     pci_disable_device(ctrl_info->pci_dev);
8563 
8564     return rc;
8565 }
8566 
8567 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
8568 {
8569     iounmap(ctrl_info->iomem_base);
8570     pci_release_regions(ctrl_info->pci_dev);
8571     if (pci_is_enabled(ctrl_info->pci_dev))
8572         pci_disable_device(ctrl_info->pci_dev);
8573     pci_set_drvdata(ctrl_info->pci_dev, NULL);
8574 }
8575 
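/*
 * Allocate a pqi_ctrl_info on the requested NUMA node and initialize its
 * locks, device list, work items, heartbeat timer, and default limits.
 */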
8576 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
8577 {
8578     struct pqi_ctrl_info *ctrl_info;
8579 
8580     ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
8581             GFP_KERNEL, numa_node);
8582     if (!ctrl_info)
8583         return NULL;
8584 
8585     mutex_init(&ctrl_info->scan_mutex);
8586     mutex_init(&ctrl_info->lun_reset_mutex);
8587     mutex_init(&ctrl_info->ofa_mutex);
8588 
8589     INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
8590     spin_lock_init(&ctrl_info->scsi_device_list_lock);
8591 
8592     INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
8593     atomic_set(&ctrl_info->num_interrupts, 0);
8594 
8595     INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
8596     INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
8597 
8598     timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
8599     INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
8600 
8601     INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
8602     INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);
8603 
8604     sema_init(&ctrl_info->sync_request_sem,
8605         PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
8606     init_waitqueue_head(&ctrl_info->block_requests_wait);
8607 
8608     ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
8609     ctrl_info->irq_mode = IRQ_MODE_NONE;
8610     ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
8611 
8612     ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
8613     ctrl_info->max_transfer_encrypted_sas_sata =
8614         PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
8615     ctrl_info->max_transfer_encrypted_nvme =
8616         PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
8617     ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
8618     ctrl_info->max_write_raid_1_10_2drive = ~0;
8619     ctrl_info->max_write_raid_1_10_3drive = ~0;
8620     ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts;
8621 
8622     return ctrl_info;
8623 }
8624 
8625 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
8626 {
8627     kfree(ctrl_info);
8628 }
8629 
8630 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
8631 {
8632     pqi_free_irqs(ctrl_info);
8633     pqi_disable_msix_interrupts(ctrl_info);
8634 }
8635 
8636 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
8637 {
8638     pqi_free_interrupts(ctrl_info);
8639     if (ctrl_info->queue_memory_base)
8640         dma_free_coherent(&ctrl_info->pci_dev->dev,
8641             ctrl_info->queue_memory_length,
8642             ctrl_info->queue_memory_base,
8643             ctrl_info->queue_memory_base_dma_handle);
8644     if (ctrl_info->admin_queue_memory_base)
8645         dma_free_coherent(&ctrl_info->pci_dev->dev,
8646             ctrl_info->admin_queue_memory_length,
8647             ctrl_info->admin_queue_memory_base,
8648             ctrl_info->admin_queue_memory_base_dma_handle);
8649     pqi_free_all_io_requests(ctrl_info);
8650     if (ctrl_info->error_buffer)
8651         dma_free_coherent(&ctrl_info->pci_dev->dev,
8652             ctrl_info->error_buffer_length,
8653             ctrl_info->error_buffer,
8654             ctrl_info->error_buffer_dma_handle);
8655     if (ctrl_info->iomem_base)
8656         pqi_cleanup_pci_init(ctrl_info);
8657     pqi_free_ctrl_info(ctrl_info);
8658 }
8659 
8660 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
8661 {
8662     ctrl_info->controller_online = false;
8663     pqi_stop_heartbeat_timer(ctrl_info);
8664     pqi_ctrl_block_requests(ctrl_info);
8665     pqi_cancel_rescan_worker(ctrl_info);
8666     pqi_cancel_update_time_worker(ctrl_info);
8667     if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) {
8668         pqi_fail_all_outstanding_requests(ctrl_info);
8669         ctrl_info->pqi_mode_enabled = false;
8670     }
8671     pqi_unregister_scsi(ctrl_info);
8672     if (ctrl_info->pqi_mode_enabled)
8673         pqi_revert_to_sis_mode(ctrl_info);
8674     pqi_free_ctrl_resources(ctrl_info);
8675 }
8676 
8677 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
8678 {
8679     pqi_ctrl_block_scan(ctrl_info);
8680     pqi_scsi_block_requests(ctrl_info);
8681     pqi_ctrl_block_device_reset(ctrl_info);
8682     pqi_ctrl_block_requests(ctrl_info);
8683     pqi_ctrl_wait_until_quiesced(ctrl_info);
8684     pqi_stop_heartbeat_timer(ctrl_info);
8685 }
8686 
8687 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
8688 {
8689     pqi_start_heartbeat_timer(ctrl_info);
8690     pqi_ctrl_unblock_requests(ctrl_info);
8691     pqi_ctrl_unblock_device_reset(ctrl_info);
8692     pqi_scsi_unblock_requests(ctrl_info);
8693     pqi_ctrl_unblock_scan(ctrl_info);
8694 }
8695 
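/*
 * Allocate the OFA host buffer as sg_count DMA-coherent chunks of
 * chunk_size bytes and describe each chunk in the OFA memory descriptor
 * list; on failure, free any chunks already allocated.
 */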
8696 static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
8697 {
8698     int i;
8699     u32 sg_count;
8700     struct device *dev;
8701     struct pqi_ofa_memory *ofap;
8702     struct pqi_sg_descriptor *mem_descriptor;
8703     dma_addr_t dma_handle;
8704 
8705     ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8706 
8707     sg_count = DIV_ROUND_UP(total_size, chunk_size);
8708     if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
8709         goto out;
8710 
8711     ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
8712     if (!ctrl_info->pqi_ofa_chunk_virt_addr)
8713         goto out;
8714 
8715     dev = &ctrl_info->pci_dev->dev;
8716 
8717     for (i = 0; i < sg_count; i++) {
8718         ctrl_info->pqi_ofa_chunk_virt_addr[i] =
8719             dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
8720         if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
8721             goto out_free_chunks;
8722         mem_descriptor = &ofap->sg_descriptor[i];
8723         put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
8724         put_unaligned_le32(chunk_size, &mem_descriptor->length);
8725     }
8726 
8727     put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
8728     put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
8729     put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);
8730 
8731     return 0;
8732 
8733 out_free_chunks:
8734     while (--i >= 0) {
8735         mem_descriptor = &ofap->sg_descriptor[i];
8736         dma_free_coherent(dev, chunk_size,
8737             ctrl_info->pqi_ofa_chunk_virt_addr[i],
8738             get_unaligned_le64(&mem_descriptor->address));
8739     }
8740     kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
8741 
8742 out:
8743     return -ENOMEM;
8744 }
8745 
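/*
 * Size the OFA host buffer from the number of bytes the controller
 * requested, then keep halving the (page-aligned) chunk size until an
 * allocation attempt succeeds or the chunk size drops below the minimum
 * implied by PQI_OFA_MAX_SG_DESCRIPTORS.
 */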
8746 static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
8747 {
8748     u32 total_size;
8749     u32 chunk_size;
8750     u32 min_chunk_size;
8751 
8752     if (ctrl_info->ofa_bytes_requested == 0)
8753         return 0;
8754 
8755     total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
8756     min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
8757     min_chunk_size = PAGE_ALIGN(min_chunk_size);
8758 
8759     for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
8760         if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
8761             return 0;
8762         chunk_size /= 2;
8763         chunk_size = PAGE_ALIGN(chunk_size);
8764     }
8765 
8766     return -ENOMEM;
8767 }
8768 
8769 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
8770 {
8771     struct device *dev;
8772     struct pqi_ofa_memory *ofap;
8773 
8774     dev = &ctrl_info->pci_dev->dev;
8775 
8776     ofap = dma_alloc_coherent(dev, sizeof(*ofap),
8777         &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
8778     if (!ofap)
8779         return;
8780 
8781     ctrl_info->pqi_ofa_mem_virt_addr = ofap;
8782 
8783     if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
8784         dev_err(dev,
8785             "failed to allocate host buffer for Online Firmware Activation\n");
8786         dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
8787         ctrl_info->pqi_ofa_mem_virt_addr = NULL;
8788         return;
8789     }
8790 
8791     put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
8792     memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
8793 }
8794 
8795 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
8796 {
8797     unsigned int i;
8798     struct device *dev;
8799     struct pqi_ofa_memory *ofap;
8800     struct pqi_sg_descriptor *mem_descriptor;
8801     unsigned int num_memory_descriptors;
8802 
8803     ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8804     if (!ofap)
8805         return;
8806 
8807     dev = &ctrl_info->pci_dev->dev;
8808 
8809     if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
8810         goto out;
8811 
8812     mem_descriptor = ofap->sg_descriptor;
8813     num_memory_descriptors =
8814         get_unaligned_le16(&ofap->num_memory_descriptors);
8815 
8816     for (i = 0; i < num_memory_descriptors; i++) {
8817         dma_free_coherent(dev,
8818             get_unaligned_le32(&mem_descriptor[i].length),
8819             ctrl_info->pqi_ofa_chunk_virt_addr[i],
8820             get_unaligned_le64(&mem_descriptor[i].address));
8821     }
8822     kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
8823 
8824 out:
8825     dma_free_coherent(dev, sizeof(*ofap), ofap,
8826         ctrl_info->pqi_ofa_mem_dma_handle);
8827     ctrl_info->pqi_ofa_mem_virt_addr = NULL;
8828 }
8829 
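/*
 * Send a vendor-general host memory update request describing the OFA host
 * buffer to the controller; if no buffer was allocated, the request carries
 * a zero buffer address and length.
 */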
8830 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
8831 {
8832     u32 buffer_length;
8833     struct pqi_vendor_general_request request;
8834     struct pqi_ofa_memory *ofap;
8835 
8836     memset(&request, 0, sizeof(request));
8837 
8838     request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
8839     put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
8840         &request.header.iu_length);
8841     put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
8842         &request.function_code);
8843 
8844     ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8845 
8846     if (ofap) {
8847         buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
8848             get_unaligned_le16(&ofap->num_memory_descriptors) *
8849             sizeof(struct pqi_sg_descriptor);
8850 
8851         put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
8852             &request.data.ofa_memory_allocation.buffer_address);
8853         put_unaligned_le32(buffer_length,
8854             &request.data.ofa_memory_allocation.buffer_length);
8855     }
8856 
8857     return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
8858 }
8859 
8860 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
8861 {
8862     ssleep(delay_secs);
8863 
8864     return pqi_ctrl_init_resume(ctrl_info);
8865 }
8866 
8867 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
8868     .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
8869     .status = SAM_STAT_CHECK_CONDITION,
8870 };
8871 
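/*
 * Complete every in-flight I/O request with an error once the controller is
 * offline: SCSI commands are failed with DID_NO_CONNECT (or simply freed if
 * their device is already gone), and driver-initiated requests are failed
 * with -ENXIO and a canned CHECK CONDITION error.
 */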
8872 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
8873 {
8874     unsigned int i;
8875     struct pqi_io_request *io_request;
8876     struct scsi_cmnd *scmd;
8877     struct scsi_device *sdev;
8878 
8879     for (i = 0; i < ctrl_info->max_io_slots; i++) {
8880         io_request = &ctrl_info->io_request_pool[i];
8881         if (atomic_read(&io_request->refcount) == 0)
8882             continue;
8883 
8884         scmd = io_request->scmd;
8885         if (scmd) {
8886             sdev = scmd->device;
8887             if (!sdev || !scsi_device_online(sdev)) {
8888                 pqi_free_io_request(io_request);
8889                 continue;
8890             } else {
8891                 set_host_byte(scmd, DID_NO_CONNECT);
8892             }
8893         } else {
8894             io_request->status = -ENXIO;
8895             io_request->error_info =
8896                 &pqi_ctrl_offline_raid_error_info;
8897         }
8898 
8899         io_request->io_complete_callback(io_request,
8900             io_request->context);
8901     }
8902 }
8903 
8904 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
8905 {
8906     pqi_perform_lockup_action();
8907     pqi_stop_heartbeat_timer(ctrl_info);
8908     pqi_free_interrupts(ctrl_info);
8909     pqi_cancel_rescan_worker(ctrl_info);
8910     pqi_cancel_update_time_worker(ctrl_info);
8911     pqi_ctrl_wait_until_quiesced(ctrl_info);
8912     pqi_fail_all_outstanding_requests(ctrl_info);
8913     pqi_ctrl_unblock_requests(ctrl_info);
8914 }
8915 
8916 static void pqi_ctrl_offline_worker(struct work_struct *work)
8917 {
8918     struct pqi_ctrl_info *ctrl_info;
8919 
8920     ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
8921     pqi_take_ctrl_offline_deferred(ctrl_info);
8922 }
8923 
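/*
 * Take the controller offline: mark it offline, block new requests,
 * optionally issue a SIS shutdown, disable the PCI device, and defer the
 * heavier cleanup (failing outstanding requests, freeing interrupts) to
 * ctrl_offline_work.
 */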
8924 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
8925     enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
8926 {
8927     if (!ctrl_info->controller_online)
8928         return;
8929 
8930     ctrl_info->controller_online = false;
8931     ctrl_info->pqi_mode_enabled = false;
8932     pqi_ctrl_block_requests(ctrl_info);
8933     if (!pqi_disable_ctrl_shutdown)
8934         sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
8935     pci_disable_device(ctrl_info->pci_dev);
8936     dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
8937     schedule_work(&ctrl_info->ctrl_offline_work);
8938 }
8939 
8940 static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
8941     const struct pci_device_id *id)
8942 {
8943     char *ctrl_description;
8944 
8945     if (id->driver_data)
8946         ctrl_description = (char *)id->driver_data;
8947     else
8948         ctrl_description = "Microchip Smart Family Controller";
8949 
8950     dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
8951 }
8952 
8953 static int pqi_pci_probe(struct pci_dev *pci_dev,
8954     const struct pci_device_id *id)
8955 {
8956     int rc;
8957     int node;
8958     struct pqi_ctrl_info *ctrl_info;
8959 
8960     pqi_print_ctrl_info(pci_dev, id);
8961 
8962     if (pqi_disable_device_id_wildcards &&
8963         id->subvendor == PCI_ANY_ID &&
8964         id->subdevice == PCI_ANY_ID) {
8965         dev_warn(&pci_dev->dev,
8966             "controller not probed because device ID wildcards are disabled\n");
8967         return -ENODEV;
8968     }
8969 
8970     if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
8971         dev_warn(&pci_dev->dev,
8972             "controller device ID matched using wildcards\n");
8973 
8974     node = dev_to_node(&pci_dev->dev);
8975     if (node == NUMA_NO_NODE) {
8976         node = cpu_to_node(0);
8977         if (node == NUMA_NO_NODE)
8978             node = 0;
8979         set_dev_node(&pci_dev->dev, node);
8980     }
8981 
8982     ctrl_info = pqi_alloc_ctrl_info(node);
8983     if (!ctrl_info) {
8984         dev_err(&pci_dev->dev,
8985             "failed to allocate controller info block\n");
8986         return -ENOMEM;
8987     }
8988 
8989     ctrl_info->pci_dev = pci_dev;
8990 
8991     rc = pqi_pci_init(ctrl_info);
8992     if (rc)
8993         goto error;
8994 
8995     rc = pqi_ctrl_init(ctrl_info);
8996     if (rc)
8997         goto error;
8998 
8999     return 0;
9000 
9001 error:
9002     pqi_remove_ctrl(ctrl_info);
9003 
9004     return rc;
9005 }
9006 
9007 static void pqi_pci_remove(struct pci_dev *pci_dev)
9008 {
9009     struct pqi_ctrl_info *ctrl_info;
9010     u16 vendor_id;
9011 
9012     ctrl_info = pci_get_drvdata(pci_dev);
9013     if (!ctrl_info)
9014         return;
9015 
9016     pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
9017     if (vendor_id == 0xffff)
9018         ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL;
9019     else
9020         ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL;
9021 
9022     pqi_remove_ctrl(ctrl_info);
9023 }
9024 
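/*
 * Warn if any I/O request is still outstanding at shutdown/suspend time;
 * exactly one of the two WARN_ONs fires per busy slot, identifying whether
 * it holds a SCSI command from the midlayer or a driver-internal request.
 */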
9025 static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
9026 {
9027     unsigned int i;
9028     struct pqi_io_request *io_request;
9029     struct scsi_cmnd *scmd;
9030 
9031     for (i = 0; i < ctrl_info->max_io_slots; i++) {
9032         io_request = &ctrl_info->io_request_pool[i];
9033         if (atomic_read(&io_request->refcount) == 0)
9034             continue;
9035         scmd = io_request->scmd;
9036         WARN_ON(scmd != NULL); /* I/O command from the SCSI midlayer (SML) */
9037         WARN_ON(scmd == NULL); /* non-I/O or driver-initiated command */
9038     }
9039 }
9040 
9041 static void pqi_shutdown(struct pci_dev *pci_dev)
9042 {
9043     int rc;
9044     struct pqi_ctrl_info *ctrl_info;
9045     enum bmic_flush_cache_shutdown_event shutdown_event;
9046 
9047     ctrl_info = pci_get_drvdata(pci_dev);
9048     if (!ctrl_info) {
9049         dev_err(&pci_dev->dev,
9050             "cache could not be flushed\n");
9051         return;
9052     }
9053 
9054     pqi_wait_until_ofa_finished(ctrl_info);
9055 
9056     pqi_scsi_block_requests(ctrl_info);
9057     pqi_ctrl_block_device_reset(ctrl_info);
9058     pqi_ctrl_block_requests(ctrl_info);
9059     pqi_ctrl_wait_until_quiesced(ctrl_info);
9060 
9061     if (system_state == SYSTEM_RESTART)
9062         shutdown_event = RESTART;
9063     else
9064         shutdown_event = SHUTDOWN;
9065 
9066     /*
9067      * Write all data in the controller's battery-backed cache to
9068      * storage.
9069      */
9070     rc = pqi_flush_cache(ctrl_info, shutdown_event);
9071     if (rc)
9072         dev_err(&pci_dev->dev,
9073             "unable to flush controller cache\n");
9074 
9075     pqi_crash_if_pending_command(ctrl_info);
9076     pqi_reset(ctrl_info);
9077 }
9078 
9079 static void pqi_process_lockup_action_param(void)
9080 {
9081     unsigned int i;
9082 
9083     if (!pqi_lockup_action_param)
9084         return;
9085 
9086     for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
9087         if (strcmp(pqi_lockup_action_param,
9088             pqi_lockup_actions[i].name) == 0) {
9089             pqi_lockup_action = pqi_lockup_actions[i].action;
9090             return;
9091         }
9092     }
9093 
9094     pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
9095         DRIVER_NAME_SHORT, pqi_lockup_action_param);
9096 }
9097 
9098 #define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS       30
9099 #define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS       (30 * 60)
9100 
9101 static void pqi_process_ctrl_ready_timeout_param(void)
9102 {
9103     if (pqi_ctrl_ready_timeout_secs == 0)
9104         return;
9105 
9106     if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) {
9107         pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n",
9108             DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS);
9109         pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS;
9110     } else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) {
9111         pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n",
9112             DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS);
9113         pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS;
9114     }
9115 
9116     sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs;
9117 }
9118 
9119 static void pqi_process_module_params(void)
9120 {
9121     pqi_process_lockup_action_param();
9122     pqi_process_ctrl_ready_timeout_param();
9123 }
9124 
9125 #if defined(CONFIG_PM)
9126 
9127 static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev)
9128 {
9129     if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304)
9130         return RESTART;
9131 
9132     return SUSPEND;
9133 }
9134 
9135 static int pqi_suspend_or_freeze(struct device *dev, bool suspend)
9136 {
9137     struct pci_dev *pci_dev;
9138     struct pqi_ctrl_info *ctrl_info;
9139 
9140     pci_dev = to_pci_dev(dev);
9141     ctrl_info = pci_get_drvdata(pci_dev);
9142 
9143     pqi_wait_until_ofa_finished(ctrl_info);
9144 
9145     pqi_ctrl_block_scan(ctrl_info);
9146     pqi_scsi_block_requests(ctrl_info);
9147     pqi_ctrl_block_device_reset(ctrl_info);
9148     pqi_ctrl_block_requests(ctrl_info);
9149     pqi_ctrl_wait_until_quiesced(ctrl_info);
9150 
9151     if (suspend) {
9152         enum bmic_flush_cache_shutdown_event shutdown_event;
9153 
9154         shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
9155         pqi_flush_cache(ctrl_info, shutdown_event);
9156     }
9157 
9158     pqi_stop_heartbeat_timer(ctrl_info);
9159     pqi_crash_if_pending_command(ctrl_info);
9160     pqi_free_irqs(ctrl_info);
9161 
9162     ctrl_info->controller_online = false;
9163     ctrl_info->pqi_mode_enabled = false;
9164 
9165     return 0;
9166 }
9167 
9168 static __maybe_unused int pqi_suspend(struct device *dev)
9169 {
9170     return pqi_suspend_or_freeze(dev, true);
9171 }
9172 
9173 static int pqi_resume_or_restore(struct device *dev)
9174 {
9175     int rc;
9176     struct pci_dev *pci_dev;
9177     struct pqi_ctrl_info *ctrl_info;
9178 
9179     pci_dev = to_pci_dev(dev);
9180     ctrl_info = pci_get_drvdata(pci_dev);
9181 
9182     rc = pqi_request_irqs(ctrl_info);
9183     if (rc)
9184         return rc;
9185 
9186     pqi_ctrl_unblock_device_reset(ctrl_info);
9187     pqi_ctrl_unblock_requests(ctrl_info);
9188     pqi_scsi_unblock_requests(ctrl_info);
9189     pqi_ctrl_unblock_scan(ctrl_info);
9190 
9191     ssleep(PQI_POST_RESET_DELAY_SECS);
9192 
9193     return pqi_ctrl_init_resume(ctrl_info);
9194 }
9195 
9196 static int pqi_freeze(struct device *dev)
9197 {
9198     return pqi_suspend_or_freeze(dev, false);
9199 }
9200 
9201 static int pqi_thaw(struct device *dev)
9202 {
9203     int rc;
9204     struct pci_dev *pci_dev;
9205     struct pqi_ctrl_info *ctrl_info;
9206 
9207     pci_dev = to_pci_dev(dev);
9208     ctrl_info = pci_get_drvdata(pci_dev);
9209 
9210     rc = pqi_request_irqs(ctrl_info);
9211     if (rc)
9212         return rc;
9213 
9214     ctrl_info->controller_online = true;
9215     ctrl_info->pqi_mode_enabled = true;
9216 
9217     pqi_ctrl_unblock_device_reset(ctrl_info);
9218     pqi_ctrl_unblock_requests(ctrl_info);
9219     pqi_scsi_unblock_requests(ctrl_info);
9220     pqi_ctrl_unblock_scan(ctrl_info);
9221 
9222     return 0;
9223 }
9224 
9225 static int pqi_poweroff(struct device *dev)
9226 {
9227     struct pci_dev *pci_dev;
9228     struct pqi_ctrl_info *ctrl_info;
9229     enum bmic_flush_cache_shutdown_event shutdown_event;
9230 
9231     pci_dev = to_pci_dev(dev);
9232     ctrl_info = pci_get_drvdata(pci_dev);
9233 
9234     shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
9235     pqi_flush_cache(ctrl_info, shutdown_event);
9236 
9237     return 0;
9238 }
9239 
9240 static const struct dev_pm_ops pqi_pm_ops = {
9241     .suspend = pqi_suspend,
9242     .resume = pqi_resume_or_restore,
9243     .freeze = pqi_freeze,
9244     .thaw = pqi_thaw,
9245     .poweroff = pqi_poweroff,
9246     .restore = pqi_resume_or_restore,
9247 };
9248 
9249 #endif /* CONFIG_PM */
9250 
9251 /* Define the PCI IDs for the controllers that we support. */
9252 static const struct pci_device_id pqi_pci_id_table[] = {
9253     {
9254         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9255                    0x105b, 0x1211)
9256     },
9257     {
9258         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9259                    0x105b, 0x1321)
9260     },
9261     {
9262         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9263                    0x152d, 0x8a22)
9264     },
9265     {
9266         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9267                    0x152d, 0x8a23)
9268     },
9269     {
9270         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9271                    0x152d, 0x8a24)
9272     },
9273     {
9274         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9275                    0x152d, 0x8a36)
9276     },
9277     {
9278         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9279                    0x152d, 0x8a37)
9280     },
9281     {
9282         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9283                    0x193d, 0x1104)
9284     },
9285     {
9286         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9287                    0x193d, 0x1105)
9288     },
9289     {
9290         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9291                    0x193d, 0x1106)
9292     },
9293     {
9294         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9295                    0x193d, 0x1107)
9296     },
9297     {
9298         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9299                    0x193d, 0x1108)
9300     },
9301     {
9302         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9303                    0x193d, 0x1109)
9304     },
9305     {
9306         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9307                    0x193d, 0x8460)
9308     },
9309     {
9310         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9311                    0x193d, 0x8461)
9312     },
9313     {
9314         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9315                    0x193d, 0xc460)
9316     },
9317     {
9318         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9319                    0x193d, 0xc461)
9320     },
9321     {
9322         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9323                    0x193d, 0xf460)
9324     },
9325     {
9326         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9327                    0x193d, 0xf461)
9328     },
9329     {
9330         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9331                    0x1bd4, 0x0045)
9332     },
9333     {
9334         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9335                    0x1bd4, 0x0046)
9336     },
9337     {
9338         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9339                    0x1bd4, 0x0047)
9340     },
9341     {
9342         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9343                    0x1bd4, 0x0048)
9344     },
9345     {
9346         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9347                    0x1bd4, 0x004a)
9348     },
9349     {
9350         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9351                    0x1bd4, 0x004b)
9352     },
9353     {
9354         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9355                    0x1bd4, 0x004c)
9356     },
9357     {
9358         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9359                    0x1bd4, 0x004f)
9360     },
9361     {
9362         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9363                    0x1bd4, 0x0051)
9364     },
9365     {
9366         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9367                    0x1bd4, 0x0052)
9368     },
9369     {
9370         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9371                    0x1bd4, 0x0053)
9372     },
9373     {
9374         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9375                    0x1bd4, 0x0054)
9376     },
9377     {
9378         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9379                    0x1bd4, 0x006b)
9380     },
9381     {
9382         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9383                    0x1bd4, 0x006c)
9384     },
9385     {
9386         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9387                    0x1bd4, 0x006d)
9388     },
9389     {
9390         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9391                    0x1bd4, 0x006f)
9392     },
9393     {
9394         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9395                    0x1bd4, 0x0070)
9396     },
9397     {
9398         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9399                    0x1bd4, 0x0071)
9400     },
9401     {
9402         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9403                    0x1bd4, 0x0072)
9404     },
9405     {
9406         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9407                    0x19e5, 0xd227)
9408     },
9409     {
9410         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9411                    0x19e5, 0xd228)
9412     },
9413     {
9414         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9415                    0x19e5, 0xd229)
9416     },
9417     {
9418         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9419                    0x19e5, 0xd22a)
9420     },
9421     {
9422         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9423                    0x19e5, 0xd22b)
9424     },
9425     {
9426         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9427                    0x19e5, 0xd22c)
9428     },
9429     {
9430         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9431                    PCI_VENDOR_ID_ADAPTEC2, 0x0110)
9432     },
9433     {
9434         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9435                    PCI_VENDOR_ID_ADAPTEC2, 0x0608)
9436     },
9437     {
9438         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9439                    PCI_VENDOR_ID_ADAPTEC2, 0x0659)
9440     },
9441     {
9442         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9443                    PCI_VENDOR_ID_ADAPTEC2, 0x0800)
9444     },
9445     {
9446         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9447                    PCI_VENDOR_ID_ADAPTEC2, 0x0801)
9448     },
9449     {
9450         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9451                    PCI_VENDOR_ID_ADAPTEC2, 0x0802)
9452     },
9453     {
9454         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9455                    PCI_VENDOR_ID_ADAPTEC2, 0x0803)
9456     },
9457     {
9458         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9459                    PCI_VENDOR_ID_ADAPTEC2, 0x0804)
9460     },
9461     {
9462         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9463                    PCI_VENDOR_ID_ADAPTEC2, 0x0805)
9464     },
9465     {
9466         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9467                    PCI_VENDOR_ID_ADAPTEC2, 0x0806)
9468     },
9469     {
9470         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9471                    PCI_VENDOR_ID_ADAPTEC2, 0x0807)
9472     },
9473     {
9474         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9475                    PCI_VENDOR_ID_ADAPTEC2, 0x0808)
9476     },
9477     {
9478         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9479                    PCI_VENDOR_ID_ADAPTEC2, 0x0809)
9480     },
9481     {
9482         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9483                    PCI_VENDOR_ID_ADAPTEC2, 0x080a)
9484     },
9485     {
9486         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9487                    PCI_VENDOR_ID_ADAPTEC2, 0x0900)
9488     },
9489     {
9490         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9491                    PCI_VENDOR_ID_ADAPTEC2, 0x0901)
9492     },
9493     {
9494         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9495                    PCI_VENDOR_ID_ADAPTEC2, 0x0902)
9496     },
9497     {
9498         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9499                    PCI_VENDOR_ID_ADAPTEC2, 0x0903)
9500     },
9501     {
9502         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9503                    PCI_VENDOR_ID_ADAPTEC2, 0x0904)
9504     },
9505     {
9506         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9507                    PCI_VENDOR_ID_ADAPTEC2, 0x0905)
9508     },
9509     {
9510         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9511                    PCI_VENDOR_ID_ADAPTEC2, 0x0906)
9512     },
9513     {
9514         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9515                    PCI_VENDOR_ID_ADAPTEC2, 0x0907)
9516     },
9517     {
9518         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9519                    PCI_VENDOR_ID_ADAPTEC2, 0x0908)
9520     },
9521     {
9522         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9523                    PCI_VENDOR_ID_ADAPTEC2, 0x090a)
9524     },
9525     {
9526         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9527                    PCI_VENDOR_ID_ADAPTEC2, 0x1200)
9528     },
9529     {
9530         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9531                    PCI_VENDOR_ID_ADAPTEC2, 0x1201)
9532     },
9533     {
9534         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9535                    PCI_VENDOR_ID_ADAPTEC2, 0x1202)
9536     },
9537     {
9538         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9539                    PCI_VENDOR_ID_ADAPTEC2, 0x1280)
9540     },
9541     {
9542         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9543                    PCI_VENDOR_ID_ADAPTEC2, 0x1281)
9544     },
9545     {
9546         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9547                    PCI_VENDOR_ID_ADAPTEC2, 0x1282)
9548     },
9549     {
9550         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9551                    PCI_VENDOR_ID_ADAPTEC2, 0x1300)
9552     },
9553     {
9554         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9555                    PCI_VENDOR_ID_ADAPTEC2, 0x1301)
9556     },
9557     {
9558         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9559                    PCI_VENDOR_ID_ADAPTEC2, 0x1302)
9560     },
9561     {
9562         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9563                    PCI_VENDOR_ID_ADAPTEC2, 0x1303)
9564     },
9565     {
9566         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9567                    PCI_VENDOR_ID_ADAPTEC2, 0x1304)
9568     },
9569     {
9570         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9571                    PCI_VENDOR_ID_ADAPTEC2, 0x1380)
9572     },
9573     {
9574         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9575                    PCI_VENDOR_ID_ADAPTEC2, 0x1400)
9576     },
9577     {
9578         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9579                    PCI_VENDOR_ID_ADAPTEC2, 0x1402)
9580     },
9581     {
9582         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9583                    PCI_VENDOR_ID_ADAPTEC2, 0x1410)
9584     },
9585     {
9586         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9587                    PCI_VENDOR_ID_ADAPTEC2, 0x1411)
9588     },
9589     {
9590         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9591                    PCI_VENDOR_ID_ADAPTEC2, 0x1412)
9592     },
9593     {
9594         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9595                    PCI_VENDOR_ID_ADAPTEC2, 0x1420)
9596     },
9597     {
9598         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9599                    PCI_VENDOR_ID_ADAPTEC2, 0x1430)
9600     },
9601     {
9602         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9603                    PCI_VENDOR_ID_ADAPTEC2, 0x1440)
9604     },
9605     {
9606         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9607                    PCI_VENDOR_ID_ADAPTEC2, 0x1441)
9608     },
9609     {
9610         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9611                    PCI_VENDOR_ID_ADAPTEC2, 0x1450)
9612     },
9613     {
9614         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9615                    PCI_VENDOR_ID_ADAPTEC2, 0x1452)
9616     },
9617     {
9618         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9619                    PCI_VENDOR_ID_ADAPTEC2, 0x1460)
9620     },
9621     {
9622         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9623                    PCI_VENDOR_ID_ADAPTEC2, 0x1461)
9624     },
9625     {
9626         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9627                    PCI_VENDOR_ID_ADAPTEC2, 0x1462)
9628     },
9629     {
9630         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9631                    PCI_VENDOR_ID_ADAPTEC2, 0x1463)
9632     },
9633     {
9634         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9635                    PCI_VENDOR_ID_ADAPTEC2, 0x1470)
9636     },
9637     {
9638         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9639                    PCI_VENDOR_ID_ADAPTEC2, 0x1471)
9640     },
9641     {
9642         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9643                    PCI_VENDOR_ID_ADAPTEC2, 0x1472)
9644     },
9645     {
9646         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9647                    PCI_VENDOR_ID_ADAPTEC2, 0x1473)
9648     },
9649     {
9650         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9651                    PCI_VENDOR_ID_ADAPTEC2, 0x1474)
9652     },
9653     {
9654         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9655                    PCI_VENDOR_ID_ADAPTEC2, 0x1480)
9656     },
9657     {
9658         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9659                    PCI_VENDOR_ID_ADAPTEC2, 0x1490)
9660     },
9661     {
9662         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9663                    PCI_VENDOR_ID_ADAPTEC2, 0x1491)
9664     },
9665     {
9666         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9667                    PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
9668     },
9669     {
9670         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9671                    PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
9672     },
9673     {
9674         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9675                    PCI_VENDOR_ID_ADAPTEC2, 0x14a2)
9676     },
9677     {
9678         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9679                    PCI_VENDOR_ID_ADAPTEC2, 0x14a4)
9680     },
9681     {
9682         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9683                    PCI_VENDOR_ID_ADAPTEC2, 0x14a5)
9684     },
9685     {
9686         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9687                    PCI_VENDOR_ID_ADAPTEC2, 0x14a6)
9688     },
9689     {
9690         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9691                    PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
9692     },
9693     {
9694         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9695                    PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
9696     },
9697     {
9698         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9699                    PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
9700     },
9701     {
9702         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9703                    PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
9704     },
9705     {
9706         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9707                    PCI_VENDOR_ID_ADAPTEC2, 0x14c2)
9708     },
9709     {
9710         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9711                    PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
9712     },
9713     {
9714         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9715                    PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
9716     },
9717     {
9718         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9719                    PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
9720     },
9721     {
9722         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9723                    PCI_VENDOR_ID_ADVANTECH, 0x8312)
9724     },
9725     {
9726         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9727                    PCI_VENDOR_ID_DELL, 0x1fe0)
9728     },
9729     {
9730         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9731                    PCI_VENDOR_ID_HP, 0x0600)
9732     },
9733     {
9734         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9735                    PCI_VENDOR_ID_HP, 0x0601)
9736     },
9737     {
9738         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9739                    PCI_VENDOR_ID_HP, 0x0602)
9740     },
9741     {
9742         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9743                    PCI_VENDOR_ID_HP, 0x0603)
9744     },
9745     {
9746         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9747                    PCI_VENDOR_ID_HP, 0x0609)
9748     },
9749     {
9750         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9751                    PCI_VENDOR_ID_HP, 0x0650)
9752     },
9753     {
9754         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9755                    PCI_VENDOR_ID_HP, 0x0651)
9756     },
9757     {
9758         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9759                    PCI_VENDOR_ID_HP, 0x0652)
9760     },
9761     {
9762         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9763                    PCI_VENDOR_ID_HP, 0x0653)
9764     },
9765     {
9766         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9767                    PCI_VENDOR_ID_HP, 0x0654)
9768     },
9769     {
9770         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9771                    PCI_VENDOR_ID_HP, 0x0655)
9772     },
9773     {
9774         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9775                    PCI_VENDOR_ID_HP, 0x0700)
9776     },
9777     {
9778         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9779                    PCI_VENDOR_ID_HP, 0x0701)
9780     },
9781     {
9782         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9783                    PCI_VENDOR_ID_HP, 0x1001)
9784     },
9785     {
9786         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9787                    PCI_VENDOR_ID_HP, 0x1002)
9788     },
9789     {
9790         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9791                    PCI_VENDOR_ID_HP, 0x1100)
9792     },
9793     {
9794         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9795                    PCI_VENDOR_ID_HP, 0x1101)
9796     },
9797     {
9798         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9799                    0x1590, 0x0294)
9800     },
9801     {
9802         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9803                    0x1590, 0x02db)
9804     },
9805     {
9806         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9807                    0x1590, 0x02dc)
9808     },
9809     {
9810         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9811                    0x1590, 0x032e)
9812     },
9813     {
9814         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9815                    0x1590, 0x036f)
9816     },
9817     {
9818         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9819                    0x1590, 0x0381)
9820     },
9821     {
9822         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9823                    0x1590, 0x0382)
9824     },
9825     {
9826         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9827                    0x1590, 0x0383)
9828     },
9829     {
9830         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9831                    0x1d8d, 0x0800)
9832     },
9833     {
9834         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9835                    0x1d8d, 0x0908)
9836     },
9837     {
9838         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9839                    0x1d8d, 0x0806)
9840     },
9841     {
9842         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9843                    0x1d8d, 0x0916)
9844     },
9845     {
9846         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9847                    PCI_VENDOR_ID_GIGABYTE, 0x1000)
9848     },
9849     {
9850         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9851                    0x1dfc, 0x3161)
9852     },
9853     {
9854         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9855                    0x1f0c, 0x3161)
9856     },
9857     {
9858         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9859                    0x1cf2, 0x5445)
9860     },
9861     {
9862         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9863                    0x1cf2, 0x5446)
9864     },
9865     {
9866         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9867                    0x1cf2, 0x5447)
9868     },
9869     {
9870         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9871                    0x1cf2, 0x5449)
9872     },
9873     {
9874         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9875                    0x1cf2, 0x544a)
9876     },
9877     {
9878         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9879                    0x1cf2, 0x544b)
9880     },
9881     {
9882         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9883                    0x1cf2, 0x544d)
9884     },
9885     {
9886         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9887                    0x1cf2, 0x544e)
9888     },
9889     {
9890         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9891                    0x1cf2, 0x544f)
9892     },
9893     {
9894         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9895                    0x1cf2, 0x0b27)
9896     },
9897     {
9898         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9899                    0x1cf2, 0x0b29)
9900     },
9901     {
9902         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9903                    0x1cf2, 0x0b45)
9904     },
9905     {
9906         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9907                    0x1cc4, 0x0101)
9908     },
9909     {
9910         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9911                    0x1cc4, 0x0201)
9912     },
9913     {
9914         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9915                    PCI_VENDOR_ID_LENOVO, 0x0220)
9916     },
9917     {
9918         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9919                    PCI_VENDOR_ID_LENOVO, 0x0221)
9920     },
9921     {
9922         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9923                    PCI_VENDOR_ID_LENOVO, 0x0520)
9924     },
9925     {
9926         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9927                    PCI_VENDOR_ID_LENOVO, 0x0522)
9928     },
9929     {
9930         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9931                    PCI_VENDOR_ID_LENOVO, 0x0620)
9932     },
9933     {
9934         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9935                    PCI_VENDOR_ID_LENOVO, 0x0621)
9936     },
9937     {
9938         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9939                    PCI_VENDOR_ID_LENOVO, 0x0622)
9940     },
9941     {
9942         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9943                    PCI_VENDOR_ID_LENOVO, 0x0623)
9944     },
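    /*
     * Unlisted 0x028f subsystem IDs still bind through the PCI_ANY_ID
     * catch-all entry below.  A new board-specific entry would follow the
     * same pattern as the rows above, e.g. (hypothetical subsystem IDs,
     * shown for illustration only):
     *
     *  {
     *      PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
     *                 0xabcd, 0x1234)
     *  },
     */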
9945     {
9946         PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9947                    PCI_ANY_ID, PCI_ANY_ID)
9948     },
9949     { 0 }
9950 };
9951 
9952 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
9953 
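/*
 * The device table above is exported for module autoloading; the struct
 * below supplies the probe/remove/shutdown entry points, and the
 * power-management callbacks are hooked up only when CONFIG_PM is set.
 */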
9954 static struct pci_driver pqi_pci_driver = {
9955     .name = DRIVER_NAME_SHORT,
9956     .id_table = pqi_pci_id_table,
9957     .probe = pqi_pci_probe,
9958     .remove = pqi_pci_remove,
9959     .shutdown = pqi_shutdown,
9960 #if defined(CONFIG_PM)
9961     .driver = {
9962         .pm = &pqi_pm_ops
9963     },
9964 #endif
9965 };
9966 
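/*
 * Module init: run the compile-time structure checks, attach the SAS
 * transport template, apply any module parameters, then register the PCI
 * driver.  On registration failure the transport template is released so
 * nothing is leaked.
 */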
9967 static int __init pqi_init(void)
9968 {
9969     int rc;
9970 
9971     pr_info(DRIVER_NAME "\n");
9972     pqi_verify_structures();
9973     sis_verify_structures();
9974 
9975     pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
9976     if (!pqi_sas_transport_template)
9977         return -ENODEV;
9978 
9979     pqi_process_module_params();
9980 
9981     rc = pci_register_driver(&pqi_pci_driver);
9982     if (rc)
9983         sas_release_transport(pqi_sas_transport_template);
9984 
9985     return rc;
9986 }
9987 
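/*
 * Module exit: tear down in the reverse order of pqi_init(), unregistering
 * the PCI driver before releasing the SAS transport template.
 */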
9988 static void __exit pqi_cleanup(void)
9989 {
9990     pci_unregister_driver(&pqi_pci_driver);
9991     sas_release_transport(pqi_sas_transport_template);
9992 }
9993 
9994 module_init(pqi_init);
9995 module_exit(pqi_cleanup);
9996 
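/*
 * Compile-time layout checks.  Each BUILD_BUG_ON() below breaks the build
 * if a driver structure stops matching the offsets and sizes the controller
 * firmware expects; none of these checks produces any runtime code.
 */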
9997 static void pqi_verify_structures(void)
9998 {
9999     BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10000         sis_host_to_ctrl_doorbell) != 0x20);
10001     BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10002         sis_interrupt_mask) != 0x34);
10003     BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10004         sis_ctrl_to_host_doorbell) != 0x9c);
10005     BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10006         sis_ctrl_to_host_doorbell_clear) != 0xa0);
10007     BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10008         sis_driver_scratch) != 0xb0);
10009     BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10010         sis_product_identifier) != 0xb4);
10011     BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10012         sis_firmware_status) != 0xbc);
10013     BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10014         sis_ctrl_shutdown_reason_code) != 0xcc);
10015     BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10016         sis_mailbox) != 0x1000);
10017     BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10018         pqi_registers) != 0x4000);
10019 
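    /* common header carried by every PQI information unit (IU) */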
10020     BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10021         iu_type) != 0x0);
10022     BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10023         iu_length) != 0x2);
10024     BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10025         response_queue_id) != 0x4);
10026     BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10027         driver_flags) != 0x6);
10028     BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
10029 
10030     BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10031         status) != 0x0);
10032     BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10033         service_response) != 0x1);
10034     BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10035         data_present) != 0x2);
10036     BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10037         reserved) != 0x3);
10038     BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10039         residual_count) != 0x4);
10040     BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10041         data_length) != 0x8);
10042     BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10043         reserved1) != 0xa);
10044     BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10045         data) != 0xc);
10046     BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
10047 
10048     BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10049         data_in_result) != 0x0);
10050     BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10051         data_out_result) != 0x1);
10052     BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10053         reserved) != 0x2);
10054     BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10055         status) != 0x5);
10056     BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10057         status_qualifier) != 0x6);
10058     BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10059         sense_data_length) != 0x8);
10060     BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10061         response_data_length) != 0xa);
10062     BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10063         data_in_transferred) != 0xc);
10064     BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10065         data_out_transferred) != 0x10);
10066     BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10067         data) != 0x14);
10068     BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
10069 
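    /* PQI device register block used while bringing up the admin queues */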
10070     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10071         signature) != 0x0);
10072     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10073         function_and_status_code) != 0x8);
10074     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10075         max_admin_iq_elements) != 0x10);
10076     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10077         max_admin_oq_elements) != 0x11);
10078     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10079         admin_iq_element_length) != 0x12);
10080     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10081         admin_oq_element_length) != 0x13);
10082     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10083         max_reset_timeout) != 0x14);
10084     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10085         legacy_intx_status) != 0x18);
10086     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10087         legacy_intx_mask_set) != 0x1c);
10088     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10089         legacy_intx_mask_clear) != 0x20);
10090     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10091         device_status) != 0x40);
10092     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10093         admin_iq_pi_offset) != 0x48);
10094     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10095         admin_oq_ci_offset) != 0x50);
10096     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10097         admin_iq_element_array_addr) != 0x58);
10098     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10099         admin_oq_element_array_addr) != 0x60);
10100     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10101         admin_iq_ci_addr) != 0x68);
10102     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10103         admin_oq_pi_addr) != 0x70);
10104     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10105         admin_iq_num_elements) != 0x78);
10106     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10107         admin_oq_num_elements) != 0x79);
10108     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10109         admin_queue_int_msg_num) != 0x7a);
10110     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10111         device_error) != 0x80);
10112     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10113         error_details) != 0x88);
10114     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10115         device_reset) != 0x90);
10116     BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10117         power_action) != 0x94);
10118     BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
10119 
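    /* 64-byte general admin request element (report capability, create/delete queues) */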
10120     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10121         header.iu_type) != 0);
10122     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10123         header.iu_length) != 2);
10124     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10125         header.driver_flags) != 6);
10126     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10127         request_id) != 8);
10128     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10129         function_code) != 10);
10130     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10131         data.report_device_capability.buffer_length) != 44);
10132     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10133         data.report_device_capability.sg_descriptor) != 48);
10134     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10135         data.create_operational_iq.queue_id) != 12);
10136     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10137         data.create_operational_iq.element_array_addr) != 16);
10138     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10139         data.create_operational_iq.ci_addr) != 24);
10140     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10141         data.create_operational_iq.num_elements) != 32);
10142     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10143         data.create_operational_iq.element_length) != 34);
10144     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10145         data.create_operational_iq.queue_protocol) != 36);
10146     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10147         data.create_operational_oq.queue_id) != 12);
10148     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10149         data.create_operational_oq.element_array_addr) != 16);
10150     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10151         data.create_operational_oq.pi_addr) != 24);
10152     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10153         data.create_operational_oq.num_elements) != 32);
10154     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10155         data.create_operational_oq.element_length) != 34);
10156     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10157         data.create_operational_oq.queue_protocol) != 36);
10158     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10159         data.create_operational_oq.int_msg_num) != 40);
10160     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10161         data.create_operational_oq.coalescing_count) != 42);
10162     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10163         data.create_operational_oq.min_coalescing_time) != 44);
10164     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10165         data.create_operational_oq.max_coalescing_time) != 48);
10166     BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10167         data.delete_operational_queue.queue_id) != 12);
10168     BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
10169     BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10170         data.create_operational_iq) != 64 - 11);
10171     BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10172         data.create_operational_oq) != 64 - 11);
10173     BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10174         data.delete_operational_queue) != 64 - 11);
10175 
10176     BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10177         header.iu_type) != 0);
10178     BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10179         header.iu_length) != 2);
10180     BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10181         header.driver_flags) != 6);
10182     BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10183         request_id) != 8);
10184     BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10185         function_code) != 10);
10186     BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10187         status) != 11);
10188     BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10189         data.create_operational_iq.status_descriptor) != 12);
10190     BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10191         data.create_operational_iq.iq_pi_offset) != 16);
10192     BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10193         data.create_operational_oq.status_descriptor) != 12);
10194     BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10195         data.create_operational_oq.oq_ci_offset) != 16);
10196     BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
10197 
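    /* RAID path request: must exactly fill one operational IQ element */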
10198     BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10199         header.iu_type) != 0);
10200     BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10201         header.iu_length) != 2);
10202     BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10203         header.response_queue_id) != 4);
10204     BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10205         header.driver_flags) != 6);
10206     BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10207         request_id) != 8);
10208     BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10209         nexus_id) != 10);
10210     BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10211         buffer_length) != 12);
10212     BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10213         lun_number) != 16);
10214     BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10215         protocol_specific) != 24);
10216     BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10217         error_index) != 27);
10218     BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10219         cdb) != 32);
10220     BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10221         timeout) != 60);
10222     BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10223         sg_descriptors) != 64);
10224     BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
10225         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
10226 
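    /* AIO path request: must also fill exactly one operational IQ element */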
10227     BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10228         header.iu_type) != 0);
10229     BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10230         header.iu_length) != 2);
10231     BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10232         header.response_queue_id) != 4);
10233     BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10234         header.driver_flags) != 6);
10235     BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10236         request_id) != 8);
10237     BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10238         nexus_id) != 12);
10239     BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10240         buffer_length) != 16);
10241     BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10242         data_encryption_key_index) != 22);
10243     BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10244         encrypt_tweak_lower) != 24);
10245     BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10246         encrypt_tweak_upper) != 28);
10247     BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10248         cdb) != 32);
10249     BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10250         error_index) != 48);
10251     BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10252         num_sg_descriptors) != 50);
10253     BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10254         cdb_length) != 51);
10255     BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10256         lun_number) != 52);
10257     BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10258         sg_descriptors) != 64);
10259     BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
10260         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
10261 
10262     BUILD_BUG_ON(offsetof(struct pqi_io_response,
10263         header.iu_type) != 0);
10264     BUILD_BUG_ON(offsetof(struct pqi_io_response,
10265         header.iu_length) != 2);
10266     BUILD_BUG_ON(offsetof(struct pqi_io_response,
10267         request_id) != 8);
10268     BUILD_BUG_ON(offsetof(struct pqi_io_response,
10269         error_index) != 10);
10270 
10271     BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10272         header.iu_type) != 0);
10273     BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10274         header.iu_length) != 2);
10275     BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10276         header.response_queue_id) != 4);
10277     BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10278         request_id) != 8);
10279     BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10280         data.report_event_configuration.buffer_length) != 12);
10281     BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10282         data.report_event_configuration.sg_descriptors) != 16);
10283     BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10284         data.set_event_configuration.global_event_oq_id) != 10);
10285     BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10286         data.set_event_configuration.buffer_length) != 12);
10287     BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10288         data.set_event_configuration.sg_descriptors) != 16);
10289 
10290     BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
10291         max_inbound_iu_length) != 6);
10292     BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
10293         max_outbound_iu_length) != 14);
10294     BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
10295 
10296     BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10297         data_length) != 0);
10298     BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10299         iq_arbitration_priority_support_bitmask) != 8);
10300     BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10301         maximum_aw_a) != 9);
10302     BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10303         maximum_aw_b) != 10);
10304     BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10305         maximum_aw_c) != 11);
10306     BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10307         max_inbound_queues) != 16);
10308     BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10309         max_elements_per_iq) != 18);
10310     BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10311         max_iq_element_length) != 24);
10312     BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10313         min_iq_element_length) != 26);
10314     BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10315         max_outbound_queues) != 30);
10316     BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10317         max_elements_per_oq) != 32);
10318     BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10319         intr_coalescing_time_granularity) != 34);
10320     BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10321         max_oq_element_length) != 36);
10322     BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10323         min_oq_element_length) != 38);
10324     BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10325         iu_layer_descriptors) != 64);
10326     BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
10327 
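    /* event configuration and asynchronous event notification IUs */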
10328     BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
10329         event_type) != 0);
10330     BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
10331         oq_id) != 2);
10332     BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
10333 
10334     BUILD_BUG_ON(offsetof(struct pqi_event_config,
10335         num_event_descriptors) != 2);
10336     BUILD_BUG_ON(offsetof(struct pqi_event_config,
10337         descriptors) != 4);
10338 
10339     BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
10340         ARRAY_SIZE(pqi_supported_event_types));
10341 
10342     BUILD_BUG_ON(offsetof(struct pqi_event_response,
10343         header.iu_type) != 0);
10344     BUILD_BUG_ON(offsetof(struct pqi_event_response,
10345         header.iu_length) != 2);
10346     BUILD_BUG_ON(offsetof(struct pqi_event_response,
10347         event_type) != 8);
10348     BUILD_BUG_ON(offsetof(struct pqi_event_response,
10349         event_id) != 10);
10350     BUILD_BUG_ON(offsetof(struct pqi_event_response,
10351         additional_event_id) != 12);
10352     BUILD_BUG_ON(offsetof(struct pqi_event_response,
10353         data) != 16);
10354     BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
10355 
10356     BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10357         header.iu_type) != 0);
10358     BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10359         header.iu_length) != 2);
10360     BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10361         event_type) != 8);
10362     BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10363         event_id) != 10);
10364     BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10365         additional_event_id) != 12);
10366     BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
10367 
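    /* task management (e.g. LUN reset) request and response IUs */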
10368     BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10369         header.iu_type) != 0);
10370     BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10371         header.iu_length) != 2);
10372     BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10373         request_id) != 8);
10374     BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10375         nexus_id) != 10);
10376     BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10377         timeout) != 14);
10378     BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10379         lun_number) != 16);
10380     BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10381         protocol_specific) != 24);
10382     BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10383         outbound_queue_id_to_manage) != 26);
10384     BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10385         request_id_to_manage) != 28);
10386     BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10387         task_management_function) != 30);
10388     BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
10389 
10390     BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10391         header.iu_type) != 0);
10392     BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10393         header.iu_length) != 2);
10394     BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10395         request_id) != 8);
10396     BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10397         nexus_id) != 10);
10398     BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10399         additional_response_info) != 12);
10400     BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10401         response_code) != 15);
10402     BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
10403 
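    /* BMIC pass-through buffers: identify controller/device and sense feature pages */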
10404     BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10405         configured_logical_drive_count) != 0);
10406     BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10407         configuration_signature) != 1);
10408     BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10409         firmware_version_short) != 5);
10410     BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10411         extended_logical_unit_count) != 154);
10412     BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10413         firmware_build_number) != 190);
10414     BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10415         vendor_id) != 200);
10416     BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10417         product_id) != 208);
10418     BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10419         extra_controller_flags) != 286);
10420     BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10421         controller_mode) != 292);
10422     BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10423         spare_part_number) != 293);
10424     BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10425         firmware_version_long) != 325);
10426 
10427     BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10428         phys_bay_in_box) != 115);
10429     BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10430         device_type) != 120);
10431     BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10432         redundant_path_present_map) != 1736);
10433     BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10434         active_path_number) != 1738);
10435     BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10436         alternate_paths_phys_connector) != 1739);
10437     BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10438         alternate_paths_phys_box_on_port) != 1755);
10439     BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10440         current_queue_depth_limit) != 1796);
10441     BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
10442 
10443     BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
10444     BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
10445         page_code) != 0);
10446     BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
10447         subpage_code) != 1);
10448     BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
10449         buffer_length) != 2);
10450 
10451     BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
10452     BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
10453         page_code) != 0);
10454     BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
10455         subpage_code) != 1);
10456     BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
10457         page_length) != 2);
10458 
10459     BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
10460         != 18);
10461     BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10462         header) != 0);
10463     BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10464         firmware_read_support) != 4);
10465     BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10466         driver_read_support) != 5);
10467     BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10468         firmware_write_support) != 6);
10469     BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10470         driver_write_support) != 7);
10471     BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10472         max_transfer_encrypted_sas_sata) != 8);
10473     BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10474         max_transfer_encrypted_nvme) != 10);
10475     BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10476         max_write_raid_5_6) != 12);
10477     BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10478         max_write_raid_1_10_2drive) != 14);
10479     BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10480         max_write_raid_1_10_3drive) != 16);
10481 
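    /* finally, sanity-check queue element counts, lengths and alignment limits */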
10482     BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
10483     BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
10484     BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
10485         PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10486     BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
10487         PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10488     BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
10489     BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
10490         PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10491     BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
10492     BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
10493         PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10494 
10495     BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
10496     BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
10497         PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
10498 }