0001 /*
0002  * Scsi Host Layer for MPT (Message Passing Technology) based controllers
0003  *
0004  * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
0005  * Copyright (C) 2012-2014  LSI Corporation
0006  * Copyright (C) 2013-2014 Avago Technologies
0007  *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
0008  *
0009  * This program is free software; you can redistribute it and/or
0010  * modify it under the terms of the GNU General Public License
0011  * as published by the Free Software Foundation; either version 2
0012  * of the License, or (at your option) any later version.
0013  *
0014  * This program is distributed in the hope that it will be useful,
0015  * but WITHOUT ANY WARRANTY; without even the implied warranty of
0016  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
0017  * GNU General Public License for more details.
0018  *
0019  * NO WARRANTY
0020  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
0021  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
0022  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
0023  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
0024  * solely responsible for determining the appropriateness of using and
0025  * distributing the Program and assumes all risks associated with its
0026  * exercise of rights under this Agreement, including but not limited to
0027  * the risks and costs of program errors, damage to or loss of data,
0028  * programs or equipment, and unavailability or interruption of operations.
0029 
0030  * DISCLAIMER OF LIABILITY
0031  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
0032  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
0033  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
0034  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
0035  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
0036  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
0037  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
0038 
0039  * You should have received a copy of the GNU General Public License
0040  * along with this program; if not, write to the Free Software
0041  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
0042  * USA.
0043  */
0044 
0045 #include <linux/module.h>
0046 #include <linux/kernel.h>
0047 #include <linux/init.h>
0048 #include <linux/errno.h>
0049 #include <linux/blkdev.h>
0050 #include <linux/sched.h>
0051 #include <linux/workqueue.h>
0052 #include <linux/delay.h>
0053 #include <linux/pci.h>
0054 #include <linux/interrupt.h>
0055 #include <linux/aer.h>
0056 #include <linux/raid_class.h>
0057 #include <linux/blk-mq-pci.h>
0058 #include <asm/unaligned.h>
0059 
0060 #include "mpt3sas_base.h"
0061 
0062 #define RAID_CHANNEL 1
0063 
0064 #define PCIE_CHANNEL 2
0065 
0066 /* forward prototypes */
0067 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
0068     struct _sas_node *sas_expander);
0069 static void _firmware_event_work(struct work_struct *work);
0070 
0071 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
0072     struct _sas_device *sas_device);
0073 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
0074     u8 retry_count, u8 is_pd);
0075 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
0076 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
0077     struct _pcie_device *pcie_device);
0078 static void
0079 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
0080 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
0081 static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc);
0082 
0083 /* global parameters */
0084 LIST_HEAD(mpt3sas_ioc_list);
0085 /* global ioc lock for list operations */
0086 DEFINE_SPINLOCK(gioc_lock);
0087 
0088 MODULE_AUTHOR(MPT3SAS_AUTHOR);
0089 MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
0090 MODULE_LICENSE("GPL");
0091 MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
0092 MODULE_ALIAS("mpt2sas");
0093 
0094 /* local parameters */
0095 static u8 scsi_io_cb_idx = -1;
0096 static u8 tm_cb_idx = -1;
0097 static u8 ctl_cb_idx = -1;
0098 static u8 base_cb_idx = -1;
0099 static u8 port_enable_cb_idx = -1;
0100 static u8 transport_cb_idx = -1;
0101 static u8 scsih_cb_idx = -1;
0102 static u8 config_cb_idx = -1;
0103 static int mpt2_ids;
0104 static int mpt3_ids;
0105 
0106 static u8 tm_tr_cb_idx = -1;
0107 static u8 tm_tr_volume_cb_idx = -1;
0108 static u8 tm_sas_control_cb_idx = -1;
0109 
0110 /* command line options */
0111 static u32 logging_level;
0112 MODULE_PARM_DESC(logging_level,
0113     " bits for enabling additional logging info (default=0)");
0114 
0115 
0116 static ushort max_sectors = 0xFFFF;
0117 module_param(max_sectors, ushort, 0444);
0118 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767  default=32767");
0119 
0120 
0121 static int missing_delay[2] = {-1, -1};
0122 module_param_array(missing_delay, int, NULL, 0444);
0123 MODULE_PARM_DESC(missing_delay, " device missing delay, io missing delay");
0124 
0125 /* scsi mid-layer global parameter is max_report_luns, which is 511 */
0126 #define MPT3SAS_MAX_LUN (16895)
0127 static u64 max_lun = MPT3SAS_MAX_LUN;
0128 module_param(max_lun, ullong, 0444);
0129 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
0130 
0131 static ushort hbas_to_enumerate;
0132 module_param(hbas_to_enumerate, ushort, 0444);
0133 MODULE_PARM_DESC(hbas_to_enumerate,
0134         " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
0135           1 - enumerates only SAS 2.0 generation HBAs\n \
0136           2 - enumerates only SAS 3.0 generation HBAs (default=0)");
0137 
0138 /* diag_buffer_enable is bitwise
0139  * bit 0 set = TRACE
0140  * bit 1 set = SNAPSHOT
0141  * bit 2 set = EXTENDED
0142  *
0143  * Any combination of these bits may be set.
0144  */
0145 static int diag_buffer_enable = -1;
0146 module_param(diag_buffer_enable, int, 0444);
0147 MODULE_PARM_DESC(diag_buffer_enable,
0148     " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
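/*
 * Editorial note, not part of the upstream source: because diag_buffer_enable
 * is a bitmask, the buffer types listed above can be combined when loading
 * the module, e.g. "modprobe mpt3sas diag_buffer_enable=3" requests both the
 * TRACE (bit 0) and SNAPSHOT (bit 1) buffers, and a value of 5 requests
 * TRACE plus EXTENDED (bit 2).
 */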
0149 static int disable_discovery = -1;
0150 module_param(disable_discovery, int, 0444);
0151 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
0152 
0153 
0154 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
0155 static int prot_mask = -1;
0156 module_param(prot_mask, int, 0444);
0157 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
0158 
0159 static bool enable_sdev_max_qd;
0160 module_param(enable_sdev_max_qd, bool, 0444);
0161 MODULE_PARM_DESC(enable_sdev_max_qd,
0162     "Enable sdev max qd as can_queue, def=disabled(0)");
0163 
0164 static int multipath_on_hba = -1;
0165 module_param(multipath_on_hba, int, 0);
0166 MODULE_PARM_DESC(multipath_on_hba,
0167     "Multipath support to add same target device\n\t\t"
0168     "as many times as it is visible to HBA from various paths\n\t\t"
0169     "(by default:\n\t\t"
0170     "\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
0171     "\t SAS 3.5 HBA - This will be enabled)");
0172 
0173 static int host_tagset_enable = 1;
0174 module_param(host_tagset_enable, int, 0444);
0175 MODULE_PARM_DESC(host_tagset_enable,
0176     "Shared host tagset enable/disable Default: enable(1)");
0177 
0178 /* raid transport support */
0179 static struct raid_template *mpt3sas_raid_template;
0180 static struct raid_template *mpt2sas_raid_template;
0181 
0182 
0183 /**
0184  * struct sense_info - common structure for obtaining sense keys
0185  * @skey: sense key
0186  * @asc: additional sense code
0187  * @ascq: additional sense code qualifier
0188  */
0189 struct sense_info {
0190     u8 skey;
0191     u8 asc;
0192     u8 ascq;
0193 };
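/*
 * Illustrative sketch, not part of the upstream file: for fixed-format sense
 * data the three fields above are conventionally taken from bytes 2 (low
 * nibble), 12 and 13 of the sense buffer; a real implementation would also
 * need to handle descriptor-format sense data.
 */
static inline void _example_fill_sense_info(struct sense_info *data,
    const u8 *sense_buffer)
{
    data->skey = sense_buffer[2] & 0x0F; /* sense key */
    data->asc  = sense_buffer[12];       /* additional sense code */
    data->ascq = sense_buffer[13];       /* additional sense code qualifier */
}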
0194 
0195 #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
0196 #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
0197 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
0198 #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
0199 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
0200 /**
0201  * struct fw_event_work - firmware event struct
0202  * @list: linked list entry (queued on ioc->fw_event_list)
0203  * @work: work object (ioc->fault_reset_work_q)
0204  * @ioc: per adapter object
0205  * @device_handle: device handle
0206  * @VF_ID: virtual function id
0207  * @VP_ID: virtual port id
0208  * @ignore: flag meaning this event has been marked to ignore
0209  * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
0210  * @refcount: kref for this event
0211  * @event_data: reply event data payload follows
0212  *
0213  * This object is stored on ioc->fw_event_list.
0214  */
0215 struct fw_event_work {
0216     struct list_head    list;
0217     struct work_struct  work;
0218 
0219     struct MPT3SAS_ADAPTER *ioc;
0220     u16         device_handle;
0221     u8          VF_ID;
0222     u8          VP_ID;
0223     u8          ignore;
0224     u16         event;
0225     struct kref     refcount;
0226     char            event_data[] __aligned(4);
0227 };
0228 
0229 static void fw_event_work_free(struct kref *r)
0230 {
0231     kfree(container_of(r, struct fw_event_work, refcount));
0232 }
0233 
0234 static void fw_event_work_get(struct fw_event_work *fw_work)
0235 {
0236     kref_get(&fw_work->refcount);
0237 }
0238 
0239 static void fw_event_work_put(struct fw_event_work *fw_work)
0240 {
0241     kref_put(&fw_work->refcount, fw_event_work_free);
0242 }
0243 
0244 static struct fw_event_work *alloc_fw_event_work(int len)
0245 {
0246     struct fw_event_work *fw_event;
0247 
0248     fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
0249     if (!fw_event)
0250         return NULL;
0251 
0252     kref_init(&fw_event->refcount);
0253     return fw_event;
0254 }
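/*
 * Illustrative sketch, not part of the upstream file: the intended life
 * cycle of a fw_event_work object.  alloc_fw_event_work() returns the object
 * with a reference count of one; any additional holder (such as a queued
 * work item) takes its own reference with fw_event_work_get(), and every
 * holder drops its reference with fw_event_work_put() when done.  The last
 * put frees the object.
 */
static void _example_fw_event_lifecycle(struct MPT3SAS_ADAPTER *ioc)
{
    struct fw_event_work *fw_event;

    fw_event = alloc_fw_event_work(0);  /* refcount == 1 */
    if (!fw_event)
        return;
    fw_event->ioc = ioc;

    fw_event_work_get(fw_event);        /* reference held by the worker */
    /* ... queue the event and let the worker run ... */
    fw_event_work_put(fw_event);        /* worker is done with the event */

    fw_event_work_put(fw_event);        /* last reference: frees the object */
}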
0255 
0256 /**
0257  * struct _scsi_io_transfer - scsi io transfer
0258  * @handle: sas device handle (assigned by firmware)
0259  * @is_raid: flag set for hidden raid components
0260  * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
0261  * @data_length: data transfer length
0262  * @data_dma: dma pointer to data
0263  * @sense: sense data
0264  * @lun: lun number
0265  * @cdb_length: cdb length
0266  * @cdb: cdb contents
0267  * @timeout: timeout for this command
0268  * @VF_ID: virtual function id
0269  * @VP_ID: virtual port id
0270  * @valid_reply: flag set for reply message
0271  * @sense_length: sense length
0272  * @ioc_status: ioc status
0273  * @scsi_state: scsi state
0274  * @scsi_status: scsi status
0275  * @log_info: log information
0276  * @transfer_length: data length transfer when there is a reply message
0277  *
0278  * Used for sending internal scsi commands to devices within this module.
0279  * Refer to _scsi_send_scsi_io().
0280  */
0281 struct _scsi_io_transfer {
0282     u16 handle;
0283     u8  is_raid;
0284     enum dma_data_direction dir;
0285     u32 data_length;
0286     dma_addr_t data_dma;
0287     u8  sense[SCSI_SENSE_BUFFERSIZE];
0288     u32 lun;
0289     u8  cdb_length;
0290     u8  cdb[32];
0291     u8  timeout;
0292     u8  VF_ID;
0293     u8  VP_ID;
0294     u8  valid_reply;
0295   /* the following fields are only valid when 'valid_reply = 1' */
0296     u32 sense_length;
0297     u16 ioc_status;
0298     u8  scsi_state;
0299     u8  scsi_status;
0300     u32 log_info;
0301     u32 transfer_length;
0302 };
0303 
0304 /**
0305  * _scsih_set_debug_level - global setting of ioc->logging_level.
0306  * @val: new value, passed in as a string
0307  * @kp: kernel parameter being set
0308  *
0309  * Note: The logging levels are defined in mpt3sas_debug.h.
0310  */
0311 static int
0312 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
0313 {
0314     int ret = param_set_int(val, kp);
0315     struct MPT3SAS_ADAPTER *ioc;
0316 
0317     if (ret)
0318         return ret;
0319 
0320     pr_info("setting logging_level(0x%08x)\n", logging_level);
0321     spin_lock(&gioc_lock);
0322     list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
0323         ioc->logging_level = logging_level;
0324     spin_unlock(&gioc_lock);
0325     return 0;
0326 }
0327 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
0328     &logging_level, 0644);
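/*
 * Usage note, not part of the upstream source: since the parameter is
 * registered with mode 0644, the logging level can also be changed at
 * runtime, e.g.
 *
 *    echo 0x1 > /sys/module/mpt3sas/parameters/logging_level
 *
 * which lands in _scsih_set_debug_level() and is propagated to every IOC
 * on mpt3sas_ioc_list (the individual bits are defined in mpt3sas_debug.h).
 */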
0329 
0330 /**
0331  * _scsih_srch_boot_sas_address - search based on sas_address
0332  * @sas_address: sas address
0333  * @boot_device: boot device object from bios page 2
0334  *
0335  * Return: 1 when there's a match, 0 means no match.
0336  */
0337 static inline int
0338 _scsih_srch_boot_sas_address(u64 sas_address,
0339     Mpi2BootDeviceSasWwid_t *boot_device)
0340 {
0341     return (sas_address == le64_to_cpu(boot_device->SASAddress)) ?  1 : 0;
0342 }
0343 
0344 /**
0345  * _scsih_srch_boot_device_name - search based on device name
0346  * @device_name: device name specified in the IDENTIFY frame
0347  * @boot_device: boot device object from bios page 2
0348  *
0349  * Return: 1 when there's a match, 0 means no match.
0350  */
0351 static inline int
0352 _scsih_srch_boot_device_name(u64 device_name,
0353     Mpi2BootDeviceDeviceName_t *boot_device)
0354 {
0355     return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
0356 }
0357 
0358 /**
0359  * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
0360  * @enclosure_logical_id: enclosure logical id
0361  * @slot_number: slot number
0362  * @boot_device: boot device object from bios page 2
0363  *
0364  * Return: 1 when there's a match, 0 means no match.
0365  */
0366 static inline int
0367 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
0368     Mpi2BootDeviceEnclosureSlot_t *boot_device)
0369 {
0370     return (enclosure_logical_id == le64_to_cpu(boot_device->
0371         EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
0372         SlotNumber)) ? 1 : 0;
0373 }
0374 
0375 /**
0376  * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
0377  *            port number from port list
0378  * @ioc: per adapter object
0379  * @port_id: port number
0380  * @bypass_dirty_port_flag: when set, return the matching hba port entry
0381  *          even if that entry is marked as dirty.
0382  *
0383  * Search for the hba port entry corresponding to the provided port number;
0384  * if found, return the port object, otherwise return NULL.
0385  */
0386 struct hba_port *
0387 mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
0388     u8 port_id, u8 bypass_dirty_port_flag)
0389 {
0390     struct hba_port *port, *port_next;
0391 
0392     /*
0393      * When multipath_on_hba is disabled then
0394      * search the hba_port entry using default
0395      * port id i.e. 255
0396      */
0397     if (!ioc->multipath_on_hba)
0398         port_id = MULTIPATH_DISABLED_PORT_ID;
0399 
0400     list_for_each_entry_safe(port, port_next,
0401         &ioc->port_table_list, list) {
0402         if (port->port_id != port_id)
0403             continue;
0404         if (bypass_dirty_port_flag)
0405             return port;
0406         if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
0407             continue;
0408         return port;
0409     }
0410 
0411     /*
0412      * Allocate hba_port object for default port id (i.e. 255)
0413      * when multipath_on_hba is disabled for the HBA.
0414      * And add this object to port_table_list.
0415      */
0416     if (!ioc->multipath_on_hba) {
0417         port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
0418         if (!port)
0419             return NULL;
0420 
0421         port->port_id = port_id;
0422         ioc_info(ioc,
0423            "hba_port entry: %p, port: %d is added to hba_port list\n",
0424            port, port->port_id);
0425         list_add_tail(&port->list,
0426             &ioc->port_table_list);
0427         return port;
0428     }
0429     return NULL;
0430 }
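/*
 * Illustrative sketch, not part of the upstream file: a typical lookup of
 * the hba_port object for a firmware-reported port number.  When
 * multipath_on_hba is disabled, the shared default entry (port id 255) is
 * used instead, as described above.
 */
static void _example_port_lookup(struct MPT3SAS_ADAPTER *ioc, u8 port_id)
{
    struct hba_port *port;

    port = mpt3sas_get_port_by_id(ioc, port_id, 0);
    if (!port)
        return; /* no matching (non-dirty) hba_port entry */

    /* ... associate the discovered device with this port ... */
}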
0431 
0432 /**
0433  * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
0434  * @ioc: per adapter object
0435  * @port: hba_port object
0436  * @phy: phy number
0437  *
0438  * Return virtual_phy object corresponding to phy number.
0439  */
0440 struct virtual_phy *
0441 mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
0442     struct hba_port *port, u32 phy)
0443 {
0444     struct virtual_phy *vphy, *vphy_next;
0445 
0446     if (!port->vphys_mask)
0447         return NULL;
0448 
0449     list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
0450         if (vphy->phy_mask & (1 << phy))
0451             return vphy;
0452     }
0453     return NULL;
0454 }
0455 
0456 /**
0457  * _scsih_is_boot_device - search for matching boot device.
0458  * @sas_address: sas address
0459  * @device_name: device name specified in the IDENTIFY frame
0460  * @enclosure_logical_id: enclosure logical id
0461  * @slot: slot number
0462  * @form: specifies boot device form
0463  * @boot_device: boot device object from bios page 2
0464  *
0465  * Return: 1 when there's a match, 0 means no match.
0466  */
0467 static int
0468 _scsih_is_boot_device(u64 sas_address, u64 device_name,
0469     u64 enclosure_logical_id, u16 slot, u8 form,
0470     Mpi2BiosPage2BootDevice_t *boot_device)
0471 {
0472     int rc = 0;
0473 
0474     switch (form) {
0475     case MPI2_BIOSPAGE2_FORM_SAS_WWID:
0476         if (!sas_address)
0477             break;
0478         rc = _scsih_srch_boot_sas_address(
0479             sas_address, &boot_device->SasWwid);
0480         break;
0481     case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
0482         if (!enclosure_logical_id)
0483             break;
0484         rc = _scsih_srch_boot_encl_slot(
0485             enclosure_logical_id,
0486             slot, &boot_device->EnclosureSlot);
0487         break;
0488     case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
0489         if (!device_name)
0490             break;
0491         rc = _scsih_srch_boot_device_name(
0492             device_name, &boot_device->DeviceName);
0493         break;
0494     case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
0495         break;
0496     }
0497 
0498     return rc;
0499 }
0500 
0501 /**
0502  * _scsih_get_sas_address - set the sas_address for given device handle
0503  * @ioc: per adapter object
0504  * @handle: device handle
0505  * @sas_address: sas address
0506  *
0507  * Return: 0 success, non-zero when failure
0508  */
0509 static int
0510 _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
0511     u64 *sas_address)
0512 {
0513     Mpi2SasDevicePage0_t sas_device_pg0;
0514     Mpi2ConfigReply_t mpi_reply;
0515     u32 ioc_status;
0516 
0517     *sas_address = 0;
0518 
0519     if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
0520         MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
0521         ioc_err(ioc, "failure at %s:%d/%s()!\n",
0522             __FILE__, __LINE__, __func__);
0523         return -ENXIO;
0524     }
0525 
0526     ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
0527     if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
0528         /* For HBA, vSES doesn't return HBA SAS address. Instead return
0529          * vSES's sas address.
0530          */
0531         if ((handle <= ioc->sas_hba.num_phys) &&
0532            (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
0533            MPI2_SAS_DEVICE_INFO_SEP)))
0534             *sas_address = ioc->sas_hba.sas_address;
0535         else
0536             *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
0537         return 0;
0538     }
0539 
0540     /* we hit this because the given parent handle doesn't exist */
0541     if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
0542         return -ENXIO;
0543 
0544     /* else error case */
0545     ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
0546         handle, ioc_status, __FILE__, __LINE__, __func__);
0547     return -EIO;
0548 }
0549 
0550 /**
0551  * _scsih_determine_boot_device - determine boot device.
0552  * @ioc: per adapter object
0553  * @device: sas_device or pcie_device object
0554  * @channel: SAS or PCIe channel
0555  *
0556  * Determines whether this device should be the first device reported to
0557  * scsi-ml or the sas transport; this is used for persistent boot device
0558  * support. There are primary, alternate, and current entries in bios page 2.
0559  * The order of priority is primary, alternate, then current.  This routine
0560  * saves the corresponding device object; the saved data is used later in
0561  * _scsih_probe_boot_devices().
0562  */
0563 static void
0564 _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
0565     u32 channel)
0566 {
0567     struct _sas_device *sas_device;
0568     struct _pcie_device *pcie_device;
0569     struct _raid_device *raid_device;
0570     u64 sas_address;
0571     u64 device_name;
0572     u64 enclosure_logical_id;
0573     u16 slot;
0574 
0575      /* only process this function when driver loads */
0576     if (!ioc->is_driver_loading)
0577         return;
0578 
0579      /* no Bios, return immediately */
0580     if (!ioc->bios_pg3.BiosVersion)
0581         return;
0582 
0583     if (channel == RAID_CHANNEL) {
0584         raid_device = device;
0585         sas_address = raid_device->wwid;
0586         device_name = 0;
0587         enclosure_logical_id = 0;
0588         slot = 0;
0589     } else if (channel == PCIE_CHANNEL) {
0590         pcie_device = device;
0591         sas_address = pcie_device->wwid;
0592         device_name = 0;
0593         enclosure_logical_id = 0;
0594         slot = 0;
0595     } else {
0596         sas_device = device;
0597         sas_address = sas_device->sas_address;
0598         device_name = sas_device->device_name;
0599         enclosure_logical_id = sas_device->enclosure_logical_id;
0600         slot = sas_device->slot;
0601     }
0602 
0603     if (!ioc->req_boot_device.device) {
0604         if (_scsih_is_boot_device(sas_address, device_name,
0605             enclosure_logical_id, slot,
0606             (ioc->bios_pg2.ReqBootDeviceForm &
0607             MPI2_BIOSPAGE2_FORM_MASK),
0608             &ioc->bios_pg2.RequestedBootDevice)) {
0609             dinitprintk(ioc,
0610                     ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
0611                          __func__, (u64)sas_address));
0612             ioc->req_boot_device.device = device;
0613             ioc->req_boot_device.channel = channel;
0614         }
0615     }
0616 
0617     if (!ioc->req_alt_boot_device.device) {
0618         if (_scsih_is_boot_device(sas_address, device_name,
0619             enclosure_logical_id, slot,
0620             (ioc->bios_pg2.ReqAltBootDeviceForm &
0621             MPI2_BIOSPAGE2_FORM_MASK),
0622             &ioc->bios_pg2.RequestedAltBootDevice)) {
0623             dinitprintk(ioc,
0624                     ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
0625                          __func__, (u64)sas_address));
0626             ioc->req_alt_boot_device.device = device;
0627             ioc->req_alt_boot_device.channel = channel;
0628         }
0629     }
0630 
0631     if (!ioc->current_boot_device.device) {
0632         if (_scsih_is_boot_device(sas_address, device_name,
0633             enclosure_logical_id, slot,
0634             (ioc->bios_pg2.CurrentBootDeviceForm &
0635             MPI2_BIOSPAGE2_FORM_MASK),
0636             &ioc->bios_pg2.CurrentBootDevice)) {
0637             dinitprintk(ioc,
0638                     ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
0639                          __func__, (u64)sas_address));
0640             ioc->current_boot_device.device = device;
0641             ioc->current_boot_device.channel = channel;
0642         }
0643     }
0644 }
0645 
0646 static struct _sas_device *
0647 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
0648         struct MPT3SAS_TARGET *tgt_priv)
0649 {
0650     struct _sas_device *ret;
0651 
0652     assert_spin_locked(&ioc->sas_device_lock);
0653 
0654     ret = tgt_priv->sas_dev;
0655     if (ret)
0656         sas_device_get(ret);
0657 
0658     return ret;
0659 }
0660 
0661 static struct _sas_device *
0662 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
0663         struct MPT3SAS_TARGET *tgt_priv)
0664 {
0665     struct _sas_device *ret;
0666     unsigned long flags;
0667 
0668     spin_lock_irqsave(&ioc->sas_device_lock, flags);
0669     ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
0670     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
0671 
0672     return ret;
0673 }
0674 
0675 static struct _pcie_device *
0676 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
0677     struct MPT3SAS_TARGET *tgt_priv)
0678 {
0679     struct _pcie_device *ret;
0680 
0681     assert_spin_locked(&ioc->pcie_device_lock);
0682 
0683     ret = tgt_priv->pcie_dev;
0684     if (ret)
0685         pcie_device_get(ret);
0686 
0687     return ret;
0688 }
0689 
0690 /**
0691  * mpt3sas_get_pdev_from_target - pcie device search
0692  * @ioc: per adapter object
0693  * @tgt_priv: starget private object
0694  *
0695  * Context: This function acquires ioc->pcie_device_lock and releases it
0696  * before returning the pcie_device object.
0697  *
0698  * This searches for the pcie_device attached to the target and returns it.
0699  */
0700 static struct _pcie_device *
0701 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
0702     struct MPT3SAS_TARGET *tgt_priv)
0703 {
0704     struct _pcie_device *ret;
0705     unsigned long flags;
0706 
0707     spin_lock_irqsave(&ioc->pcie_device_lock, flags);
0708     ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
0709     spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
0710 
0711     return ret;
0712 }
0713 
0714 
0715 /**
0716  * __mpt3sas_get_sdev_by_rphy - sas device search
0717  * @ioc: per adapter object
0718  * @rphy: sas_rphy pointer
0719  *
0720  * Context: Calling function should hold ioc->sas_device_lock when
0721  * calling this function.
0722  *
0723  * This searches for the sas_device matching the given rphy object,
0724  * then returns the sas_device object.
0725  */
0726 struct _sas_device *
0727 __mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
0728     struct sas_rphy *rphy)
0729 {
0730     struct _sas_device *sas_device;
0731 
0732     assert_spin_locked(&ioc->sas_device_lock);
0733 
0734     list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
0735         if (sas_device->rphy != rphy)
0736             continue;
0737         sas_device_get(sas_device);
0738         return sas_device;
0739     }
0740 
0741     sas_device = NULL;
0742     list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
0743         if (sas_device->rphy != rphy)
0744             continue;
0745         sas_device_get(sas_device);
0746         return sas_device;
0747     }
0748 
0749     return NULL;
0750 }
0751 
0752 /**
0753  * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
0754  *              sas address from sas_device_list list
0755  * @ioc: per adapter object
0756  * @sas_address: device sas address
0757  * @port: hba port entry
0758  *
0759  * Search for the _sas_device object corresponding to the provided sas address;
0760  * if found, return the _sas_device object, otherwise return NULL.
0761  */
0762 struct _sas_device *
0763 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
0764     u64 sas_address, struct hba_port *port)
0765 {
0766     struct _sas_device *sas_device;
0767 
0768     if (!port)
0769         return NULL;
0770 
0771     assert_spin_locked(&ioc->sas_device_lock);
0772 
0773     list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
0774         if (sas_device->sas_address != sas_address)
0775             continue;
0776         if (sas_device->port != port)
0777             continue;
0778         sas_device_get(sas_device);
0779         return sas_device;
0780     }
0781 
0782     list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
0783         if (sas_device->sas_address != sas_address)
0784             continue;
0785         if (sas_device->port != port)
0786             continue;
0787         sas_device_get(sas_device);
0788         return sas_device;
0789     }
0790 
0791     return NULL;
0792 }
0793 
0794 /**
0795  * mpt3sas_get_sdev_by_addr - sas device search
0796  * @ioc: per adapter object
0797  * @sas_address: sas address
0798  * @port: hba port entry
0799  * Context: This function acquires and releases ioc->sas_device_lock internally.
0800  *
0801  * This searches for sas_device based on sas_address & port number,
0802  * then returns the sas_device object.
0803  */
0804 struct _sas_device *
0805 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
0806     u64 sas_address, struct hba_port *port)
0807 {
0808     struct _sas_device *sas_device;
0809     unsigned long flags;
0810 
0811     spin_lock_irqsave(&ioc->sas_device_lock, flags);
0812     sas_device = __mpt3sas_get_sdev_by_addr(ioc,
0813         sas_address, port);
0814     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
0815 
0816     return sas_device;
0817 }
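/*
 * Illustrative sketch, not part of the upstream file: the lookup helpers
 * above return a sas_device with an elevated reference count, so every
 * caller is expected to drop that reference with sas_device_put() once it
 * is done with the object.
 */
static void _example_sdev_lookup(struct MPT3SAS_ADAPTER *ioc,
    u64 sas_address, struct hba_port *port)
{
    struct _sas_device *sas_device;

    sas_device = mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
    if (!sas_device)
        return;

    /* ... use sas_device->handle, sas_device->slot, etc. ... */

    sas_device_put(sas_device); /* drop the reference taken by the lookup */
}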
0818 
0819 static struct _sas_device *
0820 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
0821 {
0822     struct _sas_device *sas_device;
0823 
0824     assert_spin_locked(&ioc->sas_device_lock);
0825 
0826     list_for_each_entry(sas_device, &ioc->sas_device_list, list)
0827         if (sas_device->handle == handle)
0828             goto found_device;
0829 
0830     list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
0831         if (sas_device->handle == handle)
0832             goto found_device;
0833 
0834     return NULL;
0835 
0836 found_device:
0837     sas_device_get(sas_device);
0838     return sas_device;
0839 }
0840 
0841 /**
0842  * mpt3sas_get_sdev_by_handle - sas device search
0843  * @ioc: per adapter object
0844  * @handle: sas device handle (assigned by firmware)
0845  * Context: This function acquires and releases ioc->sas_device_lock internally.
0846  *
0847  * This searches for sas_device based on handle, then returns the sas_device
0848  * object.
0849  */
0850 struct _sas_device *
0851 mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
0852 {
0853     struct _sas_device *sas_device;
0854     unsigned long flags;
0855 
0856     spin_lock_irqsave(&ioc->sas_device_lock, flags);
0857     sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
0858     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
0859 
0860     return sas_device;
0861 }
0862 
0863 /**
0864  * _scsih_display_enclosure_chassis_info - display device location info
0865  * @ioc: per adapter object
0866  * @sas_device: per sas device object
0867  * @sdev: scsi device struct
0868  * @starget: scsi target struct
0869  */
0870 static void
0871 _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
0872     struct _sas_device *sas_device, struct scsi_device *sdev,
0873     struct scsi_target *starget)
0874 {
0875     if (sdev) {
0876         if (sas_device->enclosure_handle != 0)
0877             sdev_printk(KERN_INFO, sdev,
0878                 "enclosure logical id (0x%016llx), slot(%d) \n",
0879                 (unsigned long long)
0880                 sas_device->enclosure_logical_id,
0881                 sas_device->slot);
0882         if (sas_device->connector_name[0] != '\0')
0883             sdev_printk(KERN_INFO, sdev,
0884                 "enclosure level(0x%04x), connector name( %s)\n",
0885                 sas_device->enclosure_level,
0886                 sas_device->connector_name);
0887         if (sas_device->is_chassis_slot_valid)
0888             sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
0889                 sas_device->chassis_slot);
0890     } else if (starget) {
0891         if (sas_device->enclosure_handle != 0)
0892             starget_printk(KERN_INFO, starget,
0893                 "enclosure logical id(0x%016llx), slot(%d) \n",
0894                 (unsigned long long)
0895                 sas_device->enclosure_logical_id,
0896                 sas_device->slot);
0897         if (sas_device->connector_name[0] != '\0')
0898             starget_printk(KERN_INFO, starget,
0899                 "enclosure level(0x%04x), connector name( %s)\n",
0900                 sas_device->enclosure_level,
0901                 sas_device->connector_name);
0902         if (sas_device->is_chassis_slot_valid)
0903             starget_printk(KERN_INFO, starget,
0904                 "chassis slot(0x%04x)\n",
0905                 sas_device->chassis_slot);
0906     } else {
0907         if (sas_device->enclosure_handle != 0)
0908             ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
0909                  (u64)sas_device->enclosure_logical_id,
0910                  sas_device->slot);
0911         if (sas_device->connector_name[0] != '\0')
0912             ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
0913                  sas_device->enclosure_level,
0914                  sas_device->connector_name);
0915         if (sas_device->is_chassis_slot_valid)
0916             ioc_info(ioc, "chassis slot(0x%04x)\n",
0917                  sas_device->chassis_slot);
0918     }
0919 }
0920 
0921 /**
0922  * _scsih_sas_device_remove - remove sas_device from list.
0923  * @ioc: per adapter object
0924  * @sas_device: the sas_device object
0925  * Context: This function will acquire ioc->sas_device_lock.
0926  *
0927  * If sas_device is on the list, remove it and decrement its reference count.
0928  */
0929 static void
0930 _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
0931     struct _sas_device *sas_device)
0932 {
0933     unsigned long flags;
0934 
0935     if (!sas_device)
0936         return;
0937     ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
0938          sas_device->handle, (u64)sas_device->sas_address);
0939 
0940     _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
0941 
0942     /*
0943      * The lock serializes access to the list, but we still need to verify
0944      * that nobody removed the entry while we were waiting on the lock.
0945      */
0946     spin_lock_irqsave(&ioc->sas_device_lock, flags);
0947     if (!list_empty(&sas_device->list)) {
0948         list_del_init(&sas_device->list);
0949         sas_device_put(sas_device);
0950     }
0951     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
0952 }
0953 
0954 /**
0955  * _scsih_device_remove_by_handle - removing device object by handle
0956  * @ioc: per adapter object
0957  * @handle: device handle
0958  */
0959 static void
0960 _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
0961 {
0962     struct _sas_device *sas_device;
0963     unsigned long flags;
0964 
0965     if (ioc->shost_recovery)
0966         return;
0967 
0968     spin_lock_irqsave(&ioc->sas_device_lock, flags);
0969     sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
0970     if (sas_device) {
0971         list_del_init(&sas_device->list);
0972         sas_device_put(sas_device);
0973     }
0974     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
0975     if (sas_device) {
0976         _scsih_remove_device(ioc, sas_device);
0977         sas_device_put(sas_device);
0978     }
0979 }
0980 
0981 /**
0982  * mpt3sas_device_remove_by_sas_address - removing device object by
0983  *                  sas address & port number
0984  * @ioc: per adapter object
0985  * @sas_address: device sas_address
0986  * @port: hba port entry
0987  *
0988  * Return nothing.
0989  */
0990 void
0991 mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
0992     u64 sas_address, struct hba_port *port)
0993 {
0994     struct _sas_device *sas_device;
0995     unsigned long flags;
0996 
0997     if (ioc->shost_recovery)
0998         return;
0999 
1000     spin_lock_irqsave(&ioc->sas_device_lock, flags);
1001     sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
1002     if (sas_device) {
1003         list_del_init(&sas_device->list);
1004         sas_device_put(sas_device);
1005     }
1006     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1007     if (sas_device) {
1008         _scsih_remove_device(ioc, sas_device);
1009         sas_device_put(sas_device);
1010     }
1011 }
1012 
1013 /**
1014  * _scsih_sas_device_add - insert sas_device to the list.
1015  * @ioc: per adapter object
1016  * @sas_device: the sas_device object
1017  * Context: This function will acquire ioc->sas_device_lock.
1018  *
1019  * Adding new object to the ioc->sas_device_list.
1020  */
1021 static void
1022 _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
1023     struct _sas_device *sas_device)
1024 {
1025     unsigned long flags;
1026 
1027     dewtprintk(ioc,
1028            ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1029                 __func__, sas_device->handle,
1030                 (u64)sas_device->sas_address));
1031 
1032     dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1033         NULL, NULL));
1034 
1035     spin_lock_irqsave(&ioc->sas_device_lock, flags);
1036     sas_device_get(sas_device);
1037     list_add_tail(&sas_device->list, &ioc->sas_device_list);
1038     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1039 
1040     if (ioc->hide_drives) {
1041         clear_bit(sas_device->handle, ioc->pend_os_device_add);
1042         return;
1043     }
1044 
1045     if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
1046          sas_device->sas_address_parent, sas_device->port)) {
1047         _scsih_sas_device_remove(ioc, sas_device);
1048     } else if (!sas_device->starget) {
1049         /*
1050          * When async scanning is enabled, it's not possible to remove
1051          * devices while scanning is turned on due to an oops in
1052          * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
1053          */
1054         if (!ioc->is_driver_loading) {
1055             mpt3sas_transport_port_remove(ioc,
1056                 sas_device->sas_address,
1057                 sas_device->sas_address_parent,
1058                 sas_device->port);
1059             _scsih_sas_device_remove(ioc, sas_device);
1060         }
1061     } else
1062         clear_bit(sas_device->handle, ioc->pend_os_device_add);
1063 }
1064 
1065 /**
1066  * _scsih_sas_device_init_add - insert sas_device to the list.
1067  * @ioc: per adapter object
1068  * @sas_device: the sas_device object
1069  * Context: This function will acquire ioc->sas_device_lock.
1070  *
1071  * Adding new object at driver load time to the ioc->sas_device_init_list.
1072  */
1073 static void
1074 _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1075     struct _sas_device *sas_device)
1076 {
1077     unsigned long flags;
1078 
1079     dewtprintk(ioc,
1080            ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1081                 __func__, sas_device->handle,
1082                 (u64)sas_device->sas_address));
1083 
1084     dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1085         NULL, NULL));
1086 
1087     spin_lock_irqsave(&ioc->sas_device_lock, flags);
1088     sas_device_get(sas_device);
1089     list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
1090     _scsih_determine_boot_device(ioc, sas_device, 0);
1091     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1092 }
1093 
1094 
1095 static struct _pcie_device *
1096 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1097 {
1098     struct _pcie_device *pcie_device;
1099 
1100     assert_spin_locked(&ioc->pcie_device_lock);
1101 
1102     list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1103         if (pcie_device->wwid == wwid)
1104             goto found_device;
1105 
1106     list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1107         if (pcie_device->wwid == wwid)
1108             goto found_device;
1109 
1110     return NULL;
1111 
1112 found_device:
1113     pcie_device_get(pcie_device);
1114     return pcie_device;
1115 }
1116 
1117 
1118 /**
1119  * mpt3sas_get_pdev_by_wwid - pcie device search
1120  * @ioc: per adapter object
1121  * @wwid: wwid
1122  *
1123  * Context: This function acquires ioc->pcie_device_lock and releases it
1124  * before returning the pcie_device object.
1125  *
1126  * This searches for pcie_device based on wwid, then returns the pcie_device object.
1127  */
1128 static struct _pcie_device *
1129 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1130 {
1131     struct _pcie_device *pcie_device;
1132     unsigned long flags;
1133 
1134     spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1135     pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
1136     spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1137 
1138     return pcie_device;
1139 }
1140 
1141 
1142 static struct _pcie_device *
1143 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
1144     int channel)
1145 {
1146     struct _pcie_device *pcie_device;
1147 
1148     assert_spin_locked(&ioc->pcie_device_lock);
1149 
1150     list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1151         if (pcie_device->id == id && pcie_device->channel == channel)
1152             goto found_device;
1153 
1154     list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1155         if (pcie_device->id == id && pcie_device->channel == channel)
1156             goto found_device;
1157 
1158     return NULL;
1159 
1160 found_device:
1161     pcie_device_get(pcie_device);
1162     return pcie_device;
1163 }
1164 
1165 static struct _pcie_device *
1166 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1167 {
1168     struct _pcie_device *pcie_device;
1169 
1170     assert_spin_locked(&ioc->pcie_device_lock);
1171 
1172     list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1173         if (pcie_device->handle == handle)
1174             goto found_device;
1175 
1176     list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1177         if (pcie_device->handle == handle)
1178             goto found_device;
1179 
1180     return NULL;
1181 
1182 found_device:
1183     pcie_device_get(pcie_device);
1184     return pcie_device;
1185 }
1186 
1187 
1188 /**
1189  * mpt3sas_get_pdev_by_handle - pcie device search
1190  * @ioc: per adapter object
1191  * @handle: Firmware device handle
1192  *
1193  * Context: This function acquires ioc->pcie_device_lock and releases it
1194  * before returning the pcie_device object.
1195  *
1196  * This searches for pcie_device based on handle, then returns the pcie_device
1197  * object.
1198  */
1199 struct _pcie_device *
1200 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1201 {
1202     struct _pcie_device *pcie_device;
1203     unsigned long flags;
1204 
1205     spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1206     pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1207     spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1208 
1209     return pcie_device;
1210 }
1211 
1212 /**
1213  * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
1214  * @ioc: per adapter object
1215  * Context: This function will acquire ioc->pcie_device_lock
1216  *
1217  * Update ioc->max_shutdown_latency to the largest RTD3 Entry Latency
1218  * reported among all available NVMe drives.
1219  * The minimum max_shutdown_latency is six seconds.
1220  */
1221 static void
1222 _scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
1223 {
1224     struct _pcie_device *pcie_device;
1225     unsigned long flags;
1226     u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
1227 
1228     spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1229     list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1230         if (pcie_device->shutdown_latency) {
1231             if (shutdown_latency < pcie_device->shutdown_latency)
1232                 shutdown_latency =
1233                     pcie_device->shutdown_latency;
1234         }
1235     }
1236     ioc->max_shutdown_latency = shutdown_latency;
1237     spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1238 }
1239 
1240 /**
1241  * _scsih_pcie_device_remove - remove pcie_device from list.
1242  * @ioc: per adapter object
1243  * @pcie_device: the pcie_device object
1244  * Context: This function will acquire ioc->pcie_device_lock.
1245  *
1246  * If pcie_device is on the list, remove it and decrement its reference count.
1247  */
1248 static void
1249 _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1250     struct _pcie_device *pcie_device)
1251 {
1252     unsigned long flags;
1253     int was_on_pcie_device_list = 0;
1254     u8 update_latency = 0;
1255 
1256     if (!pcie_device)
1257         return;
1258     ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
1259          pcie_device->handle, (u64)pcie_device->wwid);
1260     if (pcie_device->enclosure_handle != 0)
1261         ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
1262              (u64)pcie_device->enclosure_logical_id,
1263              pcie_device->slot);
1264     if (pcie_device->connector_name[0] != '\0')
1265         ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
1266              pcie_device->enclosure_level,
1267              pcie_device->connector_name);
1268 
1269     spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1270     if (!list_empty(&pcie_device->list)) {
1271         list_del_init(&pcie_device->list);
1272         was_on_pcie_device_list = 1;
1273     }
1274     if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1275         update_latency = 1;
1276     spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1277     if (was_on_pcie_device_list) {
1278         kfree(pcie_device->serial_number);
1279         pcie_device_put(pcie_device);
1280     }
1281 
1282     /*
1283      * This device's RTD3 Entry Latency matches IOC's
1284      * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1285      * from the available drives as current drive is getting removed.
1286      */
1287     if (update_latency)
1288         _scsih_set_nvme_max_shutdown_latency(ioc);
1289 }
1290 
1291 
1292 /**
1293  * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1294  * @ioc: per adapter object
1295  * @handle: device handle
1296  */
1297 static void
1298 _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1299 {
1300     struct _pcie_device *pcie_device;
1301     unsigned long flags;
1302     int was_on_pcie_device_list = 0;
1303     u8 update_latency = 0;
1304 
1305     if (ioc->shost_recovery)
1306         return;
1307 
1308     spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1309     pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1310     if (pcie_device) {
1311         if (!list_empty(&pcie_device->list)) {
1312             list_del_init(&pcie_device->list);
1313             was_on_pcie_device_list = 1;
1314             pcie_device_put(pcie_device);
1315         }
1316         if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1317             update_latency = 1;
1318     }
1319     spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1320     if (was_on_pcie_device_list) {
1321         _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
1322         pcie_device_put(pcie_device);
1323     }
1324 
1325     /*
1326      * This device's RTD3 Entry Latency matches IOC's
1327      * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1328      * from the available drives as current drive is getting removed.
1329      */
1330     if (update_latency)
1331         _scsih_set_nvme_max_shutdown_latency(ioc);
1332 }
1333 
1334 /**
1335  * _scsih_pcie_device_add - add pcie_device object
1336  * @ioc: per adapter object
1337  * @pcie_device: pcie_device object
1338  *
1339  * The pcie_device object is added to the pcie_device_list linked list.
1340  */
1341 static void
1342 _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1343     struct _pcie_device *pcie_device)
1344 {
1345     unsigned long flags;
1346 
1347     dewtprintk(ioc,
1348            ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1349                 __func__,
1350                 pcie_device->handle, (u64)pcie_device->wwid));
1351     if (pcie_device->enclosure_handle != 0)
1352         dewtprintk(ioc,
1353                ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1354                     __func__,
1355                     (u64)pcie_device->enclosure_logical_id,
1356                     pcie_device->slot));
1357     if (pcie_device->connector_name[0] != '\0')
1358         dewtprintk(ioc,
1359                ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1360                     __func__, pcie_device->enclosure_level,
1361                     pcie_device->connector_name));
1362 
1363     spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1364     pcie_device_get(pcie_device);
1365     list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1366     spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1367 
1368     if (pcie_device->access_status ==
1369         MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
1370         clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1371         return;
1372     }
1373     if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1374         _scsih_pcie_device_remove(ioc, pcie_device);
1375     } else if (!pcie_device->starget) {
1376         if (!ioc->is_driver_loading) {
1377 /*TODO-- Need to find out whether this condition will occur or not*/
1378             clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1379         }
1380     } else
1381         clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1382 }
1383 
1384 /**
1385  * _scsih_pcie_device_init_add - insert pcie_device to the init list.
1386  * @ioc: per adapter object
1387  * @pcie_device: the pcie_device object
1388  * Context: This function will acquire ioc->pcie_device_lock.
1389  *
1390  * Adding new object at driver load time to the ioc->pcie_device_init_list.
1391  */
1392 static void
1393 _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1394                 struct _pcie_device *pcie_device)
1395 {
1396     unsigned long flags;
1397 
1398     dewtprintk(ioc,
1399            ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1400                 __func__,
1401                 pcie_device->handle, (u64)pcie_device->wwid));
1402     if (pcie_device->enclosure_handle != 0)
1403         dewtprintk(ioc,
1404                ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1405                     __func__,
1406                     (u64)pcie_device->enclosure_logical_id,
1407                     pcie_device->slot));
1408     if (pcie_device->connector_name[0] != '\0')
1409         dewtprintk(ioc,
1410                ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1411                     __func__, pcie_device->enclosure_level,
1412                     pcie_device->connector_name));
1413 
1414     spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1415     pcie_device_get(pcie_device);
1416     list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1417     if (pcie_device->access_status !=
1418         MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
1419         _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1420     spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1421 }
1422 /**
1423  * _scsih_raid_device_find_by_id - raid device search
1424  * @ioc: per adapter object
1425  * @id: sas device target id
1426  * @channel: sas device channel
1427  * Context: Calling function should acquire ioc->raid_device_lock
1428  *
1429  * This searches for raid_device based on target id, then returns the
1430  * raid_device object.
1431  */
1432 static struct _raid_device *
1433 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1434 {
1435     struct _raid_device *raid_device, *r;
1436 
1437     r = NULL;
1438     list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1439         if (raid_device->id == id && raid_device->channel == channel) {
1440             r = raid_device;
1441             goto out;
1442         }
1443     }
1444 
1445  out:
1446     return r;
1447 }
1448 
1449 /**
1450  * mpt3sas_raid_device_find_by_handle - raid device search
1451  * @ioc: per adapter object
1452  * @handle: sas device handle (assigned by firmware)
1453  * Context: Calling function should acquire ioc->raid_device_lock
1454  *
1455  * This searches for raid_device based on handle, then returns the
1456  * raid_device object.
1457  */
1458 struct _raid_device *
1459 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1460 {
1461     struct _raid_device *raid_device, *r;
1462 
1463     r = NULL;
1464     list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1465         if (raid_device->handle != handle)
1466             continue;
1467         r = raid_device;
1468         goto out;
1469     }
1470 
1471  out:
1472     return r;
1473 }
1474 
1475 /**
1476  * _scsih_raid_device_find_by_wwid - raid device search
1477  * @ioc: per adapter object
1478  * @wwid: world wide identifier of the raid volume
1479  * Context: Calling function should acquire ioc->raid_device_lock
1480  *
1481  * This searches for raid_device based on wwid, then returns the
1482  * raid_device object.
1483  */
1484 static struct _raid_device *
1485 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1486 {
1487     struct _raid_device *raid_device, *r;
1488 
1489     r = NULL;
1490     list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1491         if (raid_device->wwid != wwid)
1492             continue;
1493         r = raid_device;
1494         goto out;
1495     }
1496 
1497  out:
1498     return r;
1499 }
1500 
1501 /**
1502  * _scsih_raid_device_add - add raid_device object
1503  * @ioc: per adapter object
1504  * @raid_device: raid_device object
1505  *
1506  * The raid_device object is added to the raid_device_list linked list.
1507  */
1508 static void
1509 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1510     struct _raid_device *raid_device)
1511 {
1512     unsigned long flags;
1513 
1514     dewtprintk(ioc,
1515            ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1516                 __func__,
1517                 raid_device->handle, (u64)raid_device->wwid));
1518 
1519     spin_lock_irqsave(&ioc->raid_device_lock, flags);
1520     list_add_tail(&raid_device->list, &ioc->raid_device_list);
1521     spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1522 }
1523 
1524 /**
1525  * _scsih_raid_device_remove - delete raid_device object
1526  * @ioc: per adapter object
1527  * @raid_device: raid_device object
1528  *
1529  */
1530 static void
1531 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1532     struct _raid_device *raid_device)
1533 {
1534     unsigned long flags;
1535 
1536     spin_lock_irqsave(&ioc->raid_device_lock, flags);
1537     list_del(&raid_device->list);
1538     kfree(raid_device);
1539     spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1540 }
1541 
1542 /**
1543  * mpt3sas_scsih_expander_find_by_handle - expander device search
1544  * @ioc: per adapter object
1545  * @handle: expander handle (assigned by firmware)
1546  * Context: Calling function should acquire ioc->sas_node_lock
1547  *
1548  * This searches for expander device based on handle, then returns the
1549  * sas_node object.
1550  */
1551 struct _sas_node *
1552 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1553 {
1554     struct _sas_node *sas_expander, *r;
1555 
1556     r = NULL;
1557     list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1558         if (sas_expander->handle != handle)
1559             continue;
1560         r = sas_expander;
1561         goto out;
1562     }
1563  out:
1564     return r;
1565 }
1566 
1567 /**
1568  * mpt3sas_scsih_enclosure_find_by_handle - enclosure device search
1569  * @ioc: per adapter object
1570  * @handle: enclosure handle (assigned by firmware)
1571  * Context: Calling function should acquire ioc->sas_device_lock
1572  *
1573  * This searches for enclosure device based on handle, then returns the
1574  * enclosure object.
1575  */
1576 static struct _enclosure_node *
1577 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1578 {
1579     struct _enclosure_node *enclosure_dev, *r;
1580 
1581     r = NULL;
1582     list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1583         if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1584             continue;
1585         r = enclosure_dev;
1586         goto out;
1587     }
1588 out:
1589     return r;
1590 }
1591 /**
1592  * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1593  * @ioc: per adapter object
1594  * @sas_address: sas address
1595  * @port: hba port entry
1596  * Context: Calling function should acquire ioc->sas_node_lock.
1597  *
1598  * This searches for expander device based on sas_address & port number,
1599  * then returns the sas_node object.
1600  */
1601 struct _sas_node *
1602 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1603     u64 sas_address, struct hba_port *port)
1604 {
1605     struct _sas_node *sas_expander, *r = NULL;
1606 
1607     if (!port)
1608         return r;
1609 
1610     list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1611         if (sas_expander->sas_address != sas_address)
1612             continue;
1613         if (sas_expander->port != port)
1614             continue;
1615         r = sas_expander;
1616         goto out;
1617     }
1618  out:
1619     return r;
1620 }
1621 
1622 /**
1623  * _scsih_expander_node_add - insert expander device to the list.
1624  * @ioc: per adapter object
1625  * @sas_expander: the sas_device object
1626  * Context: This function will acquire ioc->sas_node_lock.
1627  *
1628  * Adding new object to the ioc->sas_expander_list.
1629  */
1630 static void
1631 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1632     struct _sas_node *sas_expander)
1633 {
1634     unsigned long flags;
1635 
1636     spin_lock_irqsave(&ioc->sas_node_lock, flags);
1637     list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1638     spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1639 }
1640 
1641 /**
1642  * _scsih_is_end_device - determines if device is an end device
1643  * @device_info: bitfield providing information about the device.
1644  * Context: none
1645  *
1646  * Return: 1 if end device.
1647  */
1648 static int
1649 _scsih_is_end_device(u32 device_info)
1650 {
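    /*
     * Note (added for clarity): the bitwise ORs below are only tested for
     * truth, so the check behaves like a logical OR across the SSP/STP/SATA
     * target bits.
     */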
1651     if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1652         ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1653         (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1654         (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1655         return 1;
1656     else
1657         return 0;
1658 }
1659 
1660 /**
1661  * _scsih_is_nvme_pciescsi_device - determines if
1662  *          device is a PCIe NVMe/SCSI device
1663  * @device_info: bitfield providing information about the device.
1664  * Context: none
1665  *
1666  * Return: 1 if the device is a PCIe NVMe or SCSI device.
1667  */
1668 static int
1669 _scsih_is_nvme_pciescsi_device(u32 device_info)
1670 {
1671     if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1672         == MPI26_PCIE_DEVINFO_NVME) ||
1673         ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1674         == MPI26_PCIE_DEVINFO_SCSI))
1675         return 1;
1676     else
1677         return 0;
1678 }
1679 
1680 /**
1681  * _scsih_scsi_lookup_find_by_target - search for matching channel:id
1682  * @ioc: per adapter object
1683  * @id: target id
1684  * @channel: channel
1685  * Context: This function will acquire ioc->scsi_lookup_lock.
1686  *
1687  * This will search for a matching channel:id in the scsi_lookup array,
1688  * returning 1 if found.
1689  */
1690 static u8
1691 _scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
1692     int channel)
1693 {
1694     int smid;
1695     struct scsi_cmnd *scmd;
1696 
1697     for (smid = 1;
1698          smid <= ioc->shost->can_queue; smid++) {
1699         scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1700         if (!scmd)
1701             continue;
1702         if (scmd->device->id == id &&
1703             scmd->device->channel == channel)
1704             return 1;
1705     }
1706     return 0;
1707 }
1708 
1709 /**
1710  * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
1711  * @ioc: per adapter object
1712  * @id: target id
1713  * @lun: lun number
1714  * @channel: channel
1715  * Context: This function will acquire ioc->scsi_lookup_lock.
1716  *
1717  * This will search for a matching channel:id:lun in the scsi_lookup array,
1718  * returning 1 if found.
1719  */
1720 static u8
1721 _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1722     unsigned int lun, int channel)
1723 {
1724     int smid;
1725     struct scsi_cmnd *scmd;
1726 
1727     for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
1728 
1729         scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1730         if (!scmd)
1731             continue;
1732         if (scmd->device->id == id &&
1733             scmd->device->channel == channel &&
1734             scmd->device->lun == lun)
1735             return 1;
1736     }
1737     return 0;
1738 }
1739 
1740 /**
1741  * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1742  * @ioc: per adapter object
1743  * @smid: system request message index
1744  *
1745  * Return: the scmd pointer stored at the given smid, or NULL if no
1746  * valid outstanding SCSI IO is associated with that smid.
1747  */
1748 struct scsi_cmnd *
1749 mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1750 {
1751     struct scsi_cmnd *scmd = NULL;
1752     struct scsiio_tracker *st;
1753     Mpi25SCSIIORequest_t *mpi_request;
1754     u16 tag = smid - 1;
1755 
1756     if (smid > 0  &&
1757         smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
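        /*
         * Rebuild the blk-mq unique tag (hw queue index in the upper bits,
         * per-queue tag in the lower bits) so that scsi_host_find_tag()
         * can locate the command below.
         */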
1758         u32 unique_tag =
1759             ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
1760 
1761         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1762 
1763         /*
1764          * If SCSI IO request is outstanding at driver level then
1765          * DevHandle field must be non-zero. If DevHandle is zero
1766          * then it means that this smid is free at driver level,
1767          * so return NULL.
1768          */
1769         if (!mpi_request->DevHandle)
1770             return scmd;
1771 
1772         scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1773         if (scmd) {
1774             st = scsi_cmd_priv(scmd);
1775             if (st->cb_idx == 0xFF || st->smid == 0)
1776                 scmd = NULL;
1777         }
1778     }
1779     return scmd;
1780 }
1781 
1782 /**
1783  * scsih_change_queue_depth - setting device queue depth
1784  * @sdev: scsi device struct
1785  * @qdepth: requested queue depth
1786  *
1787  * Return: queue depth.
1788  */
1789 static int
1790 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1791 {
1792     struct Scsi_Host *shost = sdev->host;
1793     int max_depth;
1794     struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1795     struct MPT3SAS_DEVICE *sas_device_priv_data;
1796     struct MPT3SAS_TARGET *sas_target_priv_data;
1797     struct _sas_device *sas_device;
1798     unsigned long flags;
1799 
1800     max_depth = shost->can_queue;
1801 
1802     /*
1803      * limit max device queue for SATA to 32 if enable_sdev_max_qd
1804      * is disabled.
1805      */
1806     if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc)
1807         goto not_sata;
1808 
1809     sas_device_priv_data = sdev->hostdata;
1810     if (!sas_device_priv_data)
1811         goto not_sata;
1812     sas_target_priv_data = sas_device_priv_data->sas_target;
1813     if (!sas_target_priv_data)
1814         goto not_sata;
1815     if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1816         goto not_sata;
1817 
1818     spin_lock_irqsave(&ioc->sas_device_lock, flags);
1819     sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1820     if (sas_device) {
1821         if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1822             max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1823 
1824         sas_device_put(sas_device);
1825     }
1826     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1827 
1828  not_sata:
1829 
1830     if (!sdev->tagged_supported)
1831         max_depth = 1;
1832     if (qdepth > max_depth)
1833         qdepth = max_depth;
1834     scsi_change_queue_depth(sdev, qdepth);
1835     sdev_printk(KERN_INFO, sdev,
1836         "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
1837         sdev->queue_depth, sdev->tagged_supported,
1838         sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
1839     return sdev->queue_depth;
1840 }
1841 
1842 /**
1843  * mpt3sas_scsih_change_queue_depth - setting device queue depth
1844  * @sdev: scsi device struct
1845  * @qdepth: requested queue depth
1846  *
1847  * Returns nothing.
1848  */
1849 void
1850 mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1851 {
1852     struct Scsi_Host *shost = sdev->host;
1853     struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1854 
1855     if (ioc->enable_sdev_max_qd)
1856         qdepth = shost->can_queue;
1857 
1858     scsih_change_queue_depth(sdev, qdepth);
1859 }
1860 
1861 /**
1862  * scsih_target_alloc - target add routine
1863  * @starget: scsi target struct
1864  *
1865  * Return: 0 if ok. Any other return is assumed to be an error and
1866  * the device is ignored.
1867  */
1868 static int
1869 scsih_target_alloc(struct scsi_target *starget)
1870 {
1871     struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1872     struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1873     struct MPT3SAS_TARGET *sas_target_priv_data;
1874     struct _sas_device *sas_device;
1875     struct _raid_device *raid_device;
1876     struct _pcie_device *pcie_device;
1877     unsigned long flags;
1878     struct sas_rphy *rphy;
1879 
1880     sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1881                        GFP_KERNEL);
1882     if (!sas_target_priv_data)
1883         return -ENOMEM;
1884 
1885     starget->hostdata = sas_target_priv_data;
1886     sas_target_priv_data->starget = starget;
1887     sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
1888 
1889     /* RAID volumes */
1890     if (starget->channel == RAID_CHANNEL) {
1891         spin_lock_irqsave(&ioc->raid_device_lock, flags);
1892         raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1893             starget->channel);
1894         if (raid_device) {
1895             sas_target_priv_data->handle = raid_device->handle;
1896             sas_target_priv_data->sas_address = raid_device->wwid;
1897             sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1898             if (ioc->is_warpdrive)
1899                 sas_target_priv_data->raid_device = raid_device;
1900             raid_device->starget = starget;
1901         }
1902         spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1903         return 0;
1904     }
1905 
1906     /* PCIe devices */
1907     if (starget->channel == PCIE_CHANNEL) {
1908         spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1909         pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1910             starget->channel);
1911         if (pcie_device) {
1912             sas_target_priv_data->handle = pcie_device->handle;
1913             sas_target_priv_data->sas_address = pcie_device->wwid;
1914             sas_target_priv_data->port = NULL;
1915             sas_target_priv_data->pcie_dev = pcie_device;
1916             pcie_device->starget = starget;
1917             pcie_device->id = starget->id;
1918             pcie_device->channel = starget->channel;
1919             sas_target_priv_data->flags |=
1920                 MPT_TARGET_FLAGS_PCIE_DEVICE;
1921             if (pcie_device->fast_path)
1922                 sas_target_priv_data->flags |=
1923                     MPT_TARGET_FASTPATH_IO;
1924         }
1925         spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1926         return 0;
1927     }
1928 
1929     /* sas/sata devices */
1930     spin_lock_irqsave(&ioc->sas_device_lock, flags);
1931     rphy = dev_to_rphy(starget->dev.parent);
1932     sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);
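    /* the reference taken here is dropped in scsih_target_destroy() */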
1933 
1934     if (sas_device) {
1935         sas_target_priv_data->handle = sas_device->handle;
1936         sas_target_priv_data->sas_address = sas_device->sas_address;
1937         sas_target_priv_data->port = sas_device->port;
1938         sas_target_priv_data->sas_dev = sas_device;
1939         sas_device->starget = starget;
1940         sas_device->id = starget->id;
1941         sas_device->channel = starget->channel;
1942         if (test_bit(sas_device->handle, ioc->pd_handles))
1943             sas_target_priv_data->flags |=
1944                 MPT_TARGET_FLAGS_RAID_COMPONENT;
1945         if (sas_device->fast_path)
1946             sas_target_priv_data->flags |=
1947                     MPT_TARGET_FASTPATH_IO;
1948     }
1949     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1950 
1951     return 0;
1952 }
1953 
1954 /**
1955  * scsih_target_destroy - target destroy routine
1956  * @starget: scsi target struct
1957  */
1958 static void
1959 scsih_target_destroy(struct scsi_target *starget)
1960 {
1961     struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1962     struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1963     struct MPT3SAS_TARGET *sas_target_priv_data;
1964     struct _sas_device *sas_device;
1965     struct _raid_device *raid_device;
1966     struct _pcie_device *pcie_device;
1967     unsigned long flags;
1968 
1969     sas_target_priv_data = starget->hostdata;
1970     if (!sas_target_priv_data)
1971         return;
1972 
1973     if (starget->channel == RAID_CHANNEL) {
1974         spin_lock_irqsave(&ioc->raid_device_lock, flags);
1975         raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1976             starget->channel);
1977         if (raid_device) {
1978             raid_device->starget = NULL;
1979             raid_device->sdev = NULL;
1980         }
1981         spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1982         goto out;
1983     }
1984 
1985     if (starget->channel == PCIE_CHANNEL) {
1986         spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1987         pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1988                             sas_target_priv_data);
1989         if (pcie_device && (pcie_device->starget == starget) &&
1990             (pcie_device->id == starget->id) &&
1991             (pcie_device->channel == starget->channel))
1992             pcie_device->starget = NULL;
1993 
1994         if (pcie_device) {
1995             /*
1996              * Corresponding get() is in scsih_target_alloc()
1997              */
1998             sas_target_priv_data->pcie_dev = NULL;
1999             pcie_device_put(pcie_device);
2000             pcie_device_put(pcie_device);
2001         }
2002         spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2003         goto out;
2004     }
2005 
2006     spin_lock_irqsave(&ioc->sas_device_lock, flags);
2007     sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
2008     if (sas_device && (sas_device->starget == starget) &&
2009         (sas_device->id == starget->id) &&
2010         (sas_device->channel == starget->channel))
2011         sas_device->starget = NULL;
2012 
2013     if (sas_device) {
2014         /*
2015          * Corresponding get() is in scsih_target_alloc()
2016          */
2017         sas_target_priv_data->sas_dev = NULL;
2018         sas_device_put(sas_device);
2019 
2020         sas_device_put(sas_device);
2021     }
2022     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2023 
2024  out:
2025     kfree(sas_target_priv_data);
2026     starget->hostdata = NULL;
2027 }
2028 
2029 /**
2030  * scsih_slave_alloc - device add routine
2031  * @sdev: scsi device struct
2032  *
2033  * Return: 0 if ok. Any other return is assumed to be an error and
2034  * the device is ignored.
2035  */
2036 static int
2037 scsih_slave_alloc(struct scsi_device *sdev)
2038 {
2039     struct Scsi_Host *shost;
2040     struct MPT3SAS_ADAPTER *ioc;
2041     struct MPT3SAS_TARGET *sas_target_priv_data;
2042     struct MPT3SAS_DEVICE *sas_device_priv_data;
2043     struct scsi_target *starget;
2044     struct _raid_device *raid_device;
2045     struct _sas_device *sas_device;
2046     struct _pcie_device *pcie_device;
2047     unsigned long flags;
2048 
2049     sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
2050                        GFP_KERNEL);
2051     if (!sas_device_priv_data)
2052         return -ENOMEM;
2053 
2054     sas_device_priv_data->lun = sdev->lun;
2055     sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
2056 
2057     starget = scsi_target(sdev);
2058     sas_target_priv_data = starget->hostdata;
2059     sas_target_priv_data->num_luns++;
2060     sas_device_priv_data->sas_target = sas_target_priv_data;
2061     sdev->hostdata = sas_device_priv_data;
2062     if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
2063         sdev->no_uld_attach = 1;
2064 
2065     shost = dev_to_shost(&starget->dev);
2066     ioc = shost_priv(shost);
2067     if (starget->channel == RAID_CHANNEL) {
2068         spin_lock_irqsave(&ioc->raid_device_lock, flags);
2069         raid_device = _scsih_raid_device_find_by_id(ioc,
2070             starget->id, starget->channel);
2071         if (raid_device)
2072             raid_device->sdev = sdev; /* raid is single lun */
2073         spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2074     }
2075     if (starget->channel == PCIE_CHANNEL) {
2076         spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2077         pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2078                 sas_target_priv_data->sas_address);
2079         if (pcie_device && (pcie_device->starget == NULL)) {
2080             sdev_printk(KERN_INFO, sdev,
2081                 "%s : pcie_device->starget set to starget @ %d\n",
2082                 __func__, __LINE__);
2083             pcie_device->starget = starget;
2084         }
2085 
2086         if (pcie_device)
2087             pcie_device_put(pcie_device);
2088         spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2089 
2090     } else  if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2091         spin_lock_irqsave(&ioc->sas_device_lock, flags);
2092         sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2093             sas_target_priv_data->sas_address,
2094             sas_target_priv_data->port);
2095         if (sas_device && (sas_device->starget == NULL)) {
2096             sdev_printk(KERN_INFO, sdev,
2097             "%s : sas_device->starget set to starget @ %d\n",
2098                  __func__, __LINE__);
2099             sas_device->starget = starget;
2100         }
2101 
2102         if (sas_device)
2103             sas_device_put(sas_device);
2104 
2105         spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2106     }
2107 
2108     return 0;
2109 }
2110 
2111 /**
2112  * scsih_slave_destroy - device destroy routine
2113  * @sdev: scsi device struct
2114  */
2115 static void
2116 scsih_slave_destroy(struct scsi_device *sdev)
2117 {
2118     struct MPT3SAS_TARGET *sas_target_priv_data;
2119     struct scsi_target *starget;
2120     struct Scsi_Host *shost;
2121     struct MPT3SAS_ADAPTER *ioc;
2122     struct _sas_device *sas_device;
2123     struct _pcie_device *pcie_device;
2124     unsigned long flags;
2125 
2126     if (!sdev->hostdata)
2127         return;
2128 
2129     starget = scsi_target(sdev);
2130     sas_target_priv_data = starget->hostdata;
2131     sas_target_priv_data->num_luns--;
2132 
2133     shost = dev_to_shost(&starget->dev);
2134     ioc = shost_priv(shost);
2135 
2136     if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2137         spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2138         pcie_device = __mpt3sas_get_pdev_from_target(ioc,
2139                 sas_target_priv_data);
2140         if (pcie_device && !sas_target_priv_data->num_luns)
2141             pcie_device->starget = NULL;
2142 
2143         if (pcie_device)
2144             pcie_device_put(pcie_device);
2145 
2146         spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2147 
2148     } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2149         spin_lock_irqsave(&ioc->sas_device_lock, flags);
2150         sas_device = __mpt3sas_get_sdev_from_target(ioc,
2151                 sas_target_priv_data);
2152         if (sas_device && !sas_target_priv_data->num_luns)
2153             sas_device->starget = NULL;
2154 
2155         if (sas_device)
2156             sas_device_put(sas_device);
2157         spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2158     }
2159 
2160     kfree(sdev->hostdata);
2161     sdev->hostdata = NULL;
2162 }
2163 
2164 /**
2165  * _scsih_display_sata_capabilities - sata capabilities
2166  * @ioc: per adapter object
2167  * @handle: device handle
2168  * @sdev: scsi device struct
2169  */
2170 static void
2171 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
2172     u16 handle, struct scsi_device *sdev)
2173 {
2174     Mpi2ConfigReply_t mpi_reply;
2175     Mpi2SasDevicePage0_t sas_device_pg0;
2176     u32 ioc_status;
2177     u16 flags;
2178     u32 device_info;
2179 
2180     if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
2181         MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
2182         ioc_err(ioc, "failure at %s:%d/%s()!\n",
2183             __FILE__, __LINE__, __func__);
2184         return;
2185     }
2186 
2187     ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2188         MPI2_IOCSTATUS_MASK;
2189     if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2190         ioc_err(ioc, "failure at %s:%d/%s()!\n",
2191             __FILE__, __LINE__, __func__);
2192         return;
2193     }
2194 
2195     flags = le16_to_cpu(sas_device_pg0.Flags);
2196     device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
2197 
2198     sdev_printk(KERN_INFO, sdev,
2199         "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
2200         "sw_preserve(%s)\n",
2201         (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
2202         (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
2203         (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
2204         "n",
2205         (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
2206         (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
2207         (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
2208 }
2209 
2210 /*
2211  * raid transport support -
2212  * Enabled for SLES11 and newer. In older kernels the driver would panic when
2213  * it was unloaded and then loaded again; the subroutine raid_class_release()
2214  * does not appear to clean up properly.
2215  */
2216 
2217 /**
2218  * scsih_is_raid - return boolean indicating device is raid volume
2219  * @dev: the device struct object
2220  */
2221 static int
2222 scsih_is_raid(struct device *dev)
2223 {
2224     struct scsi_device *sdev = to_scsi_device(dev);
2225     struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2226 
2227     if (ioc->is_warpdrive)
2228         return 0;
2229     return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
2230 }
2231 
2232 static int
2233 scsih_is_nvme(struct device *dev)
2234 {
2235     struct scsi_device *sdev = to_scsi_device(dev);
2236 
2237     return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2238 }
2239 
2240 /**
2241  * scsih_get_resync - get raid volume resync percent complete
2242  * @dev: the device struct object
2243  */
2244 static void
2245 scsih_get_resync(struct device *dev)
2246 {
2247     struct scsi_device *sdev = to_scsi_device(dev);
2248     struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2249     static struct _raid_device *raid_device;
2250     unsigned long flags;
2251     Mpi2RaidVolPage0_t vol_pg0;
2252     Mpi2ConfigReply_t mpi_reply;
2253     u32 volume_status_flags;
2254     u8 percent_complete;
2255     u16 handle;
2256 
2257     percent_complete = 0;
2258     handle = 0;
2259     if (ioc->is_warpdrive)
2260         goto out;
2261 
2262     spin_lock_irqsave(&ioc->raid_device_lock, flags);
2263     raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2264         sdev->channel);
2265     if (raid_device) {
2266         handle = raid_device->handle;
2267         percent_complete = raid_device->percent_complete;
2268     }
2269     spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2270 
2271     if (!handle)
2272         goto out;
2273 
2274     if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2275          MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2276          sizeof(Mpi2RaidVolPage0_t))) {
2277         ioc_err(ioc, "failure at %s:%d/%s()!\n",
2278             __FILE__, __LINE__, __func__);
2279         percent_complete = 0;
2280         goto out;
2281     }
2282 
2283     volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2284     if (!(volume_status_flags &
2285         MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2286         percent_complete = 0;
2287 
2288  out:
2289 
2290     switch (ioc->hba_mpi_version_belonged) {
2291     case MPI2_VERSION:
2292         raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2293         break;
2294     case MPI25_VERSION:
2295     case MPI26_VERSION:
2296         raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2297         break;
2298     }
2299 }
2300 
2301 /**
2302  * scsih_get_state - get raid volume level
2303  * @dev: the device struct object
2304  */
2305 static void
2306 scsih_get_state(struct device *dev)
2307 {
2308     struct scsi_device *sdev = to_scsi_device(dev);
2309     struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2310     static struct _raid_device *raid_device;
2311     unsigned long flags;
2312     Mpi2RaidVolPage0_t vol_pg0;
2313     Mpi2ConfigReply_t mpi_reply;
2314     u32 volstate;
2315     enum raid_state state = RAID_STATE_UNKNOWN;
2316     u16 handle = 0;
2317 
2318     spin_lock_irqsave(&ioc->raid_device_lock, flags);
2319     raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2320         sdev->channel);
2321     if (raid_device)
2322         handle = raid_device->handle;
2323     spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2324 
2325     if (!raid_device)
2326         goto out;
2327 
2328     if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2329          MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2330          sizeof(Mpi2RaidVolPage0_t))) {
2331         ioc_err(ioc, "failure at %s:%d/%s()!\n",
2332             __FILE__, __LINE__, __func__);
2333         goto out;
2334     }
2335 
2336     volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2337     if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2338         state = RAID_STATE_RESYNCING;
2339         goto out;
2340     }
2341 
2342     switch (vol_pg0.VolumeState) {
2343     case MPI2_RAID_VOL_STATE_OPTIMAL:
2344     case MPI2_RAID_VOL_STATE_ONLINE:
2345         state = RAID_STATE_ACTIVE;
2346         break;
2347     case  MPI2_RAID_VOL_STATE_DEGRADED:
2348         state = RAID_STATE_DEGRADED;
2349         break;
2350     case MPI2_RAID_VOL_STATE_FAILED:
2351     case MPI2_RAID_VOL_STATE_MISSING:
2352         state = RAID_STATE_OFFLINE;
2353         break;
2354     }
2355  out:
2356     switch (ioc->hba_mpi_version_belonged) {
2357     case MPI2_VERSION:
2358         raid_set_state(mpt2sas_raid_template, dev, state);
2359         break;
2360     case MPI25_VERSION:
2361     case MPI26_VERSION:
2362         raid_set_state(mpt3sas_raid_template, dev, state);
2363         break;
2364     }
2365 }
2366 
2367 /**
2368  * _scsih_set_level - set raid level
2369  * @ioc: per adapter object
2370  * @sdev: scsi device struct
2371  * @volume_type: volume type
2372  */
2373 static void
2374 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2375     struct scsi_device *sdev, u8 volume_type)
2376 {
2377     enum raid_level level = RAID_LEVEL_UNKNOWN;
2378 
2379     switch (volume_type) {
2380     case MPI2_RAID_VOL_TYPE_RAID0:
2381         level = RAID_LEVEL_0;
2382         break;
2383     case MPI2_RAID_VOL_TYPE_RAID10:
2384         level = RAID_LEVEL_10;
2385         break;
2386     case MPI2_RAID_VOL_TYPE_RAID1E:
2387         level = RAID_LEVEL_1E;
2388         break;
2389     case MPI2_RAID_VOL_TYPE_RAID1:
2390         level = RAID_LEVEL_1;
2391         break;
2392     }
2393 
2394     switch (ioc->hba_mpi_version_belonged) {
2395     case MPI2_VERSION:
2396         raid_set_level(mpt2sas_raid_template,
2397             &sdev->sdev_gendev, level);
2398         break;
2399     case MPI25_VERSION:
2400     case MPI26_VERSION:
2401         raid_set_level(mpt3sas_raid_template,
2402             &sdev->sdev_gendev, level);
2403         break;
2404     }
2405 }
2406 
2407 
2408 /**
2409  * _scsih_get_volume_capabilities - volume capabilities
2410  * @ioc: per adapter object
2411  * @raid_device: the raid_device object
2412  *
2413  * Return: 0 for success, else 1
2414  */
2415 static int
2416 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2417     struct _raid_device *raid_device)
2418 {
2419     Mpi2RaidVolPage0_t *vol_pg0;
2420     Mpi2RaidPhysDiskPage0_t pd_pg0;
2421     Mpi2SasDevicePage0_t sas_device_pg0;
2422     Mpi2ConfigReply_t mpi_reply;
2423     u16 sz;
2424     u8 num_pds;
2425 
2426     if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2427         &num_pds)) || !num_pds) {
2428         dfailprintk(ioc,
2429                 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2430                      __FILE__, __LINE__, __func__));
2431         return 1;
2432     }
2433 
2434     raid_device->num_pds = num_pds;
2435     sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
2436         sizeof(Mpi2RaidVol0PhysDisk_t));
2437     vol_pg0 = kzalloc(sz, GFP_KERNEL);
2438     if (!vol_pg0) {
2439         dfailprintk(ioc,
2440                 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2441                      __FILE__, __LINE__, __func__));
2442         return 1;
2443     }
2444 
2445     if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2446          MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2447         dfailprintk(ioc,
2448                 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2449                      __FILE__, __LINE__, __func__));
2450         kfree(vol_pg0);
2451         return 1;
2452     }
2453 
2454     raid_device->volume_type = vol_pg0->VolumeType;
2455 
2456     /* figure out what the underlying devices are by
2457      * obtaining the device_info bits for the 1st device
2458      */
2459     if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2460         &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2461         vol_pg0->PhysDisk[0].PhysDiskNum))) {
2462         if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2463             &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2464             le16_to_cpu(pd_pg0.DevHandle)))) {
2465             raid_device->device_info =
2466                 le32_to_cpu(sas_device_pg0.DeviceInfo);
2467         }
2468     }
2469 
2470     kfree(vol_pg0);
2471     return 0;
2472 }
2473 
2474 /**
2475  * _scsih_enable_tlr - setting TLR flags
2476  * @ioc: per adapter object
2477  * @sdev: scsi device struct
2478  *
2479  * Enabling Transaction Layer Retries for tape devices when
2480  * vpd page 0x90 is present
2481  *
2482  */
2483 static void
2484 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2485 {
2486 
2487     /* only for TAPE */
2488     if (sdev->type != TYPE_TAPE)
2489         return;
2490 
2491     if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2492         return;
2493 
2494     sas_enable_tlr(sdev);
2495     sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2496         sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2497     return;
2498 
2499 }
2500 
2501 /**
2502  * scsih_slave_configure - device configure routine.
2503  * @sdev: scsi device struct
2504  *
2505  * Return: 0 if ok. Any other return is assumed to be an error and
2506  * the device is ignored.
2507  */
2508 static int
2509 scsih_slave_configure(struct scsi_device *sdev)
2510 {
2511     struct Scsi_Host *shost = sdev->host;
2512     struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2513     struct MPT3SAS_DEVICE *sas_device_priv_data;
2514     struct MPT3SAS_TARGET *sas_target_priv_data;
2515     struct _sas_device *sas_device;
2516     struct _pcie_device *pcie_device;
2517     struct _raid_device *raid_device;
2518     unsigned long flags;
2519     int qdepth;
2520     u8 ssp_target = 0;
2521     char *ds = "";
2522     char *r_level = "";
2523     u16 handle, volume_handle = 0;
2524     u64 volume_wwid = 0;
2525 
2526     qdepth = 1;
2527     sas_device_priv_data = sdev->hostdata;
2528     sas_device_priv_data->configured_lun = 1;
2529     sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2530     sas_target_priv_data = sas_device_priv_data->sas_target;
2531     handle = sas_target_priv_data->handle;
2532 
2533     /* raid volume handling */
2534     if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
2535 
2536         spin_lock_irqsave(&ioc->raid_device_lock, flags);
2537         raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2538         spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2539         if (!raid_device) {
2540             dfailprintk(ioc,
2541                     ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2542                          __FILE__, __LINE__, __func__));
2543             return 1;
2544         }
2545 
2546         if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2547             dfailprintk(ioc,
2548                     ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2549                          __FILE__, __LINE__, __func__));
2550             return 1;
2551         }
2552 
2553         /*
2554          * WARPDRIVE: Initialize the required data for Direct IO
2555          */
2556         mpt3sas_init_warpdrive_properties(ioc, raid_device);
2557 
2558         /* RAID Queue Depth Support
2559          * IS volume = underlying qdepth of drive type, either
2560          *    MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2561          * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2562          */
2563         if (raid_device->device_info &
2564             MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2565             qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2566             ds = "SSP";
2567         } else {
2568             qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2569             if (raid_device->device_info &
2570                 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2571                 ds = "SATA";
2572             else
2573                 ds = "STP";
2574         }
2575 
2576         switch (raid_device->volume_type) {
2577         case MPI2_RAID_VOL_TYPE_RAID0:
2578             r_level = "RAID0";
2579             break;
2580         case MPI2_RAID_VOL_TYPE_RAID1E:
2581             qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2582             if (ioc->manu_pg10.OEMIdentifier &&
2583                 (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2584                 MFG10_GF0_R10_DISPLAY) &&
2585                 !(raid_device->num_pds % 2))
2586                 r_level = "RAID10";
2587             else
2588                 r_level = "RAID1E";
2589             break;
2590         case MPI2_RAID_VOL_TYPE_RAID1:
2591             qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2592             r_level = "RAID1";
2593             break;
2594         case MPI2_RAID_VOL_TYPE_RAID10:
2595             qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2596             r_level = "RAID10";
2597             break;
2598         case MPI2_RAID_VOL_TYPE_UNKNOWN:
2599         default:
2600             qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2601             r_level = "RAIDX";
2602             break;
2603         }
2604 
2605         if (!ioc->hide_ir_msg)
2606             sdev_printk(KERN_INFO, sdev,
2607                "%s: handle(0x%04x), wwid(0x%016llx),"
2608                 " pd_count(%d), type(%s)\n",
2609                 r_level, raid_device->handle,
2610                 (unsigned long long)raid_device->wwid,
2611                 raid_device->num_pds, ds);
2612 
2613         if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2614             blk_queue_max_hw_sectors(sdev->request_queue,
2615                         MPT3SAS_RAID_MAX_SECTORS);
2616             sdev_printk(KERN_INFO, sdev,
2617                     "Set queue's max_sector to: %u\n",
2618                         MPT3SAS_RAID_MAX_SECTORS);
2619         }
2620 
2621         mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2622 
2623         /* raid transport support */
2624         if (!ioc->is_warpdrive)
2625             _scsih_set_level(ioc, sdev, raid_device->volume_type);
2626         return 0;
2627     }
2628 
2629     /* non-raid handling */
2630     if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2631         if (mpt3sas_config_get_volume_handle(ioc, handle,
2632             &volume_handle)) {
2633             dfailprintk(ioc,
2634                     ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2635                          __FILE__, __LINE__, __func__));
2636             return 1;
2637         }
2638         if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2639             volume_handle, &volume_wwid)) {
2640             dfailprintk(ioc,
2641                     ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2642                          __FILE__, __LINE__, __func__));
2643             return 1;
2644         }
2645     }
2646 
2647     /* PCIe handling */
2648     if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2649         spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2650         pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2651                 sas_device_priv_data->sas_target->sas_address);
2652         if (!pcie_device) {
2653             spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2654             dfailprintk(ioc,
2655                     ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2656                          __FILE__, __LINE__, __func__));
2657             return 1;
2658         }
2659 
2660         qdepth = ioc->max_nvme_qd;
2661         ds = "NVMe";
2662         sdev_printk(KERN_INFO, sdev,
2663             "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2664             ds, handle, (unsigned long long)pcie_device->wwid,
2665             pcie_device->port_num);
2666         if (pcie_device->enclosure_handle != 0)
2667             sdev_printk(KERN_INFO, sdev,
2668             "%s: enclosure logical id(0x%016llx), slot(%d)\n",
2669             ds,
2670             (unsigned long long)pcie_device->enclosure_logical_id,
2671             pcie_device->slot);
2672         if (pcie_device->connector_name[0] != '\0')
2673             sdev_printk(KERN_INFO, sdev,
2674                 "%s: enclosure level(0x%04x),"
2675                 "connector name( %s)\n", ds,
2676                 pcie_device->enclosure_level,
2677                 pcie_device->connector_name);
2678 
2679         if (pcie_device->nvme_mdts)
2680             blk_queue_max_hw_sectors(sdev->request_queue,
2681                     pcie_device->nvme_mdts/512);
2682 
2683         pcie_device_put(pcie_device);
2684         spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2685         mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2686         /* Enable the QUEUE_FLAG_NOMERGES flag so that IOs are not
2687          * merged; this avoids the holes that can be created during a
2688          * merge operation.
2689          */
2690         blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
2691                 sdev->request_queue);
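        /*
         * Setting the virt boundary to (page_size - 1) keeps scatter/gather
         * elements within controller pages, presumably required when the
         * firmware translates the IO into NVMe PRP entries.
         */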
2692         blk_queue_virt_boundary(sdev->request_queue,
2693                 ioc->page_size - 1);
2694         return 0;
2695     }
2696 
2697     spin_lock_irqsave(&ioc->sas_device_lock, flags);
2698     sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2699        sas_device_priv_data->sas_target->sas_address,
2700        sas_device_priv_data->sas_target->port);
2701     if (!sas_device) {
2702         spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2703         dfailprintk(ioc,
2704                 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2705                      __FILE__, __LINE__, __func__));
2706         return 1;
2707     }
2708 
2709     sas_device->volume_handle = volume_handle;
2710     sas_device->volume_wwid = volume_wwid;
2711     if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2712         qdepth = (sas_device->port_type > 1) ?
2713             ioc->max_wideport_qd : ioc->max_narrowport_qd;
2714         ssp_target = 1;
2715         if (sas_device->device_info &
2716                 MPI2_SAS_DEVICE_INFO_SEP) {
2717             sdev_printk(KERN_WARNING, sdev,
2718             "set ignore_delay_remove for handle(0x%04x)\n",
2719             sas_device_priv_data->sas_target->handle);
2720             sas_device_priv_data->ignore_delay_remove = 1;
2721             ds = "SES";
2722         } else
2723             ds = "SSP";
2724     } else {
2725         qdepth = ioc->max_sata_qd;
2726         if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2727             ds = "STP";
2728         else if (sas_device->device_info &
2729             MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2730             ds = "SATA";
2731     }
2732 
2733     sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
2734         "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2735         ds, handle, (unsigned long long)sas_device->sas_address,
2736         sas_device->phy, (unsigned long long)sas_device->device_name);
2737 
2738     _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2739 
2740     sas_device_put(sas_device);
2741     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2742 
2743     if (!ssp_target)
2744         _scsih_display_sata_capabilities(ioc, handle, sdev);
2745 
2746 
2747     mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2748 
2749     if (ssp_target) {
2750         sas_read_port_mode_page(sdev);
2751         _scsih_enable_tlr(ioc, sdev);
2752     }
2753 
2754     return 0;
2755 }
2756 
2757 /**
2758  * scsih_bios_param - fetch head, sector, cylinder info for a disk
2759  * @sdev: scsi device struct
2760  * @bdev: pointer to block device context
2761  * @capacity: device size (in 512 byte sectors)
2762  * @params: three element array to place output:
2763  *              params[0] number of heads (max 255)
2764  *              params[1] number of sectors (max 63)
2765  *              params[2] number of cylinders
2766  */
2767 static int
2768 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2769     sector_t capacity, int params[])
2770 {
2771     int     heads;
2772     int     sectors;
2773     sector_t    cylinders;
2774     ulong       dummy;
2775 
2776     heads = 64;
2777     sectors = 32;
2778 
2779     dummy = heads * sectors;
2780     cylinders = capacity;
2781     sector_div(cylinders, dummy);
2782 
2783     /*
2784      * Handle extended translation size for logical drives
2785      * > 1Gb
2786      */
2787     if ((ulong)capacity >= 0x200000) {
2788         heads = 255;
2789         sectors = 63;
2790         dummy = heads * sectors;
2791         cylinders = capacity;
2792         sector_div(cylinders, dummy);
2793     }
2794 
2795     /* return result */
2796     params[0] = heads;
2797     params[1] = sectors;
2798     params[2] = cylinders;
2799 
2800     return 0;
2801 }
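
/*
 * Worked example (illustrative, not from the original source): a 1 TiB disk
 * has a capacity of 2147483648 512-byte sectors, which is >= 0x200000, so the
 * extended translation applies: heads = 255, sectors = 63 and
 * cylinders = 2147483648 / (255 * 63) = 133674.
 */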
2802 
2803 /**
2804  * _scsih_response_code - translation of device response code
2805  * @ioc: per adapter object
2806  * @response_code: response code returned by the device
2807  */
2808 static void
2809 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2810 {
2811     char *desc;
2812 
2813     switch (response_code) {
2814     case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2815         desc = "task management request completed";
2816         break;
2817     case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2818         desc = "invalid frame";
2819         break;
2820     case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2821         desc = "task management request not supported";
2822         break;
2823     case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2824         desc = "task management request failed";
2825         break;
2826     case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2827         desc = "task management request succeeded";
2828         break;
2829     case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2830         desc = "invalid lun";
2831         break;
2832     case 0xA:
2833         desc = "overlapped tag attempted";
2834         break;
2835     case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2836         desc = "task queued, however not sent to target";
2837         break;
2838     default:
2839         desc = "unknown";
2840         break;
2841     }
2842     ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2843 }
2844 
2845 /**
2846  * _scsih_tm_done - tm completion routine
2847  * @ioc: per adapter object
2848  * @smid: system request message index
2849  * @msix_index: MSIX table index supplied by the OS
2850  * @reply: reply message frame(lower 32bit addr)
2851  * Context: none.
2852  *
2853  * The callback handler when using scsih_issue_tm.
2854  *
2855  * Return: 1 meaning mf should be freed from _base_interrupt
2856  *         0 means the mf is freed from this function.
2857  */
2858 static u8
2859 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2860 {
2861     MPI2DefaultReply_t *mpi_reply;
2862 
2863     if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2864         return 1;
2865     if (ioc->tm_cmds.smid != smid)
2866         return 1;
2867     ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2868     mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
2869     if (mpi_reply) {
2870         memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2871         ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2872     }
2873     ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2874     complete(&ioc->tm_cmds.done);
2875     return 1;
2876 }
2877 
2878 /**
2879  * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2880  * @ioc: per adapter object
2881  * @handle: device handle
2882  *
2883  * During a task management request, we need to freeze the device queue.
2884  */
2885 void
2886 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2887 {
2888     struct MPT3SAS_DEVICE *sas_device_priv_data;
2889     struct scsi_device *sdev;
2890     u8 skip = 0;
2891 
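    /*
     * Once the matching target is found, keep iterating with 'skip' set
     * instead of breaking out, presumably so that shost_for_each_device()
     * can release its device reference on every iteration.
     */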
2892     shost_for_each_device(sdev, ioc->shost) {
2893         if (skip)
2894             continue;
2895         sas_device_priv_data = sdev->hostdata;
2896         if (!sas_device_priv_data)
2897             continue;
2898         if (sas_device_priv_data->sas_target->handle == handle) {
2899             sas_device_priv_data->sas_target->tm_busy = 1;
2900             skip = 1;
2901             ioc->ignore_loginfos = 1;
2902         }
2903     }
2904 }
2905 
2906 /**
2907  * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2908  * @ioc: per adapter object
2909  * @handle: device handle
2910  *
2911  * During a task management request, we need to freeze the device queue.
2912  */
2913 void
2914 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2915 {
2916     struct MPT3SAS_DEVICE *sas_device_priv_data;
2917     struct scsi_device *sdev;
2918     u8 skip = 0;
2919 
2920     shost_for_each_device(sdev, ioc->shost) {
2921         if (skip)
2922             continue;
2923         sas_device_priv_data = sdev->hostdata;
2924         if (!sas_device_priv_data)
2925             continue;
2926         if (sas_device_priv_data->sas_target->handle == handle) {
2927             sas_device_priv_data->sas_target->tm_busy = 0;
2928             skip = 1;
2929             ioc->ignore_loginfos = 0;
2930         }
2931     }
2932 }
2933 
2934 /**
2935  * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
2936  * @ioc: per adapter object
2937  * @channel: the channel assigned by the OS
2938  * @id: the id assigned by the OS
2939  * @lun: lun number
2940  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2941  * @smid_task: smid assigned to the task
2942  *
2943  * Check whether the TM has aborted the timed out SCSI command; if
2944  * the TM has aborted the IO then return SUCCESS, else return FAILED.
2945  */
2946 static int
2947 scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
2948     uint id, uint lun, u8 type, u16 smid_task)
2949 {
2950 
2951     if (smid_task <= ioc->shost->can_queue) {
2952         switch (type) {
2953         case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
2954             if (!(_scsih_scsi_lookup_find_by_target(ioc,
2955                 id, channel)))
2956                 return SUCCESS;
2957             break;
2958         case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
2959         case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
2960             if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
2961                 lun, channel)))
2962                 return SUCCESS;
2963             break;
2964         default:
2965             return SUCCESS;
2966         }
2967     } else if (smid_task == ioc->scsih_cmds.smid) {
2968         if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
2969             (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
2970             return SUCCESS;
2971     } else if (smid_task == ioc->ctl_cmds.smid) {
2972         if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
2973             (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
2974             return SUCCESS;
2975     }
2976 
2977     return FAILED;
2978 }
2979 
2980 /**
2981  * scsih_tm_post_processing - post processing of target & LUN reset
2982  * @ioc: per adapter object
2983  * @handle: device handle
2984  * @channel: the channel assigned by the OS
2985  * @id: the id assigned by the OS
2986  * @lun: lun number
2987  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2988  * @smid_task: smid assigned to the task
2989  *
2990  * Post processing of target & LUN reset. Due to interrupt latency
2991  * issues it is possible that the interrupt for the aborted IO has not
2992  * been received yet. So before returning a failure status, poll the
2993  * reply descriptor pools for the reply of the timed out SCSI command.
2994  * Return FAILED if the reply for the timed out command is not received,
2995  * otherwise return SUCCESS.
2996  */
2997 static int
2998 scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2999     uint channel, uint id, uint lun, u8 type, u16 smid_task)
3000 {
3001     int rc;
3002 
3003     rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3004     if (rc == SUCCESS)
3005         return rc;
3006 
3007     ioc_info(ioc,
3008         "Poll ReplyDescriptor queues for completion of"
3009         " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
3010         smid_task, type, handle);
3011 
3012     /*
3013      * Due to interrupt latency issues, driver may receive interrupt for
3014      * TM first and then for aborted SCSI IO command. So, poll all the
3015      * ReplyDescriptor pools before returning the FAILED status to SML.
3016      */
3017     mpt3sas_base_mask_interrupts(ioc);
3018     mpt3sas_base_sync_reply_irqs(ioc, 1);
3019     mpt3sas_base_unmask_interrupts(ioc);
3020 
3021     return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3022 }
3023 
3024 /**
3025  * mpt3sas_scsih_issue_tm - main routine for sending tm requests
3026  * @ioc: per adapter struct
3027  * @handle: device handle
3028  * @channel: the channel assigned by the OS
3029  * @id: the id assigned by the OS
3030  * @lun: lun number
3031  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
3032  * @smid_task: smid assigned to the task
3033  * @msix_task: MSIX table index supplied by the OS
3034  * @timeout: timeout in seconds
3035  * @tr_method: Target Reset Method
3036  * Context: user
3037  *
3038  * A generic API for sending task management requests to firmware.
3039  *
3040  * The callback index is set inside `ioc->tm_cb_idx`.
3041  * The caller is responsible for checking for outstanding commands.
3042  *
3043  * Return: SUCCESS or FAILED.
3044  */
3045 int
3046 mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
3047     uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
3048     u8 timeout, u8 tr_method)
3049 {
3050     Mpi2SCSITaskManagementRequest_t *mpi_request;
3051     Mpi2SCSITaskManagementReply_t *mpi_reply;
3052     Mpi25SCSIIORequest_t *request;
3053     u16 smid = 0;
3054     u32 ioc_state;
3055     int rc;
3056     u8 issue_reset = 0;
3057 
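    /*
     * Callers must hold ioc->tm_cmds.mutex, normally taken via
     * mpt3sas_scsih_issue_locked_tm().
     */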
3058     lockdep_assert_held(&ioc->tm_cmds.mutex);
3059 
3060     if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
3061         ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
3062         return FAILED;
3063     }
3064 
3065     if (ioc->shost_recovery || ioc->remove_host ||
3066         ioc->pci_error_recovery) {
3067         ioc_info(ioc, "%s: host reset in progress!\n", __func__);
3068         return FAILED;
3069     }
3070 
3071     ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3072     if (ioc_state & MPI2_DOORBELL_USED) {
3073         dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
3074         rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3075         return (!rc) ? SUCCESS : FAILED;
3076     }
3077 
3078     if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3079         mpt3sas_print_fault_code(ioc, ioc_state &
3080             MPI2_DOORBELL_DATA_MASK);
3081         rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3082         return (!rc) ? SUCCESS : FAILED;
3083     } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
3084         MPI2_IOC_STATE_COREDUMP) {
3085         mpt3sas_print_coredump_info(ioc, ioc_state &
3086             MPI2_DOORBELL_DATA_MASK);
3087         rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3088         return (!rc) ? SUCCESS : FAILED;
3089     }
3090 
3091     smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
3092     if (!smid) {
3093         ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
3094         return FAILED;
3095     }
3096 
3097     dtmprintk(ioc,
3098           ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
3099                handle, type, smid_task, timeout, tr_method));
3100     ioc->tm_cmds.status = MPT3_CMD_PENDING;
3101     mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3102     ioc->tm_cmds.smid = smid;
3103     memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3104     memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
3105     mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3106     mpi_request->DevHandle = cpu_to_le16(handle);
3107     mpi_request->TaskType = type;
3108     if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
3109         type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
3110         mpi_request->MsgFlags = tr_method;
3111     mpi_request->TaskMID = cpu_to_le16(smid_task);
3112     int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
3113     mpt3sas_scsih_set_tm_flag(ioc, handle);
3114     init_completion(&ioc->tm_cmds.done);
3115     ioc->put_smid_hi_priority(ioc, smid, msix_task);
3116     wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
3117     if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
3118         mpt3sas_check_cmd_timeout(ioc,
3119             ioc->tm_cmds.status, mpi_request,
3120             sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
3121         if (issue_reset) {
3122             rc = mpt3sas_base_hard_reset_handler(ioc,
3123                     FORCE_BIG_HAMMER);
3124             rc = (!rc) ? SUCCESS : FAILED;
3125             goto out;
3126         }
3127     }
3128 
3129     /* sync IRQs in case those were busy during flush. */
3130     mpt3sas_base_sync_reply_irqs(ioc, 0);
3131 
3132     if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
3133         mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
3134         mpi_reply = ioc->tm_cmds.reply;
3135         dtmprintk(ioc,
3136               ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
3137                    le16_to_cpu(mpi_reply->IOCStatus),
3138                    le32_to_cpu(mpi_reply->IOCLogInfo),
3139                    le32_to_cpu(mpi_reply->TerminationCount)));
3140         if (ioc->logging_level & MPT_DEBUG_TM) {
3141             _scsih_response_code(ioc, mpi_reply->ResponseCode);
3142             if (mpi_reply->IOCStatus)
3143                 _debug_dump_mf(mpi_request,
3144                     sizeof(Mpi2SCSITaskManagementRequest_t)/4);
3145         }
3146     }
3147 
3148     switch (type) {
3149     case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
3150         rc = SUCCESS;
3151         /*
3152          * If the DevHandle field in smid_task's entry of the request
3153          * pool doesn't match the device handle on which this task
3154          * abort TM was issued, the TM successfully aborted the timed
3155          * out command, because smid_task's entry in the request pool
3156          * is memset to zero once the timed out command is returned to
3157          * the SML. If the command was not aborted, smid_task's entry
3158          * is not cleared: it still holds the DevHandle on which this
3159          * task abort TM was issued, and the driver returns the TM
3160          * status as FAILED.
3161          */
3162         request = mpt3sas_base_get_msg_frame(ioc, smid_task);
3163         if (le16_to_cpu(request->DevHandle) != handle)
3164             break;
3165 
3166         ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
3167             " timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
3168             handle, timeout, tr_method, smid_task, msix_task);
3169         rc = FAILED;
3170         break;
3171 
3172     case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
3173     case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
3174     case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
3175         rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
3176             type, smid_task);
3177         break;
3178     case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
3179         rc = SUCCESS;
3180         break;
3181     default:
3182         rc = FAILED;
3183         break;
3184     }
3185 
3186 out:
3187     mpt3sas_scsih_clear_tm_flag(ioc, handle);
3188     ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
3189     return rc;
3190 }
3191 
3192 int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
3193         uint channel, uint id, u64 lun, u8 type, u16 smid_task,
3194         u16 msix_task, u8 timeout, u8 tr_method)
3195 {
3196     int ret;
3197 
3198     mutex_lock(&ioc->tm_cmds.mutex);
3199     ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
3200             smid_task, msix_task, timeout, tr_method);
3201     mutex_unlock(&ioc->tm_cmds.mutex);
3202 
3203     return ret;
3204 }
3205 
3206 /**
3207  * _scsih_tm_display_info - displays info about the device
3208  * @ioc: per adapter struct
3209  * @scmd: pointer to scsi command object
3210  *
3211  * Called by task management callback handlers.
3212  */
3213 static void
3214 _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
3215 {
3216     struct scsi_target *starget = scmd->device->sdev_target;
3217     struct MPT3SAS_TARGET *priv_target = starget->hostdata;
3218     struct _sas_device *sas_device = NULL;
3219     struct _pcie_device *pcie_device = NULL;
3220     unsigned long flags;
3221     char *device_str = NULL;
3222 
3223     if (!priv_target)
3224         return;
3225     if (ioc->hide_ir_msg)
3226         device_str = "WarpDrive";
3227     else
3228         device_str = "volume";
3229 
3230     scsi_print_command(scmd);
3231     if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3232         starget_printk(KERN_INFO, starget,
3233             "%s handle(0x%04x), %s wwid(0x%016llx)\n",
3234             device_str, priv_target->handle,
3235             device_str, (unsigned long long)priv_target->sas_address);
3236 
3237     } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
3238         spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3239         pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
3240         if (pcie_device) {
3241             starget_printk(KERN_INFO, starget,
3242                 "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
3243                 pcie_device->handle,
3244                 (unsigned long long)pcie_device->wwid,
3245                 pcie_device->port_num);
3246             if (pcie_device->enclosure_handle != 0)
3247                 starget_printk(KERN_INFO, starget,
3248                     "enclosure logical id(0x%016llx), slot(%d)\n",
3249                     (unsigned long long)
3250                     pcie_device->enclosure_logical_id,
3251                     pcie_device->slot);
3252             if (pcie_device->connector_name[0] != '\0')
3253                 starget_printk(KERN_INFO, starget,
3254                     "enclosure level(0x%04x), connector name( %s)\n",
3255                     pcie_device->enclosure_level,
3256                     pcie_device->connector_name);
3257             pcie_device_put(pcie_device);
3258         }
3259         spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
3260 
3261     } else {
3262         spin_lock_irqsave(&ioc->sas_device_lock, flags);
3263         sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
3264         if (sas_device) {
3265             if (priv_target->flags &
3266                 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3267                 starget_printk(KERN_INFO, starget,
3268                     "volume handle(0x%04x), "
3269                     "volume wwid(0x%016llx)\n",
3270                     sas_device->volume_handle,
3271                    (unsigned long long)sas_device->volume_wwid);
3272             }
3273             starget_printk(KERN_INFO, starget,
3274                 "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
3275                 sas_device->handle,
3276                 (unsigned long long)sas_device->sas_address,
3277                 sas_device->phy);
3278 
3279             _scsih_display_enclosure_chassis_info(NULL, sas_device,
3280                 NULL, starget);
3281 
3282             sas_device_put(sas_device);
3283         }
3284         spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3285     }
3286 }
3287 
3288 /**
3289  * scsih_abort - eh thread's main abort routine
3290  * @scmd: pointer to scsi command object
3291  *
3292  * Return: SUCCESS if command aborted else FAILED
3293  */
3294 static int
3295 scsih_abort(struct scsi_cmnd *scmd)
3296 {
3297     struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3298     struct MPT3SAS_DEVICE *sas_device_priv_data;
3299     struct scsiio_tracker *st = scsi_cmd_priv(scmd);
3300     u16 handle;
3301     int r;
3302 
3303     u8 timeout = 30;
3304     struct _pcie_device *pcie_device = NULL;
3305     sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
3306         " scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
3307         scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
3308         (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000);
3309     _scsih_tm_display_info(ioc, scmd);
3310 
3311     sas_device_priv_data = scmd->device->hostdata;
3312     if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3313         ioc->remove_host) {
3314         sdev_printk(KERN_INFO, scmd->device,
3315             "device has been deleted! scmd(0x%p)\n", scmd);
3316         scmd->result = DID_NO_CONNECT << 16;
3317         scsi_done(scmd);
3318         r = SUCCESS;
3319         goto out;
3320     }
3321 
3322     /* check for completed command */
3323     if (st == NULL || st->cb_idx == 0xFF) {
3324         sdev_printk(KERN_INFO, scmd->device, "No reference found in "
3325             "driver, assuming scmd(0x%p) might have completed\n", scmd);
3326         scmd->result = DID_RESET << 16;
3327         r = SUCCESS;
3328         goto out;
3329     }
3330 
3331     /* for hidden raid components and volumes this is not supported */
3332     if (sas_device_priv_data->sas_target->flags &
3333         MPT_TARGET_FLAGS_RAID_COMPONENT ||
3334         sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3335         scmd->result = DID_RESET << 16;
3336         r = FAILED;
3337         goto out;
3338     }
3339 
3340     mpt3sas_halt_firmware(ioc);
3341 
3342     handle = sas_device_priv_data->sas_target->handle;
3343     pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3344     if (pcie_device && (!ioc->tm_custom_handling) &&
3345         (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
3346         timeout = ioc->nvme_abort_timeout;
3347     r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3348         scmd->device->id, scmd->device->lun,
3349         MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3350         st->smid, st->msix_io, timeout, 0);
3351     /* Command must be cleared after abort */
3352     if (r == SUCCESS && st->cb_idx != 0xFF)
3353         r = FAILED;
3354  out:
3355     sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
3356         ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3357     if (pcie_device)
3358         pcie_device_put(pcie_device);
3359     return r;
3360 }
3361 
3362 /**
3363  * scsih_dev_reset - eh thread's main device reset routine
3364  * @scmd: pointer to scsi command object
3365  *
3366  * Return: SUCCESS if the device was reset else FAILED
3367  */
3368 static int
3369 scsih_dev_reset(struct scsi_cmnd *scmd)
3370 {
3371     struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3372     struct MPT3SAS_DEVICE *sas_device_priv_data;
3373     struct _sas_device *sas_device = NULL;
3374     struct _pcie_device *pcie_device = NULL;
3375     u16 handle;
3376     u8  tr_method = 0;
3377     u8  tr_timeout = 30;
3378     int r;
3379 
3380     struct scsi_target *starget = scmd->device->sdev_target;
3381     struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3382 
3383     sdev_printk(KERN_INFO, scmd->device,
3384         "attempting device reset! scmd(0x%p)\n", scmd);
3385     _scsih_tm_display_info(ioc, scmd);
3386 
3387     sas_device_priv_data = scmd->device->hostdata;
3388     if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3389         ioc->remove_host) {
3390         sdev_printk(KERN_INFO, scmd->device,
3391             "device has been deleted! scmd(0x%p)\n", scmd);
3392         scmd->result = DID_NO_CONNECT << 16;
3393         scsi_done(scmd);
3394         r = SUCCESS;
3395         goto out;
3396     }
3397 
3398     /* for hidden raid components obtain the volume_handle */
3399     handle = 0;
3400     if (sas_device_priv_data->sas_target->flags &
3401         MPT_TARGET_FLAGS_RAID_COMPONENT) {
3402         sas_device = mpt3sas_get_sdev_from_target(ioc,
3403                 target_priv_data);
3404         if (sas_device)
3405             handle = sas_device->volume_handle;
3406     } else
3407         handle = sas_device_priv_data->sas_target->handle;
3408 
3409     if (!handle) {
3410         scmd->result = DID_RESET << 16;
3411         r = FAILED;
3412         goto out;
3413     }
3414 
3415     pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3416 
3417     if (pcie_device && (!ioc->tm_custom_handling) &&
3418         (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3419         tr_timeout = pcie_device->reset_timeout;
3420         tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3421     } else
3422         tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3423 
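    /*
     * smid_task and msix_task are passed as 0: a LUN reset is not
     * tied to one specific outstanding command.
     */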
3424     r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3425         scmd->device->id, scmd->device->lun,
3426         MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
3427         tr_timeout, tr_method);
3428     /* Check for busy commands after reset */
3429     if (r == SUCCESS && scsi_device_busy(scmd->device))
3430         r = FAILED;
3431  out:
3432     sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
3433         ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3434 
3435     if (sas_device)
3436         sas_device_put(sas_device);
3437     if (pcie_device)
3438         pcie_device_put(pcie_device);
3439 
3440     return r;
3441 }
3442 
3443 /**
3444  * scsih_target_reset - eh thread's main target reset routine
3445  * @scmd: pointer to scsi command object
3446  *
3447  * Return: SUCCESS if the target was reset else FAILED
3448  */
3449 static int
3450 scsih_target_reset(struct scsi_cmnd *scmd)
3451 {
3452     struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3453     struct MPT3SAS_DEVICE *sas_device_priv_data;
3454     struct _sas_device *sas_device = NULL;
3455     struct _pcie_device *pcie_device = NULL;
3456     u16 handle;
3457     u8  tr_method = 0;
3458     u8  tr_timeout = 30;
3459     int r;
3460     struct scsi_target *starget = scmd->device->sdev_target;
3461     struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3462 
3463     starget_printk(KERN_INFO, starget,
3464         "attempting target reset! scmd(0x%p)\n", scmd);
3465     _scsih_tm_display_info(ioc, scmd);
3466 
3467     sas_device_priv_data = scmd->device->hostdata;
3468     if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3469         ioc->remove_host) {
3470         starget_printk(KERN_INFO, starget,
3471             "target has been deleted! scmd(0x%p)\n", scmd);
3472         scmd->result = DID_NO_CONNECT << 16;
3473         scsi_done(scmd);
3474         r = SUCCESS;
3475         goto out;
3476     }
3477 
3478     /* for hidden raid components obtain the volume_handle */
3479     handle = 0;
3480     if (sas_device_priv_data->sas_target->flags &
3481         MPT_TARGET_FLAGS_RAID_COMPONENT) {
3482         sas_device = mpt3sas_get_sdev_from_target(ioc,
3483                 target_priv_data);
3484         if (sas_device)
3485             handle = sas_device->volume_handle;
3486     } else
3487         handle = sas_device_priv_data->sas_target->handle;
3488 
3489     if (!handle) {
3490         scmd->result = DID_RESET << 16;
3491         r = FAILED;
3492         goto out;
3493     }
3494 
3495     pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3496 
3497     if (pcie_device && (!ioc->tm_custom_handling) &&
3498         (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3499         tr_timeout = pcie_device->reset_timeout;
3500         tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3501     } else
3502         tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
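    /*
     * A target reset addresses the whole target, so the LUN and
     * TaskMID (smid_task) are passed as 0.
     */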
3503     r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3504         scmd->device->id, 0,
3505         MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
3506         tr_timeout, tr_method);
3507     /* Check for busy commands after reset */
3508     if (r == SUCCESS && atomic_read(&starget->target_busy))
3509         r = FAILED;
3510  out:
3511     starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
3512         ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3513 
3514     if (sas_device)
3515         sas_device_put(sas_device);
3516     if (pcie_device)
3517         pcie_device_put(pcie_device);
3518     return r;
3519 }
3520 
3521 
3522 /**
3523  * scsih_host_reset - eh thread's main host reset routine
3524  * @scmd: pointer to scsi command object
3525  *
3526  * Return: SUCCESS if the host was reset else FAILED
3527  */
3528 static int
3529 scsih_host_reset(struct scsi_cmnd *scmd)
3530 {
3531     struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3532     int r, retval;
3533 
3534     ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
3535     scsi_print_command(scmd);
3536 
3537     if (ioc->is_driver_loading || ioc->remove_host) {
3538         ioc_info(ioc, "Blocking the host reset\n");
3539         r = FAILED;
3540         goto out;
3541     }
3542 
3543     retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3544     r = (retval < 0) ? FAILED : SUCCESS;
3545 out:
3546     ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
3547          r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3548 
3549     return r;
3550 }
3551 
3552 /**
3553  * _scsih_fw_event_add - insert and queue up fw_event
3554  * @ioc: per adapter object
3555  * @fw_event: object describing the event
3556  * Context: This function will acquire ioc->fw_event_lock.
3557  *
3558  * This adds the firmware event object to the linked list, then queues it up
3559  * to be processed from user context.
3560  */
3561 static void
3562 _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
3563 {
3564     unsigned long flags;
3565 
3566     if (ioc->firmware_event_thread == NULL)
3567         return;
3568 
3569     spin_lock_irqsave(&ioc->fw_event_lock, flags);
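    /*
     * Take two references on the fw_event: one for its place on
     * ioc->fw_event_list and one for the queued work item.  Each
     * reference is dropped when that user is done with the event.
     */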
3570     fw_event_work_get(fw_event);
3571     INIT_LIST_HEAD(&fw_event->list);
3572     list_add_tail(&fw_event->list, &ioc->fw_event_list);
3573     INIT_WORK(&fw_event->work, _firmware_event_work);
3574     fw_event_work_get(fw_event);
3575     queue_work(ioc->firmware_event_thread, &fw_event->work);
3576     spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3577 }
3578 
3579 /**
3580  * _scsih_fw_event_del_from_list - delete fw_event from the list
3581  * @ioc: per adapter object
3582  * @fw_event: object describing the event
3583  * Context: This function will acquire ioc->fw_event_lock.
3584  *
3585  * If the fw_event is on the fw_event_list, remove it and do a put.
3586  */
3587 static void
3588 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3589     *fw_event)
3590 {
3591     unsigned long flags;
3592 
3593     spin_lock_irqsave(&ioc->fw_event_lock, flags);
3594     if (!list_empty(&fw_event->list)) {
3595         list_del_init(&fw_event->list);
3596         fw_event_work_put(fw_event);
3597     }
3598     spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3599 }
3600 
3601 
3602 /**
3603  * mpt3sas_send_trigger_data_event - send event for processing trigger data
3604  * @ioc: per adapter object
3605  * @event_data: trigger event data
3606  */
3607 void
3608 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3609     struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3610 {
3611     struct fw_event_work *fw_event;
3612     u16 sz;
3613 
3614     if (ioc->is_driver_loading)
3615         return;
3616     sz = sizeof(*event_data);
3617     fw_event = alloc_fw_event_work(sz);
3618     if (!fw_event)
3619         return;
3620     fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3621     fw_event->ioc = ioc;
3622     memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3623     _scsih_fw_event_add(ioc, fw_event);
3624     fw_event_work_put(fw_event);
3625 }
3626 
3627 /**
3628  * _scsih_error_recovery_delete_devices - remove devices not responding
3629  * @ioc: per adapter object
3630  */
3631 static void
3632 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3633 {
3634     struct fw_event_work *fw_event;
3635 
3636     fw_event = alloc_fw_event_work(0);
3637     if (!fw_event)
3638         return;
3639     fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3640     fw_event->ioc = ioc;
3641     _scsih_fw_event_add(ioc, fw_event);
3642     fw_event_work_put(fw_event);
3643 }
3644 
3645 /**
3646  * mpt3sas_port_enable_complete - port enable completed (fake event)
3647  * @ioc: per adapter object
3648  */
3649 void
3650 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3651 {
3652     struct fw_event_work *fw_event;
3653 
3654     fw_event = alloc_fw_event_work(0);
3655     if (!fw_event)
3656         return;
3657     fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3658     fw_event->ioc = ioc;
3659     _scsih_fw_event_add(ioc, fw_event);
3660     fw_event_work_put(fw_event);
3661 }
3662 
3663 static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3664 {
3665     unsigned long flags;
3666     struct fw_event_work *fw_event = NULL;
3667 
3668     spin_lock_irqsave(&ioc->fw_event_lock, flags);
3669     if (!list_empty(&ioc->fw_event_list)) {
3670         fw_event = list_first_entry(&ioc->fw_event_list,
3671                 struct fw_event_work, list);
3672         list_del_init(&fw_event->list);
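        /*
         * Drop the reference held for list membership; the queued
         * work item still holds its own reference, so the object
         * remains valid for the caller.
         */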
3673         fw_event_work_put(fw_event);
3674     }
3675     spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3676 
3677     return fw_event;
3678 }
3679 
3680 /**
3681  * _scsih_fw_event_cleanup_queue - cleanup event queue
3682  * @ioc: per adapter object
3683  *
3684  * Walk the firmware event queue, either killing timers, or waiting
3685  * for outstanding events to complete
3686  *
3687  * Context: task, can sleep
3688  */
3689 static void
3690 _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3691 {
3692     struct fw_event_work *fw_event;
3693 
3694     if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
3695         !ioc->firmware_event_thread)
3696         return;
3697     /*
3698      * Mark the currently running event as ignored so that it
3699      * exits quickly. Since a diag reset has occurred, there is
3700      * no point in processing the remaining stale event data
3701      * entries.
3702      */
3703     if (ioc->shost_recovery && ioc->current_event)
3704         ioc->current_event->ignore = 1;
3705 
3706     ioc->fw_events_cleanup = 1;
3707     while ((fw_event = dequeue_next_fw_event(ioc)) ||
3708          (fw_event = ioc->current_event)) {
3709 
3710         /*
3711          * Don't call cancel_work_sync() for the current_event
3712          * unless it is MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3713          * otherwise we may deadlock if a hard reset is issued
3714          * as part of processing the current_event.
3715          *
3716          * The original logic of cleaning up the current_event was
3717          * added to handle back-to-back host resets issued by the
3718          * user: the driver used to process two instances of the
3719          * MPT3SAS_REMOVE_UNRESPONDING_DEVICES event back to back,
3720          * which caused the driver to unregister the devices from
3721          * the SML.
3722          */
3723 
3724         if (fw_event == ioc->current_event &&
3725             ioc->current_event->event !=
3726             MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
3727             ioc->current_event = NULL;
3728             continue;
3729         }
3730 
3731         /*
3732          * The driver has to clear the ioc->start_scan flag when
3733          * it cleans up MPT3SAS_PORT_ENABLE_COMPLETE; otherwise
3734          * the scsi_scan_host() API waits for its 5 minute timer
3735          * to expire. If we exit scsi_scan_host() early, we can
3736          * issue a new port enable request as part of the current
3737          * diag reset.
3738          */
3739         if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) {
3740             ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
3741             ioc->start_scan = 0;
3742         }
3743 
3744         /*
3745          * Wait on the fw_event to complete. If this returns 1, then
3746          * the event was never executed, and we need a put for the
3747          * reference the work had on the fw_event.
3748          *
3749          * If it did execute, we wait for it to finish, and the put will
3750          * happen from _firmware_event_work()
3751          */
3752         if (cancel_work_sync(&fw_event->work))
3753             fw_event_work_put(fw_event);
3754 
3755     }
3756     ioc->fw_events_cleanup = 0;
3757 }
3758 
3759 /**
3760  * _scsih_internal_device_block - block the sdev device
3761  * @sdev: per device object
3762  * @sas_device_priv_data: per device driver private data
3763  *
3764  * Make sure the device is blocked without error; if not,
3765  * print an error.
3766  */
3767 static void
3768 _scsih_internal_device_block(struct scsi_device *sdev,
3769             struct MPT3SAS_DEVICE *sas_device_priv_data)
3770 {
3771     int r = 0;
3772 
3773     sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3774         sas_device_priv_data->sas_target->handle);
3775     sas_device_priv_data->block = 1;
3776 
3777     r = scsi_internal_device_block_nowait(sdev);
3778     if (r == -EINVAL)
3779         sdev_printk(KERN_WARNING, sdev,
3780             "device_block failed with return(%d) for handle(0x%04x)\n",
3781             r, sas_device_priv_data->sas_target->handle);
3782 }
3783 
3784 /**
3785  * _scsih_internal_device_unblock - unblock the sdev device
3786  * @sdev: per device object
3787  * @sas_device_priv_data: per device driver private data
3788  * Make sure the device is unblocked without error; if not,
3789  * retry by blocking and then unblocking.
3790  */
3791 
3792 static void
3793 _scsih_internal_device_unblock(struct scsi_device *sdev,
3794             struct MPT3SAS_DEVICE *sas_device_priv_data)
3795 {
3796     int r = 0;
3797 
3798     sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3799         "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
3800     sas_device_priv_data->block = 0;
3801     r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3802     if (r == -EINVAL) {
3803         /* The device has been set to SDEV_RUNNING by SD layer during
3804          * device addition but the request queue is still stopped by
3805          * our earlier block call. We need to perform a block again
3806          * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
3807 
3808         sdev_printk(KERN_WARNING, sdev,
3809             "device_unblock failed with return(%d) for handle(0x%04x) "
3810             "performing a block followed by an unblock\n",
3811             r, sas_device_priv_data->sas_target->handle);
3812         sas_device_priv_data->block = 1;
3813         r = scsi_internal_device_block_nowait(sdev);
3814         if (r)
3815             sdev_printk(KERN_WARNING, sdev, "retried device_block "
3816                 "failed with return(%d) for handle(0x%04x)\n",
3817                 r, sas_device_priv_data->sas_target->handle);
3818 
3819         sas_device_priv_data->block = 0;
3820         r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3821         if (r)
3822             sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3823                 " failed with return(%d) for handle(0x%04x)\n",
3824                 r, sas_device_priv_data->sas_target->handle);
3825     }
3826 }
3827 
3828 /**
3829  * _scsih_ublock_io_all_device - unblock every device
3830  * @ioc: per adapter object
3831  *
3832  * Change the device state from SDEV_BLOCK to SDEV_RUNNING.
3833  */
3834 static void
3835 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3836 {
3837     struct MPT3SAS_DEVICE *sas_device_priv_data;
3838     struct scsi_device *sdev;
3839 
3840     shost_for_each_device(sdev, ioc->shost) {
3841         sas_device_priv_data = sdev->hostdata;
3842         if (!sas_device_priv_data)
3843             continue;
3844         if (!sas_device_priv_data->block)
3845             continue;
3846 
3847         dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3848             "device_running, handle(0x%04x)\n",
3849             sas_device_priv_data->sas_target->handle));
3850         _scsih_internal_device_unblock(sdev, sas_device_priv_data);
3851     }
3852 }
3853 
3854 
3855 /**
3856  * _scsih_ublock_io_device - unblock IO for a device
3857  * @ioc: per adapter object
3858  * @sas_address: sas address
3859  * @port: hba port entry
3860  *
3861  * Unblock the sdevs that match the given sas address and port.
3862  */
3863 static void
3864 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
3865     u64 sas_address, struct hba_port *port)
3866 {
3867     struct MPT3SAS_DEVICE *sas_device_priv_data;
3868     struct scsi_device *sdev;
3869 
3870     shost_for_each_device(sdev, ioc->shost) {
3871         sas_device_priv_data = sdev->hostdata;
3872         if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
3873             continue;
3874         if (sas_device_priv_data->sas_target->sas_address
3875             != sas_address)
3876             continue;
3877         if (sas_device_priv_data->sas_target->port != port)
3878             continue;
3879         if (sas_device_priv_data->block)
3880             _scsih_internal_device_unblock(sdev,
3881                 sas_device_priv_data);
3882     }
3883 }
3884 
3885 /**
3886  * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3887  * @ioc: per adapter object
3888  *
3889  * During device pull we need to appropriately set the sdev state.
3890  */
3891 static void
3892 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3893 {
3894     struct MPT3SAS_DEVICE *sas_device_priv_data;
3895     struct scsi_device *sdev;
3896 
3897     shost_for_each_device(sdev, ioc->shost) {
3898         sas_device_priv_data = sdev->hostdata;
3899         if (!sas_device_priv_data)
3900             continue;
3901         if (sas_device_priv_data->block)
3902             continue;
3903         if (sas_device_priv_data->ignore_delay_remove) {
3904             sdev_printk(KERN_INFO, sdev,
3905             "%s skip device_block for SES handle(0x%04x)\n",
3906             __func__, sas_device_priv_data->sas_target->handle);
3907             continue;
3908         }
3909         _scsih_internal_device_block(sdev, sas_device_priv_data);
3910     }
3911 }
3912 
3913 /**
3914  * _scsih_block_io_device - set the device state to SDEV_BLOCK
3915  * @ioc: per adapter object
3916  * @handle: device handle
3917  *
3918  * During device pull we need to appropriately set the sdev state.
3919  */
3920 static void
3921 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3922 {
3923     struct MPT3SAS_DEVICE *sas_device_priv_data;
3924     struct scsi_device *sdev;
3925     struct _sas_device *sas_device;
3926 
3927     sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3928 
3929     shost_for_each_device(sdev, ioc->shost) {
3930         sas_device_priv_data = sdev->hostdata;
3931         if (!sas_device_priv_data)
3932             continue;
3933         if (sas_device_priv_data->sas_target->handle != handle)
3934             continue;
3935         if (sas_device_priv_data->block)
3936             continue;
3937         if (sas_device && sas_device->pend_sas_rphy_add)
3938             continue;
3939         if (sas_device_priv_data->ignore_delay_remove) {
3940             sdev_printk(KERN_INFO, sdev,
3941             "%s skip device_block for SES handle(0x%04x)\n",
3942             __func__, sas_device_priv_data->sas_target->handle);
3943             continue;
3944         }
3945         _scsih_internal_device_block(sdev, sas_device_priv_data);
3946     }
3947 
3948     if (sas_device)
3949         sas_device_put(sas_device);
3950 }
3951 
3952 /**
3953  * _scsih_block_io_to_children_attached_to_ex - block IO to expander children
3954  * @ioc: per adapter object
3955  * @sas_expander: the sas_node (expander) object
3956  *
3957  * This routine sets the sdev state to SDEV_BLOCK for all devices
3958  * attached to this expander. It is called when the expander is
3959  * pulled.
3960  */
3961 static void
3962 _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3963     struct _sas_node *sas_expander)
3964 {
3965     struct _sas_port *mpt3sas_port;
3966     struct _sas_device *sas_device;
3967     struct _sas_node *expander_sibling;
3968     unsigned long flags;
3969 
3970     if (!sas_expander)
3971         return;
3972 
3973     list_for_each_entry(mpt3sas_port,
3974        &sas_expander->sas_port_list, port_list) {
3975         if (mpt3sas_port->remote_identify.device_type ==
3976             SAS_END_DEVICE) {
3977             spin_lock_irqsave(&ioc->sas_device_lock, flags);
3978             sas_device = __mpt3sas_get_sdev_by_addr(ioc,
3979                 mpt3sas_port->remote_identify.sas_address,
3980                 mpt3sas_port->hba_port);
3981             if (sas_device) {
3982                 set_bit(sas_device->handle,
3983                         ioc->blocking_handles);
3984                 sas_device_put(sas_device);
3985             }
3986             spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3987         }
3988     }
3989 
3990     list_for_each_entry(mpt3sas_port,
3991        &sas_expander->sas_port_list, port_list) {
3992 
3993         if (mpt3sas_port->remote_identify.device_type ==
3994             SAS_EDGE_EXPANDER_DEVICE ||
3995             mpt3sas_port->remote_identify.device_type ==
3996             SAS_FANOUT_EXPANDER_DEVICE) {
3997             expander_sibling =
3998                 mpt3sas_scsih_expander_find_by_sas_address(
3999                 ioc, mpt3sas_port->remote_identify.sas_address,
4000                 mpt3sas_port->hba_port);
4001             _scsih_block_io_to_children_attached_to_ex(ioc,
4002                 expander_sibling);
4003         }
4004     }
4005 }
4006 
4007 /**
4008  * _scsih_block_io_to_children_attached_directly - block IO to direct-attached devices
4009  * @ioc: per adapter object
4010  * @event_data: topology change event data
4011  *
4012  * This routine sets the sdev state to SDEV_BLOCK for all directly
4013  * attached devices during a device pull.
4014  */
4015 static void
4016 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4017     Mpi2EventDataSasTopologyChangeList_t *event_data)
4018 {
4019     int i;
4020     u16 handle;
4021     u16 reason_code;
4022 
4023     for (i = 0; i < event_data->NumEntries; i++) {
4024         handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4025         if (!handle)
4026             continue;
4027         reason_code = event_data->PHY[i].PhyStatus &
4028             MPI2_EVENT_SAS_TOPO_RC_MASK;
4029         if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
4030             _scsih_block_io_device(ioc, handle);
4031     }
4032 }
4033 
4034 /**
4035  * _scsih_block_io_to_pcie_children_attached_directly - block IO to direct-attached PCIe devices
4036  * @ioc: per adapter object
4037  * @event_data: topology change event data
4038  *
4039  * This routine sets the sdev state to SDEV_BLOCK for all directly
4040  * attached devices during a device pull/reconnect.
4041  */
4042 static void
4043 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4044         Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4045 {
4046     int i;
4047     u16 handle;
4048     u16 reason_code;
4049 
4050     for (i = 0; i < event_data->NumEntries; i++) {
4051         handle =
4052             le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4053         if (!handle)
4054             continue;
4055         reason_code = event_data->PortEntry[i].PortStatus;
4056         if (reason_code ==
4057                 MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
4058             _scsih_block_io_device(ioc, handle);
4059     }
4060 }
4061 /**
4062  * _scsih_tm_tr_send - send task management request
4063  * @ioc: per adapter object
4064  * @handle: device handle
4065  * Context: interrupt time.
4066  *
4067  * This code initiates the device removal handshake protocol with the
4068  * controller firmware.  This function issues a target reset using the
4069  * high priority request queue, and a sas iounit control request
4070  * (MPI2_SAS_OP_REMOVE_DEVICE) is sent from its completion routine.
4071  *
4072  * This is designed to send multiple task management requests at the
4073  * same time to the fifo. If the fifo is full, we will append the
4074  * request and process it in a future completion.
4075  */
4076 static void
4077 _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4078 {
4079     Mpi2SCSITaskManagementRequest_t *mpi_request;
4080     u16 smid;
4081     struct _sas_device *sas_device = NULL;
4082     struct _pcie_device *pcie_device = NULL;
4083     struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
4084     u64 sas_address = 0;
4085     unsigned long flags;
4086     struct _tr_list *delayed_tr;
4087     u32 ioc_state;
4088     u8 tr_method = 0;
4089     struct hba_port *port = NULL;
4090 
4091     if (ioc->pci_error_recovery) {
4092         dewtprintk(ioc,
4093                ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
4094                     __func__, handle));
4095         return;
4096     }
4097     ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4098     if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4099         dewtprintk(ioc,
4100                ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
4101                     __func__, handle));
4102         return;
4103     }
4104 
4105     /* if PD, then return */
4106     if (test_bit(handle, ioc->pd_handles))
4107         return;
4108 
4109     clear_bit(handle, ioc->pend_os_device_add);
4110 
4111     spin_lock_irqsave(&ioc->sas_device_lock, flags);
4112     sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
4113     if (sas_device && sas_device->starget &&
4114         sas_device->starget->hostdata) {
4115         sas_target_priv_data = sas_device->starget->hostdata;
4116         sas_target_priv_data->deleted = 1;
4117         sas_address = sas_device->sas_address;
4118         port = sas_device->port;
4119     }
4120     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4121     if (!sas_device) {
4122         spin_lock_irqsave(&ioc->pcie_device_lock, flags);
4123         pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
4124         if (pcie_device && pcie_device->starget &&
4125             pcie_device->starget->hostdata) {
4126             sas_target_priv_data = pcie_device->starget->hostdata;
4127             sas_target_priv_data->deleted = 1;
4128             sas_address = pcie_device->wwid;
4129         }
4130         spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
4131         if (pcie_device && (!ioc->tm_custom_handling) &&
4132             (!(mpt3sas_scsih_is_pcie_scsi_device(
4133             pcie_device->device_info))))
4134             tr_method =
4135                 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
4136         else
4137             tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
4138     }
4139     if (sas_target_priv_data) {
4140         dewtprintk(ioc,
4141                ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
4142                     handle, (u64)sas_address));
4143         if (sas_device) {
4144             if (sas_device->enclosure_handle != 0)
4145                 dewtprintk(ioc,
4146                        ioc_info(ioc, "setting delete flag: enclosure logical id(0x%016llx), slot(%d)\n",
4147                             (u64)sas_device->enclosure_logical_id,
4148                             sas_device->slot));
4149             if (sas_device->connector_name[0] != '\0')
4150                 dewtprintk(ioc,
4151                        ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
4152                             sas_device->enclosure_level,
4153                             sas_device->connector_name));
4154         } else if (pcie_device) {
4155             if (pcie_device->enclosure_handle != 0)
4156                 dewtprintk(ioc,
4157                        ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
4158                             (u64)pcie_device->enclosure_logical_id,
4159                             pcie_device->slot));
4160             if (pcie_device->connector_name[0] != '\0')
4161                 dewtprintk(ioc,
4162                        ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
4163                             pcie_device->enclosure_level,
4164                             pcie_device->connector_name));
4165         }
4166         _scsih_ublock_io_device(ioc, sas_address, port);
4167         sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
4168     }
4169 
4170     smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
4171     if (!smid) {
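        /*
         * No free high-priority smid: park the handle on
         * ioc->delayed_tr_list so the target reset is retried from a
         * later TM completion (see _scsih_check_for_pending_tm()).
         */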
4172         delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4173         if (!delayed_tr)
4174             goto out;
4175         INIT_LIST_HEAD(&delayed_tr->list);
4176         delayed_tr->handle = handle;
4177         list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4178         dewtprintk(ioc,
4179                ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4180                     handle));
4181         goto out;
4182     }
4183 
4184     dewtprintk(ioc,
4185            ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4186                 handle, smid, ioc->tm_tr_cb_idx));
4187     mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4188     memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4189     mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4190     mpi_request->DevHandle = cpu_to_le16(handle);
4191     mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4192     mpi_request->MsgFlags = tr_method;
4193     set_bit(handle, ioc->device_remove_in_progress);
4194     ioc->put_smid_hi_priority(ioc, smid, 0);
4195     mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
4196 
4197 out:
4198     if (sas_device)
4199         sas_device_put(sas_device);
4200     if (pcie_device)
4201         pcie_device_put(pcie_device);
4202 }
4203 
4204 /**
4205  * _scsih_tm_tr_complete - target reset completion routine
4206  * @ioc: per adapter object
4207  * @smid: system request message index
4208  * @msix_index: MSIX table index supplied by the OS
4209  * @reply: reply message frame(lower 32bit addr)
4210  * Context: interrupt time.
4211  *
4212  * This is the target reset completion routine.
4213  * This code is part of the code to initiate the device removal
4214  * handshake protocol with controller firmware.
4215  * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
4216  *
4217  * Return: 1 meaning mf should be freed from _base_interrupt
4218  *         0 means the mf is freed from this function.
4219  */
4220 static u8
4221 _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
4222     u32 reply)
4223 {
4224     u16 handle;
4225     Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4226     Mpi2SCSITaskManagementReply_t *mpi_reply =
4227         mpt3sas_base_get_reply_virt_addr(ioc, reply);
4228     Mpi2SasIoUnitControlRequest_t *mpi_request;
4229     u16 smid_sas_ctrl;
4230     u32 ioc_state;
4231     struct _sc_list *delayed_sc;
4232 
4233     if (ioc->pci_error_recovery) {
4234         dewtprintk(ioc,
4235                ioc_info(ioc, "%s: host in pci error recovery\n",
4236                     __func__));
4237         return 1;
4238     }
4239     ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4240     if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4241         dewtprintk(ioc,
4242                ioc_info(ioc, "%s: host is not operational\n",
4243                     __func__));
4244         return 1;
4245     }
4246     if (unlikely(!mpi_reply)) {
4247         ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4248             __FILE__, __LINE__, __func__);
4249         return 1;
4250     }
4251     mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4252     handle = le16_to_cpu(mpi_request_tm->DevHandle);
4253     if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4254         dewtprintk(ioc,
4255                ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4256                    handle,
4257                    le16_to_cpu(mpi_reply->DevHandle), smid));
4258         return 0;
4259     }
4260 
4261     mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
4262     dewtprintk(ioc,
4263            ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4264                 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4265                 le32_to_cpu(mpi_reply->IOCLogInfo),
4266                 le32_to_cpu(mpi_reply->TerminationCount)));
4267 
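    /*
     * The target reset completed; now queue the sas iounit control
     * request (MPI2_SAS_OP_REMOVE_DEVICE) that finishes the device
     * removal handshake with firmware.
     */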
4268     smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
4269     if (!smid_sas_ctrl) {
4270         delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
4271         if (!delayed_sc)
4272             return _scsih_check_for_pending_tm(ioc, smid);
4273         INIT_LIST_HEAD(&delayed_sc->list);
4274         delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
4275         list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
4276         dewtprintk(ioc,
4277                ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
4278                     handle));
4279         return _scsih_check_for_pending_tm(ioc, smid);
4280     }
4281 
4282     dewtprintk(ioc,
4283            ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4284                 handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
4285     mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
4286     memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4287     mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4288     mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4289     mpi_request->DevHandle = mpi_request_tm->DevHandle;
4290     ioc->put_smid_default(ioc, smid_sas_ctrl);
4291 
4292     return _scsih_check_for_pending_tm(ioc, smid);
4293 }
4294 
4295 /**
4296  * _scsih_allow_scmd_to_device - check whether the scmd should be issued to the IOC
4297  * @ioc: per adapter object
4298  * @scmd: pointer to scsi command object
4299  *
4300  * Return: true if the scmd can be issued to the IOC, otherwise false.
4301  */
4302 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
4303     struct scsi_cmnd *scmd)
4304 {
4305 
4306     if (ioc->pci_error_recovery)
4307         return false;
4308 
4309     if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
4310         if (ioc->remove_host)
4311             return false;
4312 
4313         return true;
4314     }
4315 
4316     if (ioc->remove_host) {
4317 
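        /*
         * While the host is being removed, only allow cache flush and
         * start/stop commands through so devices can be shut down
         * cleanly; everything else is rejected.
         */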
4318         switch (scmd->cmnd[0]) {
4319         case SYNCHRONIZE_CACHE:
4320         case START_STOP:
4321             return true;
4322         default:
4323             return false;
4324         }
4325     }
4326 
4327     return true;
4328 }
4329 
4330 /**
4331  * _scsih_sas_control_complete - completion routine
4332  * @ioc: per adapter object
4333  * @smid: system request message index
4334  * @msix_index: MSIX table index supplied by the OS
4335  * @reply: reply message frame(lower 32bit addr)
4336  * Context: interrupt time.
4337  *
4338  * This is the sas iounit control completion routine.
4339  * This code is part of the code to initiate the device removal
4340  * handshake protocol with controller firmware.
4341  *
4342  * Return: 1 meaning mf should be freed from _base_interrupt
4343  *         0 means the mf is freed from this function.
4344  */
4345 static u8
4346 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4347     u8 msix_index, u32 reply)
4348 {
4349     Mpi2SasIoUnitControlReply_t *mpi_reply =
4350         mpt3sas_base_get_reply_virt_addr(ioc, reply);
4351 
4352     if (likely(mpi_reply)) {
4353         dewtprintk(ioc,
4354                ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
4355                     le16_to_cpu(mpi_reply->DevHandle), smid,
4356                     le16_to_cpu(mpi_reply->IOCStatus),
4357                     le32_to_cpu(mpi_reply->IOCLogInfo)));
4358         if (le16_to_cpu(mpi_reply->IOCStatus) ==
4359              MPI2_IOCSTATUS_SUCCESS) {
4360             clear_bit(le16_to_cpu(mpi_reply->DevHandle),
4361                 ioc->device_remove_in_progress);
4362         }
4363     } else {
4364         ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4365             __FILE__, __LINE__, __func__);
4366     }
4367     return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
4368 }
4369 
4370 /**
4371  * _scsih_tm_tr_volume_send - send target reset request for volumes
4372  * @ioc: per adapter object
4373  * @handle: device handle
4374  * Context: interrupt time.
4375  *
4376  * This is designed to send multiple task management requests at the
4377  * same time to the fifo. If the fifo is full, we will append the
4378  * request and process it in a future completion.
4379  */
4380 static void
4381 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4382 {
4383     Mpi2SCSITaskManagementRequest_t *mpi_request;
4384     u16 smid;
4385     struct _tr_list *delayed_tr;
4386 
4387     if (ioc->pci_error_recovery) {
4388         dewtprintk(ioc,
4389                ioc_info(ioc, "%s: host reset in progress!\n",
4390                     __func__));
4391         return;
4392     }
4393 
4394     smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
4395     if (!smid) {
4396         delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4397         if (!delayed_tr)
4398             return;
4399         INIT_LIST_HEAD(&delayed_tr->list);
4400         delayed_tr->handle = handle;
4401         list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
4402         dewtprintk(ioc,
4403                ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4404                     handle));
4405         return;
4406     }
4407 
4408     dewtprintk(ioc,
4409            ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4410                 handle, smid, ioc->tm_tr_volume_cb_idx));
4411     mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4412     memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4413     mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4414     mpi_request->DevHandle = cpu_to_le16(handle);
4415     mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4416     ioc->put_smid_hi_priority(ioc, smid, 0);
4417 }
4418 
4419 /**
4420  * _scsih_tm_volume_tr_complete - target reset completion
4421  * @ioc: per adapter object
4422  * @smid: system request message index
4423  * @msix_index: MSIX table index supplied by the OS
4424  * @reply: reply message frame(lower 32bit addr)
4425  * Context: interrupt time.
4426  *
4427  * Return: 1 meaning mf should be freed from _base_interrupt
4428  *         0 means the mf is freed from this function.
4429  */
4430 static u8
4431 _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4432     u8 msix_index, u32 reply)
4433 {
4434     u16 handle;
4435     Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4436     Mpi2SCSITaskManagementReply_t *mpi_reply =
4437         mpt3sas_base_get_reply_virt_addr(ioc, reply);
4438 
4439     if (ioc->shost_recovery || ioc->pci_error_recovery) {
4440         dewtprintk(ioc,
4441                ioc_info(ioc, "%s: host reset in progress!\n",
4442                     __func__));
4443         return 1;
4444     }
4445     if (unlikely(!mpi_reply)) {
4446         ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4447             __FILE__, __LINE__, __func__);
4448         return 1;
4449     }
4450 
4451     mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4452     handle = le16_to_cpu(mpi_request_tm->DevHandle);
4453     if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4454         dewtprintk(ioc,
4455                ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4456                    handle, le16_to_cpu(mpi_reply->DevHandle),
4457                    smid));
4458         return 0;
4459     }
4460 
4461     dewtprintk(ioc,
4462            ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4463                 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4464                 le32_to_cpu(mpi_reply->IOCLogInfo),
4465                 le32_to_cpu(mpi_reply->TerminationCount)));
4466 
4467     return _scsih_check_for_pending_tm(ioc, smid);
4468 }
4469 
4470 /**
4471  * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
4472  * @ioc: per adapter object
4473  * @smid: system request message index
4474  * @event: Event ID
4475  * @event_context: used to track events uniquely
4476  *
4477  * Context - processed in interrupt context.
4478  */
4479 static void
4480 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
4481                 U32 event_context)
4482 {
4483     Mpi2EventAckRequest_t *ack_request;
4484     int i = smid - ioc->internal_smid;
4485     unsigned long flags;
4486 
4487     /* Without releasing the smid just update the
4488      * call back index and reuse the same smid for
4489      * processing this delayed request
4490      */
4491     spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4492     ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
4493     spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4494 
4495     dewtprintk(ioc,
4496            ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
4497                 le16_to_cpu(event), smid, ioc->base_cb_idx));
4498     ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
4499     memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
4500     ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
4501     ack_request->Event = event;
4502     ack_request->EventContext = event_context;
4503     ack_request->VF_ID = 0;  /* TODO */
4504     ack_request->VP_ID = 0;
4505     ioc->put_smid_default(ioc, smid);
4506 }
4507 
4508 /**
4509  * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
4510  *              sas_io_unit_ctrl messages
4511  * @ioc: per adapter object
4512  * @smid: system request message index
4513  * @handle: device handle
4514  *
4515  * Context - processed in interrupt context.
4516  */
4517 static void
4518 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
4519                     u16 smid, u16 handle)
4520 {
4521     Mpi2SasIoUnitControlRequest_t *mpi_request;
4522     u32 ioc_state;
4523     int i = smid - ioc->internal_smid;
4524     unsigned long flags;
4525 
4526     if (ioc->remove_host) {
4527         dewtprintk(ioc,
4528                ioc_info(ioc, "%s: host has been removed\n",
4529                     __func__));
4530         return;
4531     } else if (ioc->pci_error_recovery) {
4532         dewtprintk(ioc,
4533                ioc_info(ioc, "%s: host in pci error recovery\n",
4534                     __func__));
4535         return;
4536     }
4537     ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4538     if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4539         dewtprintk(ioc,
4540                ioc_info(ioc, "%s: host is not operational\n",
4541                     __func__));
4542         return;
4543     }
4544 
4545     /* Without releasing the smid just update the
4546      * call back index and reuse the same smid for
4547      * processing this delayed request
4548      */
4549     spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4550     ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
4551     spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4552 
4553     dewtprintk(ioc,
4554            ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4555                 handle, smid, ioc->tm_sas_control_cb_idx));
4556     mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4557     memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4558     mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4559     mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4560     mpi_request->DevHandle = cpu_to_le16(handle);
4561     ioc->put_smid_default(ioc, smid);
4562 }
4563 
4564 /**
4565  * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
4566  * @ioc: per adapter object
4567  * @smid: system request message index
4568  *
4569  * Context: Executed in interrupt context
4570  *
4571  * This will check the delayed internal messages list, and process the
4572  * next request.
4573  *
4574  * Return: 1 meaning mf should be freed from _base_interrupt
4575  *         0 means the mf is freed from this function.
4576  */
4577 u8
4578 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4579 {
4580     struct _sc_list *delayed_sc;
4581     struct _event_ack_list *delayed_event_ack;
4582 
4583     if (!list_empty(&ioc->delayed_event_ack_list)) {
4584         delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4585                         struct _event_ack_list, list);
4586         _scsih_issue_delayed_event_ack(ioc, smid,
4587           delayed_event_ack->Event, delayed_event_ack->EventContext);
4588         list_del(&delayed_event_ack->list);
4589         kfree(delayed_event_ack);
4590         return 0;
4591     }
4592 
4593     if (!list_empty(&ioc->delayed_sc_list)) {
4594         delayed_sc = list_entry(ioc->delayed_sc_list.next,
4595                         struct _sc_list, list);
4596         _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4597                          delayed_sc->handle);
4598         list_del(&delayed_sc->list);
4599         kfree(delayed_sc);
4600         return 0;
4601     }
4602     return 1;
4603 }
4604 
4605 /**
4606  * _scsih_check_for_pending_tm - check for pending task management
4607  * @ioc: per adapter object
4608  * @smid: system request message index
4609  *
4610  * This will check the delayed target reset list and feed the
4611  * next request.
4612  *
4613  * Return: 1 meaning mf should be freed from _base_interrupt
4614  *         0 means the mf is freed from this function.
4615  */
4616 static u8
4617 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4618 {
4619     struct _tr_list *delayed_tr;
4620 
4621     if (!list_empty(&ioc->delayed_tr_volume_list)) {
4622         delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4623             struct _tr_list, list);
4624         mpt3sas_base_free_smid(ioc, smid);
4625         _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4626         list_del(&delayed_tr->list);
4627         kfree(delayed_tr);
4628         return 0;
4629     }
4630 
4631     if (!list_empty(&ioc->delayed_tr_list)) {
4632         delayed_tr = list_entry(ioc->delayed_tr_list.next,
4633             struct _tr_list, list);
4634         mpt3sas_base_free_smid(ioc, smid);
4635         _scsih_tm_tr_send(ioc, delayed_tr->handle);
4636         list_del(&delayed_tr->list);
4637         kfree(delayed_tr);
4638         return 0;
4639     }
4640 
4641     return 1;
4642 }
4643 
4644 /**
4645  * _scsih_check_topo_delete_events - sanity check on topo events
4646  * @ioc: per adapter object
4647  * @event_data: the event data payload
4648  *
4649  * This routine was added to better handle cable pull (breaker) scenarios.
4650  *
4651  * It handles the case where the driver receives multiple expander
4652  * add and delete events in a single shot.  When there is a delete event,
4653  * the routine voids any pending add events waiting in the event queue.
4654  */
4655 static void
4656 _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
4657     Mpi2EventDataSasTopologyChangeList_t *event_data)
4658 {
4659     struct fw_event_work *fw_event;
4660     Mpi2EventDataSasTopologyChangeList_t *local_event_data;
4661     u16 expander_handle;
4662     struct _sas_node *sas_expander;
4663     unsigned long flags;
4664     int i, reason_code;
4665     u16 handle;
4666 
4667     for (i = 0 ; i < event_data->NumEntries; i++) {
4668         handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4669         if (!handle)
4670             continue;
4671         reason_code = event_data->PHY[i].PhyStatus &
4672             MPI2_EVENT_SAS_TOPO_RC_MASK;
4673         if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
4674             _scsih_tm_tr_send(ioc, handle);
4675     }
4676 
4677     expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
4678     if (expander_handle < ioc->sas_hba.num_phys) {
4679         _scsih_block_io_to_children_attached_directly(ioc, event_data);
4680         return;
4681     }
4682     if (event_data->ExpStatus ==
4683         MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
4684         /* put expander attached devices into blocking state */
4685         spin_lock_irqsave(&ioc->sas_node_lock, flags);
4686         sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
4687             expander_handle);
4688         _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
4689         spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
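             /*
              * Walk the blocking_handles bitmap populated above and put
              * each flagged device into the blocked state, clearing each
              * bit as it is processed.
              */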
4690         do {
4691             handle = find_first_bit(ioc->blocking_handles,
4692                 ioc->facts.MaxDevHandle);
4693             if (handle < ioc->facts.MaxDevHandle)
4694                 _scsih_block_io_device(ioc, handle);
4695         } while (test_and_clear_bit(handle, ioc->blocking_handles));
4696     } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
4697         _scsih_block_io_to_children_attached_directly(ioc, event_data);
4698 
4699     if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4700         return;
4701 
4702     /* mark ignore flag for pending events */
4703     spin_lock_irqsave(&ioc->fw_event_lock, flags);
4704     list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4705         if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4706             fw_event->ignore)
4707             continue;
4708         local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
4709                    fw_event->event_data;
4710         if (local_event_data->ExpStatus ==
4711             MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4712             local_event_data->ExpStatus ==
4713             MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4714             if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
4715                 expander_handle) {
4716                 dewtprintk(ioc,
4717                        ioc_info(ioc, "setting ignoring flag\n"));
4718                 fw_event->ignore = 1;
4719             }
4720         }
4721     }
4722     spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4723 }
4724 
4725 /**
4726  * _scsih_check_pcie_topo_remove_events - sanity check on topo
4727  * events
4728  * @ioc: per adapter object
4729  * @event_data: the event data payload
4730  *
4731  * This handles the case where the driver receives multiple switch
4732  * or device add and delete events in a single shot.  When there
4733  * is a delete event, the routine voids any pending add
4734  * events waiting in the event queue.
4735  */
4736 static void
4737 _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4738     Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4739 {
4740     struct fw_event_work *fw_event;
4741     Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
4742     unsigned long flags;
4743     int i, reason_code;
4744     u16 handle, switch_handle;
4745 
4746     for (i = 0; i < event_data->NumEntries; i++) {
4747         handle =
4748             le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4749         if (!handle)
4750             continue;
4751         reason_code = event_data->PortEntry[i].PortStatus;
4752         if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
4753             _scsih_tm_tr_send(ioc, handle);
4754     }
4755 
4756     switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
4757     if (!switch_handle) {
4758         _scsih_block_io_to_pcie_children_attached_directly(
4759                             ioc, event_data);
4760         return;
4761     }
4762     /* TODO We are not supporting cascaded PCIe Switch removal yet*/
4763     if ((event_data->SwitchStatus
4764         == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
4765         (event_data->SwitchStatus ==
4766                     MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
4767         _scsih_block_io_to_pcie_children_attached_directly(
4768                             ioc, event_data);
4769 
4770     if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4771         return;
4772 
4773     /* mark ignore flag for pending events */
4774     spin_lock_irqsave(&ioc->fw_event_lock, flags);
4775     list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4776         if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4777             fw_event->ignore)
4778             continue;
4779         local_event_data =
4780             (Mpi26EventDataPCIeTopologyChangeList_t *)
4781             fw_event->event_data;
4782         if (local_event_data->SwitchStatus ==
4783             MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4784             local_event_data->SwitchStatus ==
4785             MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4786             if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4787                 switch_handle) {
4788                 dewtprintk(ioc,
4789                        ioc_info(ioc, "setting ignoring flag for switch event\n"));
4790                 fw_event->ignore = 1;
4791             }
4792         }
4793     }
4794     spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4795 }
4796 
4797 /**
4798  * _scsih_set_volume_delete_flag - setting volume delete flag
4799  * @ioc: per adapter object
4800  * @handle: device handle
4801  *
4802  * This returns nothing.
4803  */
4804 static void
4805 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4806 {
4807     struct _raid_device *raid_device;
4808     struct MPT3SAS_TARGET *sas_target_priv_data;
4809     unsigned long flags;
4810 
4811     spin_lock_irqsave(&ioc->raid_device_lock, flags);
4812     raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4813     if (raid_device && raid_device->starget &&
4814         raid_device->starget->hostdata) {
4815         sas_target_priv_data =
4816             raid_device->starget->hostdata;
4817         sas_target_priv_data->deleted = 1;
4818         dewtprintk(ioc,
4819                ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4820                     handle, (u64)raid_device->wwid));
4821     }
4822     spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4823 }
4824 
4825 /**
4826  * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4827  * @handle: input handle
4828  * @a: handle for volume a
4829  * @b: handle for volume b
4830  *
4831  * IR firmware only supports two raid volumes.  This routine records the
4832  * given volume handle in either a or b: a non-zero handle that is not
4833  * already recorded is stored in a if a is unset, otherwise in b.
4834  */
4835 static void
4836 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
4837 {
4838     if (!handle || handle == *a || handle == *b)
4839         return;
4840     if (!*a)
4841         *a = handle;
4842     else if (!*b)
4843         *b = handle;
4844 }
4845 
4846 /**
4847  * _scsih_check_ir_config_unhide_events - check for UNHIDE events
4848  * @ioc: per adapter object
4849  * @event_data: the event data payload
4850  * Context: interrupt time.
4851  *
4852  * This routine will send target reset to volume, followed by target
4853  * resets to the PDs. This is called when a PD has been removed, or
4854  * volume has been deleted or removed. When the target reset is sent
4855  * to volume, the PD target resets need to be queued to start upon
4856  * completion of the volume target reset.
4857  */
4858 static void
4859 _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
4860     Mpi2EventDataIrConfigChangeList_t *event_data)
4861 {
4862     Mpi2EventIrConfigElement_t *element;
4863     int i;
4864     u16 handle, volume_handle, a, b;
4865     struct _tr_list *delayed_tr;
4866 
4867     a = 0;
4868     b = 0;
4869 
4870     if (ioc->is_warpdrive)
4871         return;
4872 
4873     /* Volume Resets for Deleted or Removed */
4874     element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4875     for (i = 0; i < event_data->NumElements; i++, element++) {
4876         if (le32_to_cpu(event_data->Flags) &
4877             MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4878             continue;
4879         if (element->ReasonCode ==
4880             MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
4881             element->ReasonCode ==
4882             MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
4883             volume_handle = le16_to_cpu(element->VolDevHandle);
4884             _scsih_set_volume_delete_flag(ioc, volume_handle);
4885             _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4886         }
4887     }
4888 
4889     /* Volume Resets for UNHIDE events */
4890     element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4891     for (i = 0; i < event_data->NumElements; i++, element++) {
4892         if (le32_to_cpu(event_data->Flags) &
4893             MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4894             continue;
4895         if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
4896             volume_handle = le16_to_cpu(element->VolDevHandle);
4897             _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4898         }
4899     }
4900 
4901     if (a)
4902         _scsih_tm_tr_volume_send(ioc, a);
4903     if (b)
4904         _scsih_tm_tr_volume_send(ioc, b);
4905 
4906     /* PD target resets */
4907     element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4908     for (i = 0; i < event_data->NumElements; i++, element++) {
4909         if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
4910             continue;
4911         handle = le16_to_cpu(element->PhysDiskDevHandle);
4912         volume_handle = le16_to_cpu(element->VolDevHandle);
4913         clear_bit(handle, ioc->pd_handles);
4914         if (!volume_handle)
4915             _scsih_tm_tr_send(ioc, handle);
4916         else if (volume_handle == a || volume_handle == b) {
4917             delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4918             BUG_ON(!delayed_tr);
4919             INIT_LIST_HEAD(&delayed_tr->list);
4920             delayed_tr->handle = handle;
4921             list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4922             dewtprintk(ioc,
4923                    ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4924                         handle));
4925         } else
4926             _scsih_tm_tr_send(ioc, handle);
4927     }
4928 }
4929 
4930 
4931 /**
4932  * _scsih_check_volume_delete_events - set delete flag for volumes
4933  * @ioc: per adapter object
4934  * @event_data: the event data payload
4935  * Context: interrupt time.
4936  *
4937  * This handles the case when the cable connected to the entire volume is
4938  * pulled.  The deleted flag is set so that normal IO will not be
4939  * sent.
4940  */
4941 static void
4942 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4943     Mpi2EventDataIrVolume_t *event_data)
4944 {
4945     u32 state;
4946 
4947     if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4948         return;
4949     state = le32_to_cpu(event_data->NewValue);
4950     if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4951         MPI2_RAID_VOL_STATE_FAILED)
4952         _scsih_set_volume_delete_flag(ioc,
4953             le16_to_cpu(event_data->VolDevHandle));
4954 }
4955 
4956 /**
4957  * _scsih_temp_threshold_events - display temperature threshold exceeded events
4958  * @ioc: per adapter object
4959  * @event_data: the temp threshold event data
4960  * Context: interrupt time.
4961  */
4962 static void
4963 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4964     Mpi2EventDataTemperature_t *event_data)
4965 {
4966     u32 doorbell;
4967     if (ioc->temp_sensors_count >= event_data->SensorNum) {
4968         ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4969             le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4970             le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4971             le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4972             le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4973             event_data->SensorNum);
4974         ioc_err(ioc, "Current Temp In Celsius: %d\n",
4975             event_data->CurrentTemperature);
4976         if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4977             doorbell = mpt3sas_base_get_iocstate(ioc, 0);
4978             if ((doorbell & MPI2_IOC_STATE_MASK) ==
4979                 MPI2_IOC_STATE_FAULT) {
4980                 mpt3sas_print_fault_code(ioc,
4981                     doorbell & MPI2_DOORBELL_DATA_MASK);
4982             } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
4983                 MPI2_IOC_STATE_COREDUMP) {
4984                 mpt3sas_print_coredump_info(ioc,
4985                     doorbell & MPI2_DOORBELL_DATA_MASK);
4986             }
4987         }
4988     }
4989 }
4990 
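     /**
      * _scsih_set_satl_pending - track an outstanding ATA pass-through command
      * @scmd: pointer to scsi command object
      * @pending: set or clear the per-device pending flag
      *
      * Part of the firmware SATL workaround: only one ATA_12/ATA_16 command
      * may be outstanding per device.  When @pending is true, this returns
      * non-zero if another ATA command is already in flight; otherwise the
      * flag is cleared and 0 is returned.  Non-ATA commands are ignored.
      */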
4991 static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
4992 {
4993     struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
4994 
4995     if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
4996         return 0;
4997 
4998     if (pending)
4999         return test_and_set_bit(0, &priv->ata_command_pending);
5000 
5001     clear_bit(0, &priv->ata_command_pending);
5002     return 0;
5003 }
5004 
5005 /**
5006  * _scsih_flush_running_cmds - completing outstanding commands.
5007  * @ioc: per adapter object
5008  *
5009  * Flushes out all pending scmd commands following host reset,
5010  * where all IO is dropped to the floor.
5011  */
5012 static void
5013 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
5014 {
5015     struct scsi_cmnd *scmd;
5016     struct scsiio_tracker *st;
5017     u16 smid;
5018     int count = 0;
5019 
5020     for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
5021         scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5022         if (!scmd)
5023             continue;
5024         count++;
5025         _scsih_set_satl_pending(scmd, false);
5026         st = scsi_cmd_priv(scmd);
5027         mpt3sas_base_clear_st(ioc, st);
5028         scsi_dma_unmap(scmd);
5029         if (ioc->pci_error_recovery || ioc->remove_host)
5030             scmd->result = DID_NO_CONNECT << 16;
5031         else
5032             scmd->result = DID_RESET << 16;
5033         scsi_done(scmd);
5034     }
5035     dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
5036 }
5037 
5038 /**
5039  * _scsih_setup_eedp - setup MPI request for EEDP transfer
5040  * @ioc: per adapter object
5041  * @scmd: pointer to scsi command object
5042  * @mpi_request: pointer to the SCSI_IO request message frame
5043  *
5044  * Supports protection (DIF) types 1 and 3.
5045  */
5046 static void
5047 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5048     Mpi25SCSIIORequest_t *mpi_request)
5049 {
5050     u16 eedp_flags;
5051     Mpi25SCSIIORequest_t *mpi_request_3v =
5052        (Mpi25SCSIIORequest_t *)mpi_request;
5053 
5054     switch (scsi_get_prot_op(scmd)) {
5055     case SCSI_PROT_READ_STRIP:
5056         eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
5057         break;
5058     case SCSI_PROT_WRITE_INSERT:
5059         eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
5060         break;
5061     default:
5062         return;
5063     }
5064 
5065     if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
5066         eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
5067 
5068     if (scmd->prot_flags & SCSI_PROT_REF_CHECK)
5069         eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
5070 
5071     if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) {
5072         eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG;
5073 
5074         mpi_request->CDB.EEDP32.PrimaryReferenceTag =
5075             cpu_to_be32(scsi_prot_ref_tag(scmd));
5076     }
5077 
5078     mpi_request_3v->EEDPBlockSize = cpu_to_le16(scsi_prot_interval(scmd));
5079 
5080     if (ioc->is_gen35_ioc)
5081         eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
5082     mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
5083 }
5084 
5085 /**
5086  * _scsih_eedp_error_handling - return sense code for EEDP errors
5087  * @scmd: pointer to scsi command object
5088  * @ioc_status: ioc status
5089  */
5090 static void
5091 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
5092 {
5093     u8 ascq;
5094 
5095     switch (ioc_status) {
5096     case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5097         ascq = 0x01;
5098         break;
5099     case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5100         ascq = 0x02;
5101         break;
5102     case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5103         ascq = 0x03;
5104         break;
5105     default:
5106         ascq = 0x00;
5107         break;
5108     }
5109     scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x10, ascq);
5110     set_host_byte(scmd, DID_ABORT);
5111 }
5112 
5113 /**
5114  * scsih_qcmd - main scsi request entry point
5115  * @shost: SCSI host pointer
5116  * @scmd: pointer to scsi command object
5117  *
5118  * The callback index is set inside `ioc->scsi_io_cb_idx`.
5119  *
5120  * Return: 0 on success.  If there's a failure, return either:
5121  * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
5122  * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
5123  */
5124 static int
5125 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
5126 {
5127     struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
5128     struct MPT3SAS_DEVICE *sas_device_priv_data;
5129     struct MPT3SAS_TARGET *sas_target_priv_data;
5130     struct _raid_device *raid_device;
5131     struct request *rq = scsi_cmd_to_rq(scmd);
5132     int class;
5133     Mpi25SCSIIORequest_t *mpi_request;
5134     struct _pcie_device *pcie_device = NULL;
5135     u32 mpi_control;
5136     u16 smid;
5137     u16 handle;
5138 
5139     if (ioc->logging_level & MPT_DEBUG_SCSI)
5140         scsi_print_command(scmd);
5141 
5142     sas_device_priv_data = scmd->device->hostdata;
5143     if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
5144         scmd->result = DID_NO_CONNECT << 16;
5145         scsi_done(scmd);
5146         return 0;
5147     }
5148 
5149     if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
5150         scmd->result = DID_NO_CONNECT << 16;
5151         scsi_done(scmd);
5152         return 0;
5153     }
5154 
5155     sas_target_priv_data = sas_device_priv_data->sas_target;
5156 
5157     /* invalid device handle */
5158     handle = sas_target_priv_data->handle;
5159     if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
5160         scmd->result = DID_NO_CONNECT << 16;
5161         scsi_done(scmd);
5162         return 0;
5163     }
5164 
5165 
5166     if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
5167         /* host recovery or link resets sent via IOCTLs */
5168         return SCSI_MLQUEUE_HOST_BUSY;
5169     } else if (sas_target_priv_data->deleted) {
5170         /* device has been deleted */
5171         scmd->result = DID_NO_CONNECT << 16;
5172         scsi_done(scmd);
5173         return 0;
5174     } else if (sas_target_priv_data->tm_busy ||
5175            sas_device_priv_data->block) {
5176         /* device busy with task management */
5177         return SCSI_MLQUEUE_DEVICE_BUSY;
5178     }
5179 
5180     /*
5181      * Bug workaround for firmware SATL handling.  The loop
5182      * is based on atomic operations and ensures consistency,
5183      * since we're lockless at this point.
5184      */
5185     do {
5186         if (test_bit(0, &sas_device_priv_data->ata_command_pending))
5187             return SCSI_MLQUEUE_DEVICE_BUSY;
5188     } while (_scsih_set_satl_pending(scmd, true));
5189 
5190     if (scmd->sc_data_direction == DMA_FROM_DEVICE)
5191         mpi_control = MPI2_SCSIIO_CONTROL_READ;
5192     else if (scmd->sc_data_direction == DMA_TO_DEVICE)
5193         mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
5194     else
5195         mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
5196 
5197     /* set tags */
5198     mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
5199     /* NCQ Prio supported, make sure control indicated high priority */
5200     if (sas_device_priv_data->ncq_prio_enable) {
5201         class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
5202         if (class == IOPRIO_CLASS_RT)
5203             mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
5204     }
5205     /* Make sure Device is not raid volume.
5206      * We do not expose raid functionality to upper layer for warpdrive.
5207      */
5208     if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
5209         && !scsih_is_nvme(&scmd->device->sdev_gendev))
5210         && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
5211         mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
5212 
5213     smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
5214     if (!smid) {
5215         ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5216         _scsih_set_satl_pending(scmd, false);
5217         goto out;
5218     }
5219     mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5220     memset(mpi_request, 0, ioc->request_sz);
5221     _scsih_setup_eedp(ioc, scmd, mpi_request);
5222 
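         /* 32-byte CDB: set the additional CDB length field in Control */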
5223     if (scmd->cmd_len == 32)
5224         mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
5225     mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5226     if (sas_device_priv_data->sas_target->flags &
5227         MPT_TARGET_FLAGS_RAID_COMPONENT)
5228         mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
5229     else
5230         mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5231     mpi_request->DevHandle = cpu_to_le16(handle);
5232     mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
5233     mpi_request->Control = cpu_to_le32(mpi_control);
5234     mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
5235     mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
5236     mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
5237     mpi_request->SenseBufferLowAddress =
5238         mpt3sas_base_get_sense_buffer_dma(ioc, smid);
5239     mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
5240     int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
5241         mpi_request->LUN);
5242     memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5243 
5244     if (mpi_request->DataLength) {
5245         pcie_device = sas_target_priv_data->pcie_dev;
5246         if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
5247             mpt3sas_base_free_smid(ioc, smid);
5248             _scsih_set_satl_pending(scmd, false);
5249             goto out;
5250         }
5251     } else
5252         ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
5253 
5254     raid_device = sas_target_priv_data->raid_device;
5255     if (raid_device && raid_device->direct_io_enabled)
5256         mpt3sas_setup_direct_io(ioc, scmd,
5257             raid_device, mpi_request);
5258 
5259     if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
5260         if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
5261             mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
5262                 MPI25_SCSIIO_IOFLAGS_FAST_PATH);
5263             ioc->put_smid_fast_path(ioc, smid, handle);
5264         } else
5265             ioc->put_smid_scsi_io(ioc, smid,
5266                 le16_to_cpu(mpi_request->DevHandle));
5267     } else
5268         ioc->put_smid_default(ioc, smid);
5269     return 0;
5270 
5271  out:
5272     return SCSI_MLQUEUE_HOST_BUSY;
5273 }
5274 
5275 /**
5276  * _scsih_normalize_sense - normalize descriptor and fixed format sense data
5277  * @sense_buffer: sense data returned by target
5278  * @data: normalized skey/asc/ascq
5279  */
5280 static void
5281 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
5282 {
5283     if ((sense_buffer[0] & 0x7F) >= 0x72) {
5284         /* descriptor format */
5285         data->skey = sense_buffer[1] & 0x0F;
5286         data->asc = sense_buffer[2];
5287         data->ascq = sense_buffer[3];
5288     } else {
5289         /* fixed format */
5290         data->skey = sense_buffer[2] & 0x0F;
5291         data->asc = sense_buffer[12];
5292         data->ascq = sense_buffer[13];
5293     }
5294 }
5295 
5296 /**
5297  * _scsih_scsi_ioc_info - translate non-successful SCSI_IO request
5298  * @ioc: per adapter object
5299  * @scmd: pointer to scsi command object
5300  * @mpi_reply: reply mf payload returned from firmware
5301  * @smid: system request message index
5302  *
5303  * scsi_status - SCSI Status code returned from target device
5304  * scsi_state - state info associated with SCSI_IO determined by ioc
5305  * ioc_status - ioc supplied status info
5306  */
5307 static void
5308 _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5309     Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
5310 {
5311     u32 response_info;
5312     u8 *response_bytes;
5313     u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
5314         MPI2_IOCSTATUS_MASK;
5315     u8 scsi_state = mpi_reply->SCSIState;
5316     u8 scsi_status = mpi_reply->SCSIStatus;
5317     char *desc_ioc_state = NULL;
5318     char *desc_scsi_status = NULL;
5319     char *desc_scsi_state = ioc->tmp_string;
5320     u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5321     struct _sas_device *sas_device = NULL;
5322     struct _pcie_device *pcie_device = NULL;
5323     struct scsi_target *starget = scmd->device->sdev_target;
5324     struct MPT3SAS_TARGET *priv_target = starget->hostdata;
5325     char *device_str = NULL;
5326 
5327     if (!priv_target)
5328         return;
5329     if (ioc->hide_ir_msg)
5330         device_str = "WarpDrive";
5331     else
5332         device_str = "volume";
5333 
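         /* skip logging for this particular firmware loginfo code */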
5334     if (log_info == 0x31170000)
5335         return;
5336 
5337     switch (ioc_status) {
5338     case MPI2_IOCSTATUS_SUCCESS:
5339         desc_ioc_state = "success";
5340         break;
5341     case MPI2_IOCSTATUS_INVALID_FUNCTION:
5342         desc_ioc_state = "invalid function";
5343         break;
5344     case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5345         desc_ioc_state = "scsi recovered error";
5346         break;
5347     case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
5348         desc_ioc_state = "scsi invalid dev handle";
5349         break;
5350     case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5351         desc_ioc_state = "scsi device not there";
5352         break;
5353     case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5354         desc_ioc_state = "scsi data overrun";
5355         break;
5356     case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5357         desc_ioc_state = "scsi data underrun";
5358         break;
5359     case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5360         desc_ioc_state = "scsi io data error";
5361         break;
5362     case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5363         desc_ioc_state = "scsi protocol error";
5364         break;
5365     case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5366         desc_ioc_state = "scsi task terminated";
5367         break;
5368     case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5369         desc_ioc_state = "scsi residual mismatch";
5370         break;
5371     case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5372         desc_ioc_state = "scsi task mgmt failed";
5373         break;
5374     case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5375         desc_ioc_state = "scsi ioc terminated";
5376         break;
5377     case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5378         desc_ioc_state = "scsi ext terminated";
5379         break;
5380     case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5381         desc_ioc_state = "eedp guard error";
5382         break;
5383     case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5384         desc_ioc_state = "eedp ref tag error";
5385         break;
5386     case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5387         desc_ioc_state = "eedp app tag error";
5388         break;
5389     case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5390         desc_ioc_state = "insufficient power";
5391         break;
5392     default:
5393         desc_ioc_state = "unknown";
5394         break;
5395     }
5396 
5397     switch (scsi_status) {
5398     case MPI2_SCSI_STATUS_GOOD:
5399         desc_scsi_status = "good";
5400         break;
5401     case MPI2_SCSI_STATUS_CHECK_CONDITION:
5402         desc_scsi_status = "check condition";
5403         break;
5404     case MPI2_SCSI_STATUS_CONDITION_MET:
5405         desc_scsi_status = "condition met";
5406         break;
5407     case MPI2_SCSI_STATUS_BUSY:
5408         desc_scsi_status = "busy";
5409         break;
5410     case MPI2_SCSI_STATUS_INTERMEDIATE:
5411         desc_scsi_status = "intermediate";
5412         break;
5413     case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
5414         desc_scsi_status = "intermediate condmet";
5415         break;
5416     case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5417         desc_scsi_status = "reservation conflict";
5418         break;
5419     case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
5420         desc_scsi_status = "command terminated";
5421         break;
5422     case MPI2_SCSI_STATUS_TASK_SET_FULL:
5423         desc_scsi_status = "task set full";
5424         break;
5425     case MPI2_SCSI_STATUS_ACA_ACTIVE:
5426         desc_scsi_status = "aca active";
5427         break;
5428     case MPI2_SCSI_STATUS_TASK_ABORTED:
5429         desc_scsi_status = "task aborted";
5430         break;
5431     default:
5432         desc_scsi_status = "unknown";
5433         break;
5434     }
5435 
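         /* build a space-separated list of the SCSI state flags that are set */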
5436     desc_scsi_state[0] = '\0';
5437     if (!scsi_state)
5438         desc_scsi_state = " ";
5439     if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5440         strcat(desc_scsi_state, "response info ");
5441     if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5442         strcat(desc_scsi_state, "state terminated ");
5443     if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
5444         strcat(desc_scsi_state, "no status ");
5445     if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
5446         strcat(desc_scsi_state, "autosense failed ");
5447     if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
5448         strcat(desc_scsi_state, "autosense valid ");
5449 
5450     scsi_print_command(scmd);
5451 
5452     if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
5453         ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
5454              device_str, (u64)priv_target->sas_address);
5455     } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
5456         pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
5457         if (pcie_device) {
5458             ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
5459                  (u64)pcie_device->wwid, pcie_device->port_num);
5460             if (pcie_device->enclosure_handle != 0)
5461                 ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
5462                      (u64)pcie_device->enclosure_logical_id,
5463                      pcie_device->slot);
5464             if (pcie_device->connector_name[0])
5465                 ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
5466                      pcie_device->enclosure_level,
5467                      pcie_device->connector_name);
5468             pcie_device_put(pcie_device);
5469         }
5470     } else {
5471         sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
5472         if (sas_device) {
5473             ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
5474                  (u64)sas_device->sas_address, sas_device->phy);
5475 
5476             _scsih_display_enclosure_chassis_info(ioc, sas_device,
5477                 NULL, NULL);
5478 
5479             sas_device_put(sas_device);
5480         }
5481     }
5482 
5483     ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
5484          le16_to_cpu(mpi_reply->DevHandle),
5485          desc_ioc_state, ioc_status, smid);
5486     ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
5487          scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
5488     ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
5489          le16_to_cpu(mpi_reply->TaskTag),
5490          le32_to_cpu(mpi_reply->TransferCount), scmd->result);
5491     ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
5492          desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
5493 
5494     if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5495         struct sense_info data;
5496         _scsih_normalize_sense(scmd->sense_buffer, &data);
5497         ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
5498              data.skey, data.asc, data.ascq,
5499              le32_to_cpu(mpi_reply->SenseCount));
5500     }
5501     if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5502         response_info = le32_to_cpu(mpi_reply->ResponseInfo);
5503         response_bytes = (u8 *)&response_info;
5504         _scsih_response_code(ioc, response_bytes[0]);
5505     }
5506 }
5507 
5508 /**
5509  * _scsih_turn_on_pfa_led - illuminate PFA LED
5510  * @ioc: per adapter object
5511  * @handle: device handle
5512  * Context: process
5513  */
5514 static void
5515 _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5516 {
5517     Mpi2SepReply_t mpi_reply;
5518     Mpi2SepRequest_t mpi_request;
5519     struct _sas_device *sas_device;
5520 
5521     sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
5522     if (!sas_device)
5523         return;
5524 
5525     memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5526     mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5527     mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5528     mpi_request.SlotStatus =
5529         cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5530     mpi_request.DevHandle = cpu_to_le16(handle);
5531     mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5532     if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5533         &mpi_request)) != 0) {
5534         ioc_err(ioc, "failure at %s:%d/%s()!\n",
5535             __FILE__, __LINE__, __func__);
5536         goto out;
5537     }
5538     sas_device->pfa_led_on = 1;
5539 
5540     if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5541         dewtprintk(ioc,
5542                ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5543                     le16_to_cpu(mpi_reply.IOCStatus),
5544                     le32_to_cpu(mpi_reply.IOCLogInfo)));
5545         goto out;
5546     }
5547 out:
5548     sas_device_put(sas_device);
5549 }
5550 
5551 /**
5552  * _scsih_turn_off_pfa_led - turn off Fault LED
5553  * @ioc: per adapter object
5554  * @sas_device: sas device whose PFA LED has to be turned off
5555  * Context: process
5556  */
5557 static void
5558 _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5559     struct _sas_device *sas_device)
5560 {
5561     Mpi2SepReply_t mpi_reply;
5562     Mpi2SepRequest_t mpi_request;
5563 
5564     memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5565     mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5566     mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5567     mpi_request.SlotStatus = 0;
5568     mpi_request.Slot = cpu_to_le16(sas_device->slot);
5569     mpi_request.DevHandle = 0;
5570     mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5571     mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5572     if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5573         &mpi_request)) != 0) {
5574         ioc_err(ioc, "failure at %s:%d/%s()!\n",
5575             __FILE__, __LINE__, __func__);
5576         return;
5577     }
5578 
5579     if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5580         dewtprintk(ioc,
5581                ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5582                     le16_to_cpu(mpi_reply.IOCStatus),
5583                     le32_to_cpu(mpi_reply.IOCLogInfo)));
5584         return;
5585     }
5586 }
5587 
5588 /**
5589  * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5590  * @ioc: per adapter object
5591  * @handle: device handle
5592  * Context: interrupt.
5593  */
5594 static void
5595 _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5596 {
5597     struct fw_event_work *fw_event;
5598 
5599     fw_event = alloc_fw_event_work(0);
5600     if (!fw_event)
5601         return;
5602     fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5603     fw_event->device_handle = handle;
5604     fw_event->ioc = ioc;
5605     _scsih_fw_event_add(ioc, fw_event);
5606     fw_event_work_put(fw_event);
5607 }
5608 
5609 /**
5610  * _scsih_smart_predicted_fault - process smart errors
5611  * @ioc: per adapter object
5612  * @handle: device handle
5613  * Context: interrupt.
5614  */
5615 static void
5616 _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5617 {
5618     struct scsi_target *starget;
5619     struct MPT3SAS_TARGET *sas_target_priv_data;
5620     Mpi2EventNotificationReply_t *event_reply;
5621     Mpi2EventDataSasDeviceStatusChange_t *event_data;
5622     struct _sas_device *sas_device;
5623     ssize_t sz;
5624     unsigned long flags;
5625 
5626     /* only handle non-raid devices */
5627     spin_lock_irqsave(&ioc->sas_device_lock, flags);
5628     sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5629     if (!sas_device)
5630         goto out_unlock;
5631 
5632     starget = sas_device->starget;
5633     sas_target_priv_data = starget->hostdata;
5634 
5635     if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
5636        ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5637         goto out_unlock;
5638 
5639     _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5640 
5641     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5642 
5643     if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
5644         _scsih_send_event_to_turn_on_pfa_led(ioc, handle);
5645 
5646     /* insert into event log */
5647     sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5648          sizeof(Mpi2EventDataSasDeviceStatusChange_t);
5649     event_reply = kzalloc(sz, GFP_ATOMIC);
5650     if (!event_reply) {
5651         ioc_err(ioc, "failure at %s:%d/%s()!\n",
5652             __FILE__, __LINE__, __func__);
5653         goto out;
5654     }
5655 
5656     event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5657     event_reply->Event =
5658         cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5659     event_reply->MsgLength = sz/4;
5660     event_reply->EventDataLength =
5661         cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5662     event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5663         event_reply->EventData;
5664     event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
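         /* ASC 0x5D: failure prediction threshold exceeded */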
5665     event_data->ASC = 0x5D;
5666     event_data->DevHandle = cpu_to_le16(handle);
5667     event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5668     mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5669     kfree(event_reply);
5670 out:
5671     if (sas_device)
5672         sas_device_put(sas_device);
5673     return;
5674 
5675 out_unlock:
5676     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5677     goto out;
5678 }
5679 
5680 /**
5681  * _scsih_io_done - scsi request callback
5682  * @ioc: per adapter object
5683  * @smid: system request message index
5684  * @msix_index: MSIX table index supplied by the OS
5685  * @reply: reply message frame (lower 32bit addr)
5686  *
5687  * Callback handler when using scsih_qcmd.
5688  *
5689  * Return: 1 meaning mf should be freed from _base_interrupt
5690  *         0 means the mf is freed from this function.
5691  */
5692 static u8
5693 _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5694 {
5695     Mpi25SCSIIORequest_t *mpi_request;
5696     Mpi2SCSIIOReply_t *mpi_reply;
5697     struct scsi_cmnd *scmd;
5698     struct scsiio_tracker *st;
5699     u16 ioc_status;
5700     u32 xfer_cnt;
5701     u8 scsi_state;
5702     u8 scsi_status;
5703     u32 log_info;
5704     struct MPT3SAS_DEVICE *sas_device_priv_data;
5705     u32 response_code = 0;
5706 
5707     mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5708 
5709     scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5710     if (scmd == NULL)
5711         return 1;
5712 
5713     _scsih_set_satl_pending(scmd, false);
5714 
5715     mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5716 
5717     if (mpi_reply == NULL) {
5718         scmd->result = DID_OK << 16;
5719         goto out;
5720     }
5721 
5722     sas_device_priv_data = scmd->device->hostdata;
5723     if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
5724          sas_device_priv_data->sas_target->deleted) {
5725         scmd->result = DID_NO_CONNECT << 16;
5726         goto out;
5727     }
5728     ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
5729 
5730     /*
5731      * WARPDRIVE: If direct_io is set then it is directIO,
5732      * the failed direct I/O should be redirected to volume
5733      */
5734     st = scsi_cmd_priv(scmd);
5735     if (st->direct_io &&
5736          ((ioc_status & MPI2_IOCSTATUS_MASK)
5737           != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
5738         st->direct_io = 0;
5739         st->scmd = scmd;
5740         memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5741         mpi_request->DevHandle =
5742             cpu_to_le16(sas_device_priv_data->sas_target->handle);
5743         ioc->put_smid_scsi_io(ioc, smid,
5744             sas_device_priv_data->sas_target->handle);
5745         return 0;
5746     }
5747     /* turning off TLR */
5748     scsi_state = mpi_reply->SCSIState;
5749     if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5750         response_code =
5751             le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
5752     if (!sas_device_priv_data->tlr_snoop_check) {
5753         sas_device_priv_data->tlr_snoop_check++;
5754         if ((!ioc->is_warpdrive &&
5755             !scsih_is_raid(&scmd->device->sdev_gendev) &&
5756             !scsih_is_nvme(&scmd->device->sdev_gendev))
5757             && sas_is_tlr_enabled(scmd->device) &&
5758             response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
5759             sas_disable_tlr(scmd->device);
5760             sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
5761         }
5762     }
5763 
5764     xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
5765     scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
5766     if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
5767         log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
5768     else
5769         log_info = 0;
5770     ioc_status &= MPI2_IOCSTATUS_MASK;
5771     scsi_status = mpi_reply->SCSIStatus;
5772 
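         /*
          * A data underrun that moved no data while the target reported
          * busy, reservation conflict or task set full is treated as
          * success so that the SCSI status itself is propagated to the
          * midlayer.
          */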
5773     if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
5774         (scsi_status == MPI2_SCSI_STATUS_BUSY ||
5775          scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
5776          scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
5777         ioc_status = MPI2_IOCSTATUS_SUCCESS;
5778     }
5779 
5780     if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5781         struct sense_info data;
5782         const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
5783             smid);
5784         u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
5785             le32_to_cpu(mpi_reply->SenseCount));
5786         memcpy(scmd->sense_buffer, sense_data, sz);
5787         _scsih_normalize_sense(scmd->sense_buffer, &data);
5788         /* failure prediction threshold exceeded */
5789         if (data.asc == 0x5D)
5790             _scsih_smart_predicted_fault(ioc,
5791                 le16_to_cpu(mpi_reply->DevHandle));
5792         mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
5793 
5794         if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
5795              ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
5796              (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
5797              (scmd->sense_buffer[2] == HARDWARE_ERROR)))
5798             _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
5799     }
5800     switch (ioc_status) {
5801     case MPI2_IOCSTATUS_BUSY:
5802     case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5803         scmd->result = SAM_STAT_BUSY;
5804         break;
5805 
5806     case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5807         scmd->result = DID_NO_CONNECT << 16;
5808         break;
5809 
5810     case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5811         if (sas_device_priv_data->block) {
5812             scmd->result = DID_TRANSPORT_DISRUPTED << 16;
5813             goto out;
5814         }
5815         if (log_info == 0x31110630) {
5816             if (scmd->retries > 2) {
5817                 scmd->result = DID_NO_CONNECT << 16;
5818                 scsi_device_set_state(scmd->device,
5819                     SDEV_OFFLINE);
5820             } else {
5821                 scmd->result = DID_SOFT_ERROR << 16;
5822                 scmd->device->expecting_cc_ua = 1;
5823             }
5824             break;
5825         } else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
5826             scmd->result = DID_RESET << 16;
5827             break;
5828         } else if ((scmd->device->channel == RAID_CHANNEL) &&
5829            (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5830            MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5831             scmd->result = DID_RESET << 16;
5832             break;
5833         }
5834         scmd->result = DID_SOFT_ERROR << 16;
5835         break;
5836     case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5837     case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5838         scmd->result = DID_RESET << 16;
5839         break;
5840 
5841     case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5842         if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
5843             scmd->result = DID_SOFT_ERROR << 16;
5844         else
5845             scmd->result = (DID_OK << 16) | scsi_status;
5846         break;
5847 
5848     case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5849         scmd->result = (DID_OK << 16) | scsi_status;
5850 
5851         if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
5852             break;
5853 
5854         if (xfer_cnt < scmd->underflow) {
5855             if (scsi_status == SAM_STAT_BUSY)
5856                 scmd->result = SAM_STAT_BUSY;
5857             else
5858                 scmd->result = DID_SOFT_ERROR << 16;
5859         } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5860              MPI2_SCSI_STATE_NO_SCSI_STATUS))
5861             scmd->result = DID_SOFT_ERROR << 16;
5862         else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5863             scmd->result = DID_RESET << 16;
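             /*
              * A REPORT LUNS that transferred no data is completed as a
              * check condition with ILLEGAL REQUEST sense (ASC 0x20,
              * invalid command operation code).
              */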
5864         else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
5865             mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
5866             mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
5867             scsi_build_sense(scmd, 0, ILLEGAL_REQUEST,
5868                      0x20, 0);
5869         }
5870         break;
5871 
5872     case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5873         scsi_set_resid(scmd, 0);
5874         fallthrough;
5875     case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5876     case MPI2_IOCSTATUS_SUCCESS:
5877         scmd->result = (DID_OK << 16) | scsi_status;
5878         if (response_code ==
5879             MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
5880             (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5881              MPI2_SCSI_STATE_NO_SCSI_STATUS)))
5882             scmd->result = DID_SOFT_ERROR << 16;
5883         else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5884             scmd->result = DID_RESET << 16;
5885         break;
5886 
5887     case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5888     case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5889     case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5890         _scsih_eedp_error_handling(scmd, ioc_status);
5891         break;
5892 
5893     case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5894     case MPI2_IOCSTATUS_INVALID_FUNCTION:
5895     case MPI2_IOCSTATUS_INVALID_SGL:
5896     case MPI2_IOCSTATUS_INTERNAL_ERROR:
5897     case MPI2_IOCSTATUS_INVALID_FIELD:
5898     case MPI2_IOCSTATUS_INVALID_STATE:
5899     case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5900     case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5901     case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5902     default:
5903         scmd->result = DID_SOFT_ERROR << 16;
5904         break;
5905 
5906     }
5907 
5908     if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
5909         _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
5910 
5911  out:
5912 
5913     scsi_dma_unmap(scmd);
5914     mpt3sas_base_free_smid(ioc, smid);
5915     scsi_done(scmd);
5916     return 0;
5917 }
5918 
5919 /**
5920  * _scsih_update_vphys_after_reset - update the Port's
5921  *          vphys_list after reset
5922  * @ioc: per adapter object
5923  *
5924  * Returns nothing.
5925  */
5926 static void
5927 _scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
5928 {
5929     u16 sz, ioc_status;
5930     int i;
5931     Mpi2ConfigReply_t mpi_reply;
5932     Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5933     u16 attached_handle;
5934     u64 attached_sas_addr;
5935     u8 found = 0, port_id;
5936     Mpi2SasPhyPage0_t phy_pg0;
5937     struct hba_port *port, *port_next, *mport;
5938     struct virtual_phy *vphy, *vphy_next;
5939     struct _sas_device *sas_device;
5940 
5941     /*
5942      * Mark all the vphys objects as dirty.
5943      */
5944     list_for_each_entry_safe(port, port_next,
5945         &ioc->port_table_list, list) {
5946         if (!port->vphys_mask)
5947             continue;
5948         list_for_each_entry_safe(vphy, vphy_next,
5949             &port->vphys_list, list) {
5950             vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
5951         }
5952     }
5953 
5954     /*
5955      * Read SASIOUnitPage0 to get each HBA Phy's data.
5956      */
5957     sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
5958         (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
5959     sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5960     if (!sas_iounit_pg0) {
5961         ioc_err(ioc, "failure at %s:%d/%s()!\n",
5962             __FILE__, __LINE__, __func__);
5963         return;
5964     }
5965     if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5966         sas_iounit_pg0, sz)) != 0)
5967         goto out;
5968     ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5969     if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5970         goto out;
5971     /*
5972      * Loop over each HBA Phy.
5973      */
5974     for (i = 0; i < ioc->sas_hba.num_phys; i++) {
5975         /*
5976          * Skip this Phy unless its negotiated link rate is at least 1.5G.
5977          */
5978         if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
5979             MPI2_SAS_NEG_LINK_RATE_1_5)
5980             continue;
5981         /*
5982          * Check whether the Phy is connected to an SEP device.
5983          * If it is, read the Phy's SASPHYPage0 data to determine
5984          * whether the Phy is a virtual Phy.  If it is a virtual
5985          * Phy, then the attached remote device is confirmed to be
5986          * the HBA's vSES device.
5987          */
5988         if (!(le32_to_cpu(
5989             sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
5990             MPI2_SAS_DEVICE_INFO_SEP))
5991             continue;
5992 
5993         if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
5994             i))) {
5995             ioc_err(ioc, "failure at %s:%d/%s()!\n",
5996                 __FILE__, __LINE__, __func__);
5997             continue;
5998         }
5999 
6000         if (!(le32_to_cpu(phy_pg0.PhyInfo) &
6001             MPI2_SAS_PHYINFO_VIRTUAL_PHY))
6002             continue;
6003         /*
6004          * Get the vSES device's SAS Address.
6005          */
6006         attached_handle = le16_to_cpu(
6007             sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6008         if (_scsih_get_sas_address(ioc, attached_handle,
6009             &attached_sas_addr) != 0) {
6010             ioc_err(ioc, "failure at %s:%d/%s()!\n",
6011                 __FILE__, __LINE__, __func__);
6012             continue;
6013         }
6014 
6015         found = 0;
6016         port = port_next = NULL;
6017         /*
6018          * Loop over each virtual_phy object from
6019          * each port's vphys_list.
6020          */
6021         list_for_each_entry_safe(port,
6022             port_next, &ioc->port_table_list, list) {
6023             if (!port->vphys_mask)
6024                 continue;
6025             list_for_each_entry_safe(vphy, vphy_next,
6026                 &port->vphys_list, list) {
6027                 /*
6028                  * Continue with next virtual_phy object
6029                  * if the object is not marked as dirty.
6030                  */
6031                 if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
6032                     continue;
6033 
6034                 /*
6035                  * Continue with next virtual_phy object
6036                  * if the object's SAS Address is not equal
6037                  * to the current Phy's vSES device SAS Address.
6038                  */
6039                 if (vphy->sas_address != attached_sas_addr)
6040                     continue;
6041                 /*
6042                  * Enable current Phy number bit in object's
6043                  * phy_mask field.
6044                  */
6045                 if (!(vphy->phy_mask & (1 << i)))
6046                     vphy->phy_mask = (1 << i);
6047                 /*
6048                  * Get the hba_port object from the hba_port table
6049                  * corresponding to the current Phy's Port ID. If
6050                  * there is no hba_port object for this Port ID,
6051                  * then create a new hba_port object and add it to
6052                  * the hba_port table.
6053                  */
6054                 port_id = sas_iounit_pg0->PhyData[i].Port;
6055                 mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
6056                 if (!mport) {
6057                     mport = kzalloc(
6058                         sizeof(struct hba_port), GFP_KERNEL);
6059                     if (!mport)
6060                         break;
6061                     mport->port_id = port_id;
6062                     ioc_info(ioc,
6063                         "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
6064                         __func__, mport, mport->port_id);
6065                     list_add_tail(&mport->list,
6066                         &ioc->port_table_list);
6067                 }
6068                 /*
6069                  * If the mport and port pointers refer to different
6070                  * hba_port objects, the vSES device's Port ID has
6071                  * changed after the reset, so move the current
6072                  * virtual_phy object from port's vphys_list to
6073                  * mport's vphys_list.
6074                  */
6075                 if (port != mport) {
6076                     if (!mport->vphys_mask)
6077                         INIT_LIST_HEAD(
6078                             &mport->vphys_list);
6079                     mport->vphys_mask |= (1 << i);
6080                     port->vphys_mask &= ~(1 << i);
6081                     list_move(&vphy->list,
6082                         &mport->vphys_list);
6083                     sas_device = mpt3sas_get_sdev_by_addr(
6084                         ioc, attached_sas_addr, port);
6085                     if (sas_device)
6086                         sas_device->port = mport;
6087                 }
6088                 /*
6089                  * Earlier, while updating the hba_port table,
6090                  * it was determined that there is no other
6091                  * directly attached device with mport's Port ID,
6092                  * hence mport was marked as dirty. Only the vSES
6093                  * device has this Port ID, so unmark the mport
6094                  * as dirty here.
6095                  */
6096                 if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
6097                     mport->sas_address = 0;
6098                     mport->phy_mask = 0;
6099                     mport->flags &=
6100                         ~HBA_PORT_FLAG_DIRTY_PORT;
6101                 }
6102                 /*
6103                  * Unmark current virtual_phy object as dirty.
6104                  */
6105                 vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
6106                 found = 1;
6107                 break;
6108             }
6109             if (found)
6110                 break;
6111         }
6112     }
6113 out:
6114     kfree(sas_iounit_pg0);
6115 }
6116 
6117 /**
6118  * _scsih_get_port_table_after_reset - Construct temporary port table
6119  * @ioc: per adapter object
6120  * @port_table: address where port table needs to be constructed
6121  *
6122  * Return: number of HBA port entries available after reset.
6123  */
6124 static int
6125 _scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
6126     struct hba_port *port_table)
6127 {
6128     u16 sz, ioc_status;
6129     int i, j;
6130     Mpi2ConfigReply_t mpi_reply;
6131     Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6132     u16 attached_handle;
6133     u64 attached_sas_addr;
6134     u8 found = 0, port_count = 0, port_id;
6135 
6136     sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
6137         * sizeof(Mpi2SasIOUnit0PhyData_t));
6138     sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6139     if (!sas_iounit_pg0) {
6140         ioc_err(ioc, "failure at %s:%d/%s()!\n",
6141             __FILE__, __LINE__, __func__);
6142         return port_count;
6143     }
6144 
6145     if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6146         sas_iounit_pg0, sz)) != 0)
6147         goto out;
6148     ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6149     if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6150         goto out;
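    /*
     * Walk every HBA phy that has negotiated at least a 1.5G link and
     * group phys sharing the same Port ID and attached SAS address into
     * one temporary hba_port entry, accumulating the phy bits in phy_mask.
     */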
6151     for (i = 0; i < ioc->sas_hba.num_phys; i++) {
6152         found = 0;
6153         if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
6154             MPI2_SAS_NEG_LINK_RATE_1_5)
6155             continue;
6156         attached_handle =
6157             le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6158         if (_scsih_get_sas_address(
6159             ioc, attached_handle, &attached_sas_addr) != 0) {
6160             ioc_err(ioc, "failure at %s:%d/%s()!\n",
6161                 __FILE__, __LINE__, __func__);
6162             continue;
6163         }
6164 
6165         for (j = 0; j < port_count; j++) {
6166             port_id = sas_iounit_pg0->PhyData[i].Port;
6167             if (port_table[j].port_id == port_id &&
6168                 port_table[j].sas_address == attached_sas_addr) {
6169                 port_table[j].phy_mask |= (1 << i);
6170                 found = 1;
6171                 break;
6172             }
6173         }
6174 
6175         if (found)
6176             continue;
6177 
6178         port_id = sas_iounit_pg0->PhyData[i].Port;
6179         port_table[port_count].port_id = port_id;
6180         port_table[port_count].phy_mask = (1 << i);
6181         port_table[port_count].sas_address = attached_sas_addr;
6182         port_count++;
6183     }
6184 out:
6185     kfree(sas_iounit_pg0);
6186     return port_count;
6187 }
6188 
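/*
 * Match-quality codes returned by _scsih_look_and_get_matched_port_entry(),
 * listed from the strongest match (SAS address plus full phy mask) down to
 * the weakest (SAS address only), after NOT_MATCHED.
 */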
6189 enum hba_port_matched_codes {
6190     NOT_MATCHED = 0,
6191     MATCHED_WITH_ADDR_AND_PHYMASK,
6192     MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT,
6193     MATCHED_WITH_ADDR_AND_SUBPHYMASK,
6194     MATCHED_WITH_ADDR,
6195 };
6196 
6197 /**
6198  * _scsih_look_and_get_matched_port_entry - Get matched hba port entry
6199  *                  from HBA port table
6200  * @ioc: per adapter object
6201  * @port_entry: hba port entry from the temporary port table that needs to
6202  *      be matched against entries in the HBA port table
6203  * @matched_port_entry: save matched hba port entry here
6204  * @count: count of matched entries
6205  *
6206  * Return: type of matched entry found.
6207  */
6208 static enum hba_port_matched_codes
6209 _scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
6210     struct hba_port *port_entry,
6211     struct hba_port **matched_port_entry, int *count)
6212 {
6213     struct hba_port *port_table_entry, *matched_port = NULL;
6214     enum hba_port_matched_codes matched_code = NOT_MATCHED;
6215     int lcount = 0;
6216     *matched_port_entry = NULL;
6217 
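    /*
     * Only entries still marked dirty (i.e. not yet re-claimed after the
     * reset) are candidates; prefer the strongest match and fall back to
     * weaker ones, counting how many address-only matches were seen.
     */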
6218     list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6219         if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
6220             continue;
6221 
6222         if ((port_table_entry->sas_address == port_entry->sas_address)
6223             && (port_table_entry->phy_mask == port_entry->phy_mask)) {
6224             matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
6225             matched_port = port_table_entry;
6226             break;
6227         }
6228 
6229         if ((port_table_entry->sas_address == port_entry->sas_address)
6230             && (port_table_entry->phy_mask & port_entry->phy_mask)
6231             && (port_table_entry->port_id == port_entry->port_id)) {
6232             matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
6233             matched_port = port_table_entry;
6234             continue;
6235         }
6236 
6237         if ((port_table_entry->sas_address == port_entry->sas_address)
6238             && (port_table_entry->phy_mask & port_entry->phy_mask)) {
6239             if (matched_code ==
6240                 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
6241                 continue;
6242             matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
6243             matched_port = port_table_entry;
6244             continue;
6245         }
6246 
6247         if (port_table_entry->sas_address == port_entry->sas_address) {
6248             if (matched_code ==
6249                 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
6250                 continue;
6251             if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
6252                 continue;
6253             matched_code = MATCHED_WITH_ADDR;
6254             matched_port = port_table_entry;
6255             lcount++;
6256         }
6257     }
6258 
6259     *matched_port_entry = matched_port;
6260     if (matched_code ==  MATCHED_WITH_ADDR)
6261         *count = lcount;
6262     return matched_code;
6263 }
6264 
6265 /**
6266  * _scsih_del_phy_part_of_anther_port - remove the phy if it
6267  *              is part of another port
6268  * @ioc: per adapter object
6269  * @port_table: port table after reset
6270  * @index: hba port entry index
6271  * @port_count: number of ports available after host reset
6272  * @offset: HBA phy bit offset
6273  *
6274  */
6275 static void
6276 _scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
6277     struct hba_port *port_table,
6278     int index, u8 port_count, int offset)
6279 {
6280     struct _sas_node *sas_node = &ioc->sas_hba;
6281     u32 i, found = 0;
6282 
6283     for (i = 0; i < port_count; i++) {
6284         if (i == index)
6285             continue;
6286 
6287         if (port_table[i].phy_mask & (1 << offset)) {
6288             mpt3sas_transport_del_phy_from_an_existing_port(
6289                 ioc, sas_node, &sas_node->phy[offset]);
6290             found = 1;
6291             break;
6292         }
6293     }
6294     if (!found)
6295         port_table[index].phy_mask |= (1 << offset);
6296 }
6297 
6298 /**
6299  * _scsih_add_or_del_phys_from_existing_port - add/remove phys to/from
6300  *                      the right port
6301  * @ioc: per adapter object
6302  * @hba_port_entry: hba port table entry
6303  * @port_table: temporary port table
6304  * @index: hba port entry index
6305  * @port_count: number of ports available after host reset
6306  *
6307  */
6308 static void
6309 _scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
6310     struct hba_port *hba_port_entry, struct hba_port *port_table,
6311     int index, int port_count)
6312 {
6313     u32 phy_mask, offset = 0;
6314     struct _sas_node *sas_node = &ioc->sas_hba;
6315 
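    /*
     * XOR of the matched entry's old phy mask with the post-reset mask
     * yields the phys whose port membership changed. For example, old
     * mask 0x0f vs new mask 0x3c gives 0x33: phys 0 and 1 dropped out of
     * this port while phys 4 and 5 joined it.
     */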
6316     phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;
6317 
6318     for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
6319         if (phy_mask & (1 << offset)) {
6320             if (!(port_table[index].phy_mask & (1 << offset))) {
6321                 _scsih_del_phy_part_of_anther_port(
6322                     ioc, port_table, index, port_count,
6323                     offset);
6324                 continue;
6325             }
6326             if (sas_node->phy[offset].phy_belongs_to_port)
6327                 mpt3sas_transport_del_phy_from_an_existing_port(
6328                     ioc, sas_node, &sas_node->phy[offset]);
6329             mpt3sas_transport_add_phy_to_an_existing_port(
6330                 ioc, sas_node, &sas_node->phy[offset],
6331                 hba_port_entry->sas_address,
6332                 hba_port_entry);
6333         }
6334     }
6335 }
6336 
6337 /**
6338  * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty.
6339  * @ioc: per adapter object
6340  *
6341  * Returns nothing.
6342  */
6343 static void
6344 _scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
6345 {
6346     struct hba_port *port, *port_next;
6347     struct virtual_phy *vphy, *vphy_next;
6348 
6349     list_for_each_entry_safe(port, port_next,
6350         &ioc->port_table_list, list) {
6351         if (!port->vphys_mask)
6352             continue;
6353         list_for_each_entry_safe(vphy, vphy_next,
6354             &port->vphys_list, list) {
6355             if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
6356                 drsprintk(ioc, ioc_info(ioc,
6357                     "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
6358                     vphy, port->port_id,
6359                     vphy->phy_mask));
6360                 port->vphys_mask &= ~vphy->phy_mask;
6361                 list_del(&vphy->list);
6362                 kfree(vphy);
6363             }
6364         }
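        /*
         * If the port is left with no virtual phys and no directly
         * attached SAS address, mark it dirty so it can be pruned by
         * _scsih_del_dirty_port_entries().
         */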
6365         if (!port->vphys_mask && !port->sas_address)
6366             port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6367     }
6368 }
6369 
6370 /**
6371  * _scsih_del_dirty_port_entries - delete dirty port entries from port list
6372  *                  after host reset
6373  * @ioc: per adapter object
6374  *
6375  */
6376 static void
6377 _scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
6378 {
6379     struct hba_port *port, *port_next;
6380 
6381     list_for_each_entry_safe(port, port_next,
6382         &ioc->port_table_list, list) {
6383         if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
6384             port->flags & HBA_PORT_FLAG_NEW_PORT)
6385             continue;
6386 
6387         drsprintk(ioc, ioc_info(ioc,
6388             "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
6389             port, port->port_id, port->phy_mask));
6390         list_del(&port->list);
6391         kfree(port);
6392     }
6393 }
6394 
6395 /**
6396  * _scsih_sas_port_refresh - Update HBA port table after host reset
6397  * @ioc: per adapter object
6398  */
6399 static void
6400 _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
6401 {
6402     u32 port_count = 0;
6403     struct hba_port *port_table;
6404     struct hba_port *port_table_entry;
6405     struct hba_port *port_entry = NULL;
6406     int i, j, count = 0, lcount = 0;
6407     int ret;
6408     u64 sas_addr;
6409     u8 num_phys;
6410 
6411     drsprintk(ioc, ioc_info(ioc,
6412         "updating ports for sas_host(0x%016llx)\n",
6413         (unsigned long long)ioc->sas_hba.sas_address));
6414 
6415     mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
6416     if (!num_phys) {
6417         ioc_err(ioc, "failure at %s:%d/%s()!\n",
6418             __FILE__, __LINE__, __func__);
6419         return;
6420     }
6421 
6422     if (num_phys > ioc->sas_hba.nr_phys_allocated) {
6423         ioc_err(ioc, "failure at %s:%d/%s()!\n",
6424            __FILE__, __LINE__, __func__);
6425         return;
6426     }
6427     ioc->sas_hba.num_phys = num_phys;
6428 
6429     port_table = kcalloc(ioc->sas_hba.num_phys,
6430         sizeof(struct hba_port), GFP_KERNEL);
6431     if (!port_table)
6432         return;
6433 
6434     port_count = _scsih_get_port_table_after_reset(ioc, port_table);
6435     if (!port_count)
6436         goto out_free;
6437 
6438     drsprintk(ioc, ioc_info(ioc, "New Port table\n"));
6439     for (j = 0; j < port_count; j++)
6440         drsprintk(ioc, ioc_info(ioc,
6441             "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6442             port_table[j].port_id,
6443             port_table[j].phy_mask, port_table[j].sas_address));
6444 
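    /*
     * Mark every existing hba_port entry dirty; entries re-matched in the
     * loop below get the flag cleared again, and whatever stays dirty is
     * removed later by _scsih_del_dirty_port_entries().
     */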
6445     list_for_each_entry(port_table_entry, &ioc->port_table_list, list)
6446         port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6447 
6448     drsprintk(ioc, ioc_info(ioc, "Old Port table\n"));
6449     port_table_entry = NULL;
6450     list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6451         drsprintk(ioc, ioc_info(ioc,
6452             "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6453             port_table_entry->port_id,
6454             port_table_entry->phy_mask,
6455             port_table_entry->sas_address));
6456     }
6457 
6458     for (j = 0; j < port_count; j++) {
6459         ret = _scsih_look_and_get_matched_port_entry(ioc,
6460             &port_table[j], &port_entry, &count);
6461         if (!port_entry) {
6462             drsprintk(ioc, ioc_info(ioc,
6463                 "No Matched entry for sas_addr(0x%16llx), Port:%d\n",
6464                 port_table[j].sas_address,
6465                 port_table[j].port_id));
6466             continue;
6467         }
6468 
6469         switch (ret) {
6470         case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
6471         case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
6472             _scsih_add_or_del_phys_from_existing_port(ioc,
6473                 port_entry, port_table, j, port_count);
6474             break;
6475         case MATCHED_WITH_ADDR:
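            /*
             * Address-only match: count how many post-reset entries share
             * this SAS address. If either table has more than one
             * candidate the mapping is ambiguous, so leave this entry
             * unmatched.
             */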
6476             sas_addr = port_table[j].sas_address;
6477             for (i = 0; i < port_count; i++) {
6478                 if (port_table[i].sas_address == sas_addr)
6479                     lcount++;
6480             }
6481 
6482             if (count > 1 || lcount > 1)
6483                 port_entry = NULL;
6484             else
6485                 _scsih_add_or_del_phys_from_existing_port(ioc,
6486                     port_entry, port_table, j, port_count);
6487         }
6488 
6489         if (!port_entry)
6490             continue;
6491 
6492         if (port_entry->port_id != port_table[j].port_id)
6493             port_entry->port_id = port_table[j].port_id;
6494         port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT;
6495         port_entry->phy_mask = port_table[j].phy_mask;
6496     }
6497 out_free:
6498     kfree(port_table);
6499 }
6500 
6501 /**
6502  * _scsih_alloc_vphy - allocate virtual_phy object
6503  * @ioc: per adapter object
6504  * @port_id: Port ID number
6505  * @phy_num: HBA Phy number
6506  *
6507  * Returns allocated virtual_phy object.
6508  */
6509 static struct virtual_phy *
6510 _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
6511 {
6512     struct virtual_phy *vphy;
6513     struct hba_port *port;
6514 
6515     port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6516     if (!port)
6517         return NULL;
6518 
6519     vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num);
6520     if (!vphy) {
6521         vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL);
6522         if (!vphy)
6523             return NULL;
6524 
6525         if (!port->vphys_mask)
6526             INIT_LIST_HEAD(&port->vphys_list);
6527 
6528         /*
6529          * Enable bit corresponding to HBA phy number on its
6530          * parent hba_port object's vphys_mask field.
6531          */
6532         port->vphys_mask |= (1 << phy_num);
6533         vphy->phy_mask |= (1 << phy_num);
6534 
6535         list_add_tail(&vphy->list, &port->vphys_list);
6536 
6537         ioc_info(ioc,
6538             "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
6539             vphy, port->port_id, phy_num);
6540     }
6541     return vphy;
6542 }
6543 
6544 /**
6545  * _scsih_sas_host_refresh - refreshing sas host object contents
6546  * @ioc: per adapter object
6547  * Context: user
6548  *
6549  * During port enable, fw will send topology events for every device. It's
6550  * possible that the handles may change from the previous setting, so this
6551  * code keeps the handles updated if they change.
6552  */
6553 static void
6554 _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
6555 {
6556     u16 sz;
6557     u16 ioc_status;
6558     int i;
6559     Mpi2ConfigReply_t mpi_reply;
6560     Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6561     u16 attached_handle;
6562     u8 link_rate, port_id;
6563     struct hba_port *port;
6564     Mpi2SasPhyPage0_t phy_pg0;
6565 
6566     dtmprintk(ioc,
6567           ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
6568                (u64)ioc->sas_hba.sas_address));
6569 
6570     sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
6571         * sizeof(Mpi2SasIOUnit0PhyData_t));
6572     sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6573     if (!sas_iounit_pg0) {
6574         ioc_err(ioc, "failure at %s:%d/%s()!\n",
6575             __FILE__, __LINE__, __func__);
6576         return;
6577     }
6578 
6579     if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6580         sas_iounit_pg0, sz)) != 0)
6581         goto out;
6582     ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6583     if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6584         goto out;
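    /*
     * Walk each HBA phy: refresh the controller handle, create hba_port
     * entries for any new Port IDs, detect vSES virtual phys, add phys
     * that appeared after a firmware change, and push updated link state
     * to the SAS transport layer.
     */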
6585     for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
6586         link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
6587         if (i == 0)
6588             ioc->sas_hba.handle = le16_to_cpu(
6589                 sas_iounit_pg0->PhyData[0].ControllerDevHandle);
6590         port_id = sas_iounit_pg0->PhyData[i].Port;
6591         if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
6592             port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
6593             if (!port)
6594                 goto out;
6595 
6596             port->port_id = port_id;
6597             ioc_info(ioc,
6598                 "hba_port entry: %p, port: %d is added to hba_port list\n",
6599                 port, port->port_id);
6600             if (ioc->shost_recovery)
6601                 port->flags = HBA_PORT_FLAG_NEW_PORT;
6602             list_add_tail(&port->list, &ioc->port_table_list);
6603         }
6604         /*
6605          * Check whether current Phy belongs to HBA vSES device or not.
6606          */
6607         if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
6608             MPI2_SAS_DEVICE_INFO_SEP &&
6609             (link_rate >=  MPI2_SAS_NEG_LINK_RATE_1_5)) {
6610             if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
6611                 &phy_pg0, i))) {
6612                 ioc_err(ioc,
6613                     "failure at %s:%d/%s()!\n",
6614                      __FILE__, __LINE__, __func__);
6615                 goto out;
6616             }
6617             if (!(le32_to_cpu(phy_pg0.PhyInfo) &
6618                 MPI2_SAS_PHYINFO_VIRTUAL_PHY))
6619                 continue;
6620             /*
6621              * Allocate a virtual_phy object for vSES device, if
6622              * this vSES device is hot added.
6623              */
6624             if (!_scsih_alloc_vphy(ioc, port_id, i))
6625                 goto out;
6626             ioc->sas_hba.phy[i].hba_vphy = 1;
6627         }
6628 
6629         /*
6630          * Add new HBA phys to STL if these new phys got added as part
6631          * of HBA Firmware upgrade/downgrade operation.
6632          */
6633         if (!ioc->sas_hba.phy[i].phy) {
6634             if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
6635                             &phy_pg0, i))) {
6636                 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6637                     __FILE__, __LINE__, __func__);
6638                 continue;
6639             }
6640             ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6641                 MPI2_IOCSTATUS_MASK;
6642             if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6643                 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6644                     __FILE__, __LINE__, __func__);
6645                 continue;
6646             }
6647             ioc->sas_hba.phy[i].phy_id = i;
6648             mpt3sas_transport_add_host_phy(ioc,
6649                 &ioc->sas_hba.phy[i], phy_pg0,
6650                 ioc->sas_hba.parent_dev);
6651             continue;
6652         }
6653         ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
6654         attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
6655             AttachedDevHandle);
6656         if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
6657             link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
6658         ioc->sas_hba.phy[i].port =
6659             mpt3sas_get_port_by_id(ioc, port_id, 0);
6660         mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
6661             attached_handle, i, link_rate,
6662             ioc->sas_hba.phy[i].port);
6663     }
6664     /*
6665      * Clear the phy details if this phy got disabled as part of
6666      * HBA Firmware upgrade/downgrade operation.
6667      */
6668     for (i = ioc->sas_hba.num_phys;
6669          i < ioc->sas_hba.nr_phys_allocated; i++) {
6670         if (ioc->sas_hba.phy[i].phy &&
6671             ioc->sas_hba.phy[i].phy->negotiated_linkrate >=
6672             SAS_LINK_RATE_1_5_GBPS)
6673             mpt3sas_transport_update_links(ioc,
6674                 ioc->sas_hba.sas_address, 0, i,
6675                 MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL);
6676     }
6677  out:
6678     kfree(sas_iounit_pg0);
6679 }
6680 
6681 /**
6682  * _scsih_sas_host_add - create sas host object
6683  * @ioc: per adapter object
6684  *
6685  * Creating host side data object, stored in ioc->sas_hba
6686  */
6687 static void
6688 _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
6689 {
6690     int i;
6691     Mpi2ConfigReply_t mpi_reply;
6692     Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6693     Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
6694     Mpi2SasPhyPage0_t phy_pg0;
6695     Mpi2SasDevicePage0_t sas_device_pg0;
6696     Mpi2SasEnclosurePage0_t enclosure_pg0;
6697     u16 ioc_status;
6698     u16 sz;
6699     u8 device_missing_delay;
6700     u8 num_phys, port_id;
6701     struct hba_port *port;
6702 
6703     mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
6704     if (!num_phys) {
6705         ioc_err(ioc, "failure at %s:%d/%s()!\n",
6706             __FILE__, __LINE__, __func__);
6707         return;
6708     }
6709 
6710     ioc->sas_hba.nr_phys_allocated = max_t(u8,
6711         MPT_MAX_HBA_NUM_PHYS, num_phys);
6712     ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated,
6713         sizeof(struct _sas_phy), GFP_KERNEL);
6714     if (!ioc->sas_hba.phy) {
6715         ioc_err(ioc, "failure at %s:%d/%s()!\n",
6716             __FILE__, __LINE__, __func__);
6717         goto out;
6718     }
6719     ioc->sas_hba.num_phys = num_phys;
6720 
6721     /* sas_iounit page 0 */
6722     sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
6723         sizeof(Mpi2SasIOUnit0PhyData_t));
6724     sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6725     if (!sas_iounit_pg0) {
6726         ioc_err(ioc, "failure at %s:%d/%s()!\n",
6727             __FILE__, __LINE__, __func__);
6728         return;
6729     }
6730     if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6731         sas_iounit_pg0, sz))) {
6732         ioc_err(ioc, "failure at %s:%d/%s()!\n",
6733             __FILE__, __LINE__, __func__);
6734         goto out;
6735     }
6736     ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6737         MPI2_IOCSTATUS_MASK;
6738     if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6739         ioc_err(ioc, "failure at %s:%d/%s()!\n",
6740             __FILE__, __LINE__, __func__);
6741         goto out;
6742     }
6743 
6744     /* sas_iounit page 1 */
6745     sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
6746         sizeof(Mpi2SasIOUnit1PhyData_t));
6747     sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
6748     if (!sas_iounit_pg1) {
6749         ioc_err(ioc, "failure at %s:%d/%s()!\n",
6750             __FILE__, __LINE__, __func__);
6751         goto out;
6752     }
6753     if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
6754         sas_iounit_pg1, sz))) {
6755         ioc_err(ioc, "failure at %s:%d/%s()!\n",
6756             __FILE__, __LINE__, __func__);
6757         goto out;
6758     }
6759     ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6760         MPI2_IOCSTATUS_MASK;
6761     if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6762         ioc_err(ioc, "failure at %s:%d/%s()!\n",
6763             __FILE__, __LINE__, __func__);
6764         goto out;
6765     }
6766 
6767     ioc->io_missing_delay =
6768         sas_iounit_pg1->IODeviceMissingDelay;
6769     device_missing_delay =
6770         sas_iounit_pg1->ReportDeviceMissingDelay;
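    /*
     * ReportDeviceMissingDelay uses its UNIT_16 flag to indicate that the
     * timeout value is expressed in units of 16; expand it so that
     * ioc->device_missing_delay always holds the absolute delay.
     */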
6771     if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
6772         ioc->device_missing_delay = (device_missing_delay &
6773             MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
6774     else
6775         ioc->device_missing_delay = device_missing_delay &
6776             MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
6777 
6778     ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
6779     for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
6780         if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
6781             i))) {
6782             ioc_err(ioc, "failure at %s:%d/%s()!\n",
6783                 __FILE__, __LINE__, __func__);
6784             goto out;
6785         }
6786         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6787             MPI2_IOCSTATUS_MASK;
6788         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6789             ioc_err(ioc, "failure at %s:%d/%s()!\n",
6790                 __FILE__, __LINE__, __func__);
6791             goto out;
6792         }
6793 
6794         if (i == 0)
6795             ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
6796                 PhyData[0].ControllerDevHandle);
6797 
6798         port_id = sas_iounit_pg0->PhyData[i].Port;
6799         if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
6800             port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
6801             if (!port)
6802                 goto out;
6803 
6804             port->port_id = port_id;
6805             ioc_info(ioc,
6806                "hba_port entry: %p, port: %d is added to hba_port list\n",
6807                port, port->port_id);
6808             list_add_tail(&port->list,
6809                 &ioc->port_table_list);
6810         }
6811 
6812         /*
6813          * Check whether current Phy belongs to HBA vSES device or not.
6814          */
6815         if ((le32_to_cpu(phy_pg0.PhyInfo) &
6816             MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
6817             (phy_pg0.NegotiatedLinkRate >> 4) >=
6818             MPI2_SAS_NEG_LINK_RATE_1_5) {
6819             /*
6820              * Allocate a virtual_phy object for vSES device.
6821              */
6822             if (!_scsih_alloc_vphy(ioc, port_id, i))
6823                 goto out;
6824             ioc->sas_hba.phy[i].hba_vphy = 1;
6825         }
6826 
6827         ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
6828         ioc->sas_hba.phy[i].phy_id = i;
6829         ioc->sas_hba.phy[i].port =
6830             mpt3sas_get_port_by_id(ioc, port_id, 0);
6831         mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
6832             phy_pg0, ioc->sas_hba.parent_dev);
6833     }
6834     if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6835         MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
6836         ioc_err(ioc, "failure at %s:%d/%s()!\n",
6837             __FILE__, __LINE__, __func__);
6838         goto out;
6839     }
6840     ioc->sas_hba.enclosure_handle =
6841         le16_to_cpu(sas_device_pg0.EnclosureHandle);
6842     ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6843     ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6844          ioc->sas_hba.handle,
6845          (u64)ioc->sas_hba.sas_address,
6846          ioc->sas_hba.num_phys);
6847 
6848     if (ioc->sas_hba.enclosure_handle) {
6849         if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
6850             &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
6851            ioc->sas_hba.enclosure_handle)))
6852             ioc->sas_hba.enclosure_logical_id =
6853                 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
6854     }
6855 
6856  out:
6857     kfree(sas_iounit_pg1);
6858     kfree(sas_iounit_pg0);
6859 }
6860 
6861 /**
6862  * _scsih_expander_add -  creating expander object
6863  * @ioc: per adapter object
6864  * @handle: expander handle
6865  *
6866  * Creating expander object, stored in ioc->sas_expander_list.
6867  *
6868  * Return: 0 for success, else error.
6869  */
6870 static int
6871 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6872 {
6873     struct _sas_node *sas_expander;
6874     struct _enclosure_node *enclosure_dev;
6875     Mpi2ConfigReply_t mpi_reply;
6876     Mpi2ExpanderPage0_t expander_pg0;
6877     Mpi2ExpanderPage1_t expander_pg1;
6878     u32 ioc_status;
6879     u16 parent_handle;
6880     u64 sas_address, sas_address_parent = 0;
6881     int i;
6882     unsigned long flags;
6883     struct _sas_port *mpt3sas_port = NULL;
6884     u8 port_id;
6885 
6886     int rc = 0;
6887 
6888     if (!handle)
6889         return -1;
6890 
6891     if (ioc->shost_recovery || ioc->pci_error_recovery)
6892         return -1;
6893 
6894     if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
6895         MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
6896         ioc_err(ioc, "failure at %s:%d/%s()!\n",
6897             __FILE__, __LINE__, __func__);
6898         return -1;
6899     }
6900 
6901     ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6902         MPI2_IOCSTATUS_MASK;
6903     if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6904         ioc_err(ioc, "failure at %s:%d/%s()!\n",
6905             __FILE__, __LINE__, __func__);
6906         return -1;
6907     }
6908 
6909     /* handle out of order topology events */
6910     parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
6911     if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
6912         != 0) {
6913         ioc_err(ioc, "failure at %s:%d/%s()!\n",
6914             __FILE__, __LINE__, __func__);
6915         return -1;
6916     }
6917 
6918     port_id = expander_pg0.PhysicalPort;
6919     if (sas_address_parent != ioc->sas_hba.sas_address) {
6920         spin_lock_irqsave(&ioc->sas_node_lock, flags);
6921         sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6922             sas_address_parent,
6923             mpt3sas_get_port_by_id(ioc, port_id, 0));
6924         spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
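        /*
         * The parent expander is not in our list yet (topology events can
         * arrive out of order), so add it first; this recursion walks up
         * toward the HBA until a known parent is found.
         */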
6925         if (!sas_expander) {
6926             rc = _scsih_expander_add(ioc, parent_handle);
6927             if (rc != 0)
6928                 return rc;
6929         }
6930     }
6931 
6932     spin_lock_irqsave(&ioc->sas_node_lock, flags);
6933     sas_address = le64_to_cpu(expander_pg0.SASAddress);
6934     sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6935         sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
6936     spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6937 
6938     if (sas_expander)
6939         return 0;
6940 
6941     sas_expander = kzalloc(sizeof(struct _sas_node),
6942         GFP_KERNEL);
6943     if (!sas_expander) {
6944         ioc_err(ioc, "failure at %s:%d/%s()!\n",
6945             __FILE__, __LINE__, __func__);
6946         return -1;
6947     }
6948 
6949     sas_expander->handle = handle;
6950     sas_expander->num_phys = expander_pg0.NumPhys;
6951     sas_expander->sas_address_parent = sas_address_parent;
6952     sas_expander->sas_address = sas_address;
6953     sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6954     if (!sas_expander->port) {
6955         ioc_err(ioc, "failure at %s:%d/%s()!\n",
6956             __FILE__, __LINE__, __func__);
6957         rc = -1;
6958         goto out_fail;
6959     }
6960 
6961     ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6962          handle, parent_handle,
6963          (u64)sas_expander->sas_address, sas_expander->num_phys);
6964 
6965     if (!sas_expander->num_phys) {
6966         rc = -1;
6967         goto out_fail;
6968     }
6969     sas_expander->phy = kcalloc(sas_expander->num_phys,
6970         sizeof(struct _sas_phy), GFP_KERNEL);
6971     if (!sas_expander->phy) {
6972         ioc_err(ioc, "failure at %s:%d/%s()!\n",
6973             __FILE__, __LINE__, __func__);
6974         rc = -1;
6975         goto out_fail;
6976     }
6977 
6978     INIT_LIST_HEAD(&sas_expander->sas_port_list);
6979     mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
6980         sas_address_parent, sas_expander->port);
6981     if (!mpt3sas_port) {
6982         ioc_err(ioc, "failure at %s:%d/%s()!\n",
6983             __FILE__, __LINE__, __func__);
6984         rc = -1;
6985         goto out_fail;
6986     }
6987     sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
6988     sas_expander->rphy = mpt3sas_port->rphy;
6989 
6990     for (i = 0 ; i < sas_expander->num_phys ; i++) {
6991         if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
6992             &expander_pg1, i, handle))) {
6993             ioc_err(ioc, "failure at %s:%d/%s()!\n",
6994                 __FILE__, __LINE__, __func__);
6995             rc = -1;
6996             goto out_fail;
6997         }
6998         sas_expander->phy[i].handle = handle;
6999         sas_expander->phy[i].phy_id = i;
7000         sas_expander->phy[i].port =
7001             mpt3sas_get_port_by_id(ioc, port_id, 0);
7002 
7003         if ((mpt3sas_transport_add_expander_phy(ioc,
7004             &sas_expander->phy[i], expander_pg1,
7005             sas_expander->parent_dev))) {
7006             ioc_err(ioc, "failure at %s:%d/%s()!\n",
7007                 __FILE__, __LINE__, __func__);
7008             rc = -1;
7009             goto out_fail;
7010         }
7011     }
7012 
7013     if (sas_expander->enclosure_handle) {
7014         enclosure_dev =
7015             mpt3sas_scsih_enclosure_find_by_handle(ioc,
7016                         sas_expander->enclosure_handle);
7017         if (enclosure_dev)
7018             sas_expander->enclosure_logical_id =
7019                 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7020     }
7021 
7022     _scsih_expander_node_add(ioc, sas_expander);
7023     return 0;
7024 
7025  out_fail:
7026 
7027     if (mpt3sas_port)
7028         mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
7029             sas_address_parent, sas_expander->port);
7030     kfree(sas_expander);
7031     return rc;
7032 }
7033 
7034 /**
7035  * mpt3sas_expander_remove - removing expander object
7036  * @ioc: per adapter object
7037  * @sas_address: expander sas_address
7038  * @port: hba port entry
7039  */
7040 void
7041 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7042     struct hba_port *port)
7043 {
7044     struct _sas_node *sas_expander;
7045     unsigned long flags;
7046 
7047     if (ioc->shost_recovery)
7048         return;
7049 
7050     if (!port)
7051         return;
7052 
7053     spin_lock_irqsave(&ioc->sas_node_lock, flags);
7054     sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
7055         sas_address, port);
7056     spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7057     if (sas_expander)
7058         _scsih_expander_node_remove(ioc, sas_expander);
7059 }
7060 
7061 /**
7062  * _scsih_done -  internal SCSI_IO callback handler.
7063  * @ioc: per adapter object
7064  * @smid: system request message index
7065  * @msix_index: MSIX table index supplied by the OS
7066  * @reply: reply message frame(lower 32bit addr)
7067  *
7068  * Callback handler when sending internal generated SCSI_IO.
7069  * The callback index passed is `ioc->scsih_cb_idx`
7070  *
7071  * Return: 1 meaning mf should be freed from _base_interrupt
7072  *         0 means the mf is freed from this function.
7073  */
7074 static u8
7075 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
7076 {
7077     MPI2DefaultReply_t *mpi_reply;
7078 
7079     mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
7080     if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
7081         return 1;
7082     if (ioc->scsih_cmds.smid != smid)
7083         return 1;
7084     ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
7085     if (mpi_reply) {
7086         memcpy(ioc->scsih_cmds.reply, mpi_reply,
7087             mpi_reply->MsgLength*4);
7088         ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
7089     }
7090     ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
7091     complete(&ioc->scsih_cmds.done);
7092     return 1;
7093 }
7094 
7095 
7096 
7097 
7098 #define MPT3_MAX_LUNS (255)
7099 
7100 
7101 /**
7102  * _scsih_check_access_status - check access flags
7103  * @ioc: per adapter object
7104  * @sas_address: sas address
7105  * @handle: sas device handle
7106  * @access_status: errors returned during discovery of the device
7107  *
7108  * Return: 0 for success, else failure
7109  */
7110 static u8
7111 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7112     u16 handle, u8 access_status)
7113 {
7114     u8 rc = 1;
7115     char *desc = NULL;
7116 
7117     switch (access_status) {
7118     case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
7119     case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
7120         rc = 0;
7121         break;
7122     case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
7123         desc = "sata capability failed";
7124         break;
7125     case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
7126         desc = "sata affiliation conflict";
7127         break;
7128     case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
7129         desc = "route not addressable";
7130         break;
7131     case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
7132         desc = "smp error not addressable";
7133         break;
7134     case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
7135         desc = "device blocked";
7136         break;
7137     case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
7138     case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
7139     case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
7140     case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
7141     case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
7142     case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
7143     case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
7144     case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
7145     case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
7146     case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
7147     case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
7148     case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
7149         desc = "sata initialization failed";
7150         break;
7151     default:
7152         desc = "unknown";
7153         break;
7154     }
7155 
7156     if (!rc)
7157         return 0;
7158 
7159     ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
7160         desc, (u64)sas_address, handle);
7161     return rc;
7162 }
7163 
7164 /**
7165  * _scsih_check_device - checking device responsiveness
7166  * @ioc: per adapter object
7167  * @parent_sas_address: sas address of parent expander or sas host
7168  * @handle: attached device handle
7169  * @phy_number: phy number
7170  * @link_rate: new link rate
7171  */
7172 static void
7173 _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
7174     u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
7175 {
7176     Mpi2ConfigReply_t mpi_reply;
7177     Mpi2SasDevicePage0_t sas_device_pg0;
7178     struct _sas_device *sas_device = NULL;
7179     struct _enclosure_node *enclosure_dev = NULL;
7180     u32 ioc_status;
7181     unsigned long flags;
7182     u64 sas_address;
7183     struct scsi_target *starget;
7184     struct MPT3SAS_TARGET *sas_target_priv_data;
7185     u32 device_info;
7186     struct hba_port *port;
7187 
7188     if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7189         MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
7190         return;
7191 
7192     ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7193     if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7194         return;
7195 
7196     /* wide port handling ~ we need to handle the device only once, for the
7197      * phy that is matched in sas device page zero
7198      */
7199     if (phy_number != sas_device_pg0.PhyNum)
7200         return;
7201 
7202     /* check if this is end device */
7203     device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7204     if (!(_scsih_is_end_device(device_info)))
7205         return;
7206 
7207     spin_lock_irqsave(&ioc->sas_device_lock, flags);
7208     sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7209     port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
7210     if (!port)
7211         goto out_unlock;
7212     sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7213         sas_address, port);
7214 
7215     if (!sas_device)
7216         goto out_unlock;
7217 
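    /*
     * The firmware may have assigned a new device handle (e.g. across a
     * reset) for the same SAS address; resync the cached handle and the
     * enclosure/chassis information from SAS Device Page 0.
     */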
7218     if (unlikely(sas_device->handle != handle)) {
7219         starget = sas_device->starget;
7220         sas_target_priv_data = starget->hostdata;
7221         starget_printk(KERN_INFO, starget,
7222             "handle changed from(0x%04x) to (0x%04x)!!!\n",
7223             sas_device->handle, handle);
7224         sas_target_priv_data->handle = handle;
7225         sas_device->handle = handle;
7226         if (le16_to_cpu(sas_device_pg0.Flags) &
7227              MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7228             sas_device->enclosure_level =
7229                 sas_device_pg0.EnclosureLevel;
7230             memcpy(sas_device->connector_name,
7231                 sas_device_pg0.ConnectorName, 4);
7232             sas_device->connector_name[4] = '\0';
7233         } else {
7234             sas_device->enclosure_level = 0;
7235             sas_device->connector_name[0] = '\0';
7236         }
7237 
7238         sas_device->enclosure_handle =
7239                 le16_to_cpu(sas_device_pg0.EnclosureHandle);
7240         sas_device->is_chassis_slot_valid = 0;
7241         enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
7242                         sas_device->enclosure_handle);
7243         if (enclosure_dev) {
7244             sas_device->enclosure_logical_id =
7245                 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7246             if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7247                 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7248                 sas_device->is_chassis_slot_valid = 1;
7249                 sas_device->chassis_slot =
7250                     enclosure_dev->pg0.ChassisSlot;
7251             }
7252         }
7253     }
7254 
7255     /* check if device is present */
7256     if (!(le16_to_cpu(sas_device_pg0.Flags) &
7257         MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7258         ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
7259             handle);
7260         goto out_unlock;
7261     }
7262 
7263     /* check if there were any issues with discovery */
7264     if (_scsih_check_access_status(ioc, sas_address, handle,
7265         sas_device_pg0.AccessStatus))
7266         goto out_unlock;
7267 
7268     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7269     _scsih_ublock_io_device(ioc, sas_address, port);
7270 
7271     if (sas_device)
7272         sas_device_put(sas_device);
7273     return;
7274 
7275 out_unlock:
7276     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7277     if (sas_device)
7278         sas_device_put(sas_device);
7279 }
7280 
7281 /**
7282  * _scsih_add_device -  creating sas device object
7283  * @ioc: per adapter object
7284  * @handle: sas device handle
7285  * @phy_num: phy number end device attached to
7286  * @is_pd: is this hidden raid component
7287  *
7288  * Creating end device object, stored in ioc->sas_device_list.
7289  *
7290  * Return: 0 for success, non-zero for failure.
7291  */
7292 static int
7293 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
7294     u8 is_pd)
7295 {
7296     Mpi2ConfigReply_t mpi_reply;
7297     Mpi2SasDevicePage0_t sas_device_pg0;
7298     struct _sas_device *sas_device;
7299     struct _enclosure_node *enclosure_dev = NULL;
7300     u32 ioc_status;
7301     u64 sas_address;
7302     u32 device_info;
7303     u8 port_id;
7304 
7305     if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7306         MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
7307         ioc_err(ioc, "failure at %s:%d/%s()!\n",
7308             __FILE__, __LINE__, __func__);
7309         return -1;
7310     }
7311 
7312     ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7313         MPI2_IOCSTATUS_MASK;
7314     if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7315         ioc_err(ioc, "failure at %s:%d/%s()!\n",
7316             __FILE__, __LINE__, __func__);
7317         return -1;
7318     }
7319 
7320     /* check if this is end device */
7321     device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7322     if (!(_scsih_is_end_device(device_info)))
7323         return -1;
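    /*
     * Mark this handle as having an OS device-add pending; the bit is
     * cleared just below if the device already exists, and elsewhere in
     * the driver once the add completes.
     */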
7324     set_bit(handle, ioc->pend_os_device_add);
7325     sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7326 
7327     /* check if device is present */
7328     if (!(le16_to_cpu(sas_device_pg0.Flags) &
7329         MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7330         ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
7331             handle);
7332         return -1;
7333     }
7334 
7335     /* check if there were any issues with discovery */
7336     if (_scsih_check_access_status(ioc, sas_address, handle,
7337         sas_device_pg0.AccessStatus))
7338         return -1;
7339 
7340     port_id = sas_device_pg0.PhysicalPort;
7341     sas_device = mpt3sas_get_sdev_by_addr(ioc,
7342         sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
7343     if (sas_device) {
7344         clear_bit(handle, ioc->pend_os_device_add);
7345         sas_device_put(sas_device);
7346         return -1;
7347     }
7348 
7349     if (sas_device_pg0.EnclosureHandle) {
7350         enclosure_dev =
7351             mpt3sas_scsih_enclosure_find_by_handle(ioc,
7352                 le16_to_cpu(sas_device_pg0.EnclosureHandle));
7353         if (enclosure_dev == NULL)
7354             ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
7355                  sas_device_pg0.EnclosureHandle);
7356     }
7357 
7358     sas_device = kzalloc(sizeof(struct _sas_device),
7359         GFP_KERNEL);
7360     if (!sas_device) {
7361         ioc_err(ioc, "failure at %s:%d/%s()!\n",
7362             __FILE__, __LINE__, __func__);
7363         return 0;
7364     }
7365 
7366     kref_init(&sas_device->refcount);
7367     sas_device->handle = handle;
7368     if (_scsih_get_sas_address(ioc,
7369         le16_to_cpu(sas_device_pg0.ParentDevHandle),
7370         &sas_device->sas_address_parent) != 0)
7371         ioc_err(ioc, "failure at %s:%d/%s()!\n",
7372             __FILE__, __LINE__, __func__);
7373     sas_device->enclosure_handle =
7374         le16_to_cpu(sas_device_pg0.EnclosureHandle);
7375     if (sas_device->enclosure_handle != 0)
7376         sas_device->slot =
7377             le16_to_cpu(sas_device_pg0.Slot);
7378     sas_device->device_info = device_info;
7379     sas_device->sas_address = sas_address;
7380     sas_device->phy = sas_device_pg0.PhyNum;
7381     sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
7382         MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
7383     sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
7384     if (!sas_device->port) {
7385         ioc_err(ioc, "failure at %s:%d/%s()!\n",
7386             __FILE__, __LINE__, __func__);
7387         goto out;
7388     }
7389 
7390     if (le16_to_cpu(sas_device_pg0.Flags)
7391         & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7392         sas_device->enclosure_level =
7393             sas_device_pg0.EnclosureLevel;
7394         memcpy(sas_device->connector_name,
7395             sas_device_pg0.ConnectorName, 4);
7396         sas_device->connector_name[4] = '\0';
7397     } else {
7398         sas_device->enclosure_level = 0;
7399         sas_device->connector_name[0] = '\0';
7400     }
7401     /* get enclosure_logical_id & chassis_slot*/
7402     sas_device->is_chassis_slot_valid = 0;
7403     if (enclosure_dev) {
7404         sas_device->enclosure_logical_id =
7405             le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7406         if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7407             MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7408             sas_device->is_chassis_slot_valid = 1;
7409             sas_device->chassis_slot =
7410                     enclosure_dev->pg0.ChassisSlot;
7411         }
7412     }
7413 
7414     /* get device name */
7415     sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
7416     sas_device->port_type = sas_device_pg0.MaxPortConnections;
7417     ioc_info(ioc,
7418         "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n",
7419         handle, sas_device->sas_address, sas_device->port_type);
7420 
7421     if (ioc->wait_for_discovery_to_complete)
7422         _scsih_sas_device_init_add(ioc, sas_device);
7423     else
7424         _scsih_sas_device_add(ioc, sas_device);
7425 
7426 out:
7427     sas_device_put(sas_device);
7428     return 0;
7429 }
7430 
7431 /**
7432  * _scsih_remove_device -  removing sas device object
7433  * @ioc: per adapter object
7434  * @sas_device: the sas_device object
7435  */
7436 static void
7437 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
7438     struct _sas_device *sas_device)
7439 {
7440     struct MPT3SAS_TARGET *sas_target_priv_data;
7441 
7442     if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
7443          (sas_device->pfa_led_on)) {
7444         _scsih_turn_off_pfa_led(ioc, sas_device);
7445         sas_device->pfa_led_on = 0;
7446     }
7447 
7448     dewtprintk(ioc,
7449            ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
7450                 __func__,
7451                 sas_device->handle, (u64)sas_device->sas_address));
7452 
7453     dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
7454         NULL, NULL));
7455 
7456     if (sas_device->starget && sas_device->starget->hostdata) {
7457         sas_target_priv_data = sas_device->starget->hostdata;
7458         sas_target_priv_data->deleted = 1;
7459         _scsih_ublock_io_device(ioc, sas_device->sas_address,
7460             sas_device->port);
7461         sas_target_priv_data->handle =
7462              MPT3SAS_INVALID_DEVICE_HANDLE;
7463     }
7464 
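    /*
     * Only remove the SAS transport port when drives are not hidden from
     * the OS (hide_drives, e.g. on controllers that keep physical drives
     * masked behind a RAID volume).
     */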
7465     if (!ioc->hide_drives)
7466         mpt3sas_transport_port_remove(ioc,
7467             sas_device->sas_address,
7468             sas_device->sas_address_parent,
7469             sas_device->port);
7470 
7471     ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
7472          sas_device->handle, (u64)sas_device->sas_address);
7473 
7474     _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
7475 
7476     dewtprintk(ioc,
7477            ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
7478                 __func__,
7479                 sas_device->handle, (u64)sas_device->sas_address));
7480     dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
7481         NULL, NULL));
7482 }
7483 
7484 /**
7485  * _scsih_sas_topology_change_event_debug - debug for topology event
7486  * @ioc: per adapter object
7487  * @event_data: event data payload
7488  * Context: user.
7489  */
7490 static void
7491 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7492     Mpi2EventDataSasTopologyChangeList_t *event_data)
7493 {
7494     int i;
7495     u16 handle;
7496     u16 reason_code;
7497     u8 phy_number;
7498     char *status_str = NULL;
7499     u8 link_rate, prev_link_rate;
7500 
7501     switch (event_data->ExpStatus) {
7502     case MPI2_EVENT_SAS_TOPO_ES_ADDED:
7503         status_str = "add";
7504         break;
7505     case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
7506         status_str = "remove";
7507         break;
7508     case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
7509     case 0:
7510         status_str =  "responding";
7511         break;
7512     case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
7513         status_str = "remove delay";
7514         break;
7515     default:
7516         status_str = "unknown status";
7517         break;
7518     }
7519     ioc_info(ioc, "sas topology change: (%s)\n", status_str);
7520     pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
7521         "start_phy(%02d), count(%d)\n",
7522         le16_to_cpu(event_data->ExpanderDevHandle),
7523         le16_to_cpu(event_data->EnclosureHandle),
7524         event_data->StartPhyNum, event_data->NumEntries);
7525     for (i = 0; i < event_data->NumEntries; i++) {
7526         handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7527         if (!handle)
7528             continue;
7529         phy_number = event_data->StartPhyNum + i;
7530         reason_code = event_data->PHY[i].PhyStatus &
7531             MPI2_EVENT_SAS_TOPO_RC_MASK;
7532         switch (reason_code) {
7533         case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7534             status_str = "target add";
7535             break;
7536         case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7537             status_str = "target remove";
7538             break;
7539         case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7540             status_str = "delay target remove";
7541             break;
7542         case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7543             status_str = "link rate change";
7544             break;
7545         case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7546             status_str = "target responding";
7547             break;
7548         default:
7549             status_str = "unknown";
7550             break;
7551         }
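        /* LinkRate encodes two nibbles: the upper nibble is the current
         * negotiated link rate and the lower nibble is the previous one.
         */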
7552         link_rate = event_data->PHY[i].LinkRate >> 4;
7553         prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7554         pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
7555             " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
7556             handle, status_str, link_rate, prev_link_rate);
7557 
7558     }
7559 }
7560 
7561 /**
7562  * _scsih_sas_topology_change_event - handle topology changes
7563  * @ioc: per adapter object
7564  * @fw_event: The fw_event_work object
7565  * Context: user.
7566  *
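 * Return: 0.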
7567  */
7568 static int
7569 _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7570     struct fw_event_work *fw_event)
7571 {
7572     int i;
7573     u16 parent_handle, handle;
7574     u16 reason_code;
7575     u8 phy_number, max_phys;
7576     struct _sas_node *sas_expander;
7577     u64 sas_address;
7578     unsigned long flags;
7579     u8 link_rate, prev_link_rate;
7580     struct hba_port *port;
7581     Mpi2EventDataSasTopologyChangeList_t *event_data =
7582         (Mpi2EventDataSasTopologyChangeList_t *)
7583         fw_event->event_data;
7584 
7585     if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7586         _scsih_sas_topology_change_event_debug(ioc, event_data);
7587 
7588     if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
7589         return 0;
7590 
7591     if (!ioc->sas_hba.num_phys)
7592         _scsih_sas_host_add(ioc);
7593     else
7594         _scsih_sas_host_refresh(ioc);
7595 
7596     if (fw_event->ignore) {
7597         dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
7598         return 0;
7599     }
7600 
7601     parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
7602     port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);
7603 
7604     /* handle expander add */
7605     if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
7606         if (_scsih_expander_add(ioc, parent_handle) != 0)
7607             return 0;
7608 
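    /* Resolve the parent of these phys: either an expander (looked up
     * by handle) or, when the handle indexes an HBA phy, the host
     * adapter itself; otherwise there is nothing to do.
     */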
7609     spin_lock_irqsave(&ioc->sas_node_lock, flags);
7610     sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
7611         parent_handle);
7612     if (sas_expander) {
7613         sas_address = sas_expander->sas_address;
7614         max_phys = sas_expander->num_phys;
7615         port = sas_expander->port;
7616     } else if (parent_handle < ioc->sas_hba.num_phys) {
7617         sas_address = ioc->sas_hba.sas_address;
7618         max_phys = ioc->sas_hba.num_phys;
7619     } else {
7620         spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7621         return 0;
7622     }
7623     spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7624 
7625     /* handle siblings events */
7626     for (i = 0; i < event_data->NumEntries; i++) {
7627         if (fw_event->ignore) {
7628             dewtprintk(ioc,
7629                    ioc_info(ioc, "ignoring expander event\n"));
7630             return 0;
7631         }
7632         if (ioc->remove_host || ioc->pci_error_recovery)
7633             return 0;
7634         phy_number = event_data->StartPhyNum + i;
7635         if (phy_number >= max_phys)
7636             continue;
7637         reason_code = event_data->PHY[i].PhyStatus &
7638             MPI2_EVENT_SAS_TOPO_RC_MASK;
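        /* Skip vacant phys unless the attached target is reported as
         * not responding, which still has to be handled as a removal.
         */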
7639         if ((event_data->PHY[i].PhyStatus &
7640             MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
7641             MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
7642                 continue;
7643         handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7644         if (!handle)
7645             continue;
7646         link_rate = event_data->PHY[i].LinkRate >> 4;
7647         prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7648         switch (reason_code) {
7649         case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7650 
7651             if (ioc->shost_recovery)
7652                 break;
7653 
7654             if (link_rate == prev_link_rate)
7655                 break;
7656 
7657             mpt3sas_transport_update_links(ioc, sas_address,
7658                 handle, phy_number, link_rate, port);
7659 
7660             if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
7661                 break;
7662 
7663             _scsih_check_device(ioc, sas_address, handle,
7664                 phy_number, link_rate);
7665 
7666             if (!test_bit(handle, ioc->pend_os_device_add))
7667                 break;
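            /* The device is still pending an OS-level add, so fall
             * through and treat this link-rate change as a target add.
             */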
7668 
7669             fallthrough;
7670 
7671         case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7672 
7673             if (ioc->shost_recovery)
7674                 break;
7675 
7676             mpt3sas_transport_update_links(ioc, sas_address,
7677                 handle, phy_number, link_rate, port);
7678 
7679             _scsih_add_device(ioc, handle, phy_number, 0);
7680 
7681             break;
7682         case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7683 
7684             _scsih_device_remove_by_handle(ioc, handle);
7685             break;
7686         }
7687     }
7688 
7689     /* handle expander removal */
7690     if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
7691         sas_expander)
7692         mpt3sas_expander_remove(ioc, sas_address, port);
7693 
7694     return 0;
7695 }
7696 
7697 /**
7698  * _scsih_sas_device_status_change_event_debug - debug for device event
7699  * @ioc: per adapter object
7700  * @event_data: event data payload
7701  * Context: user.
7702  */
7703 static void
7704 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7705     Mpi2EventDataSasDeviceStatusChange_t *event_data)
7706 {
7707     char *reason_str = NULL;
7708 
7709     switch (event_data->ReasonCode) {
7710     case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7711         reason_str = "smart data";
7712         break;
7713     case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7714         reason_str = "unsupported device discovered";
7715         break;
7716     case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7717         reason_str = "internal device reset";
7718         break;
7719     case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7720         reason_str = "internal task abort";
7721         break;
7722     case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7723         reason_str = "internal task abort set";
7724         break;
7725     case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7726         reason_str = "internal clear task set";
7727         break;
7728     case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7729         reason_str = "internal query task";
7730         break;
7731     case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
7732         reason_str = "sata init failure";
7733         break;
7734     case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7735         reason_str = "internal device reset complete";
7736         break;
7737     case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7738         reason_str = "internal task abort complete";
7739         break;
7740     case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7741         reason_str = "internal async notification";
7742         break;
7743     case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
7744         reason_str = "expander reduced functionality";
7745         break;
7746     case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
7747         reason_str = "expander reduced functionality complete";
7748         break;
7749     default:
7750         reason_str = "unknown reason";
7751         break;
7752     }
7753     ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
7754          reason_str, le16_to_cpu(event_data->DevHandle),
7755          (u64)le64_to_cpu(event_data->SASAddress),
7756          le16_to_cpu(event_data->TaskTag));
7757     if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
7758         pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7759             event_data->ASC, event_data->ASCQ);
7760     pr_cont("\n");
7761 }
7762 
7763 /**
7764  * _scsih_sas_device_status_change_event - handle device status change
7765  * @ioc: per adapter object
7766  * @event_data: event data payload
7767  * Context: user.
7768  */
7769 static void
7770 _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7771     Mpi2EventDataSasDeviceStatusChange_t *event_data)
7772 {
7773     struct MPT3SAS_TARGET *target_priv_data;
7774     struct _sas_device *sas_device;
7775     u64 sas_address;
7776     unsigned long flags;
7777 
7778     /* In MPI Revision K (0xC), the internal device reset complete was
7779      * implemented, so avoid setting tm_busy flag for older firmware.
7780      */
7781     if ((ioc->facts.HeaderVersion >> 8) < 0xC)
7782         return;
7783 
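    /* Only internal device reset start/complete events are handled
     * here; they toggle the target's tm_busy flag below.
     */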
7784     if (event_data->ReasonCode !=
7785         MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7786        event_data->ReasonCode !=
7787         MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7788         return;
7789 
7790     spin_lock_irqsave(&ioc->sas_device_lock, flags);
7791     sas_address = le64_to_cpu(event_data->SASAddress);
7792     sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7793         sas_address,
7794         mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));
7795 
7796     if (!sas_device || !sas_device->starget)
7797         goto out;
7798 
7799     target_priv_data = sas_device->starget->hostdata;
7800     if (!target_priv_data)
7801         goto out;
7802 
7803     if (event_data->ReasonCode ==
7804         MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
7805         target_priv_data->tm_busy = 1;
7806     else
7807         target_priv_data->tm_busy = 0;
7808 
7809     if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7810         ioc_info(ioc,
7811             "%s tm_busy flag for handle(0x%04x)\n",
7812             (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
7813             target_priv_data->handle);
7814 
7815 out:
7816     if (sas_device)
7817         sas_device_put(sas_device);
7818 
7819     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7820 }
7821 
7822 
7823 /**
7824  * _scsih_check_pcie_access_status - check access flags
7825  * @ioc: per adapter object
7826  * @wwid: wwid
7827  * @handle: sas device handle
7828  * @access_status: errors returned during discovery of the device
7829  *
7830  * Return: 0 for success, else failure
7831  */
7832 static u8
7833 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
7834     u16 handle, u8 access_status)
7835 {
7836     u8 rc = 1;
7837     char *desc = NULL;
7838 
7839     switch (access_status) {
7840     case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
7841     case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
7842         rc = 0;
7843         break;
7844     case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
7845         desc = "PCIe device capability failed";
7846         break;
7847     case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
7848         desc = "PCIe device blocked";
7849         ioc_info(ioc,
7850             "Device with Access Status (%s): wwid(0x%016llx), "
7851             "handle(0x%04x) will only be added to the internal list\n",
7852             desc, (u64)wwid, handle);
7853         rc = 0;
7854         break;
7855     case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
7856         desc = "PCIe device mem space access failed";
7857         break;
7858     case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
7859         desc = "PCIe device unsupported";
7860         break;
7861     case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
7862         desc = "PCIe device MSIx Required";
7863         break;
7864     case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
7865         desc = "PCIe device init fail max";
7866         break;
7867     case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
7868         desc = "PCIe device status unknown";
7869         break;
7870     case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
7871         desc = "nvme ready timeout";
7872         break;
7873     case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
7874         desc = "nvme device configuration unsupported";
7875         break;
7876     case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
7877         desc = "nvme identify failed";
7878         break;
7879     case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
7880         desc = "nvme qconfig failed";
7881         break;
7882     case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
7883         desc = "nvme qcreation failed";
7884         break;
7885     case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
7886         desc = "nvme eventcfg failed";
7887         break;
7888     case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
7889         desc = "nvme get feature stat failed";
7890         break;
7891     case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
7892         desc = "nvme idle timeout";
7893         break;
7894     case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
7895         desc = "nvme failure status";
7896         break;
7897     default:
7898         ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
7899             access_status, (u64)wwid, handle);
7900         return rc;
7901     }
7902 
7903     if (!rc)
7904         return rc;
7905 
7906     ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
7907          desc, (u64)wwid, handle);
7908     return rc;
7909 }
7910 
7911 /**
7912  * _scsih_pcie_device_remove_from_sml -  removing pcie device
7913  * from SML and free up associated memory
7914  * @ioc: per adapter object
7915  * @pcie_device: the pcie_device object
7916  */
7917 static void
7918 _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
7919     struct _pcie_device *pcie_device)
7920 {
7921     struct MPT3SAS_TARGET *sas_target_priv_data;
7922 
7923     dewtprintk(ioc,
7924            ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
7925                 __func__,
7926                 pcie_device->handle, (u64)pcie_device->wwid));
7927     if (pcie_device->enclosure_handle != 0)
7928         dewtprintk(ioc,
7929                ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
7930                     __func__,
7931                     (u64)pcie_device->enclosure_logical_id,
7932                     pcie_device->slot));
7933     if (pcie_device->connector_name[0] != '\0')
7934         dewtprintk(ioc,
7935                ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
7936                     __func__,
7937                     pcie_device->enclosure_level,
7938                     pcie_device->connector_name));
7939 
7940     if (pcie_device->starget && pcie_device->starget->hostdata) {
7941         sas_target_priv_data = pcie_device->starget->hostdata;
7942         sas_target_priv_data->deleted = 1;
7943         _scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
7944         sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
7945     }
7946 
7947     ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
7948          pcie_device->handle, (u64)pcie_device->wwid);
7949     if (pcie_device->enclosure_handle != 0)
7950         ioc_info(ioc, "removing: enclosure logical id(0x%016llx), slot(%d)\n",
7951              (u64)pcie_device->enclosure_logical_id,
7952              pcie_device->slot);
7953     if (pcie_device->connector_name[0] != '\0')
7954         ioc_info(ioc, "removing: enclosure level(0x%04x), connector name(%s)\n",
7955              pcie_device->enclosure_level,
7956              pcie_device->connector_name);
7957 
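    /* Devices with a BLOCKED access status are kept on the driver's
     * internal list only, so skip scsi_remove_target() for them.
     */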
7958     if (pcie_device->starget && (pcie_device->access_status !=
7959                 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
7960         scsi_remove_target(&pcie_device->starget->dev);
7961     dewtprintk(ioc,
7962            ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
7963                 __func__,
7964                 pcie_device->handle, (u64)pcie_device->wwid));
7965     if (pcie_device->enclosure_handle != 0)
7966         dewtprintk(ioc,
7967                ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
7968                     __func__,
7969                     (u64)pcie_device->enclosure_logical_id,
7970                     pcie_device->slot));
7971     if (pcie_device->connector_name[0] != '\0')
7972         dewtprintk(ioc,
7973                ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
7974                     __func__,
7975                     pcie_device->enclosure_level,
7976                     pcie_device->connector_name));
7977 
7978     kfree(pcie_device->serial_number);
7979 }
7980 
7981 
7982 /**
7983  * _scsih_pcie_check_device - checking device responsiveness
7984  * @ioc: per adapter object
7985  * @handle: attached device handle
7986  */
7987 static void
7988 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7989 {
7990     Mpi2ConfigReply_t mpi_reply;
7991     Mpi26PCIeDevicePage0_t pcie_device_pg0;
7992     u32 ioc_status;
7993     struct _pcie_device *pcie_device;
7994     u64 wwid;
7995     unsigned long flags;
7996     struct scsi_target *starget;
7997     struct MPT3SAS_TARGET *sas_target_priv_data;
7998     u32 device_info;
7999 
8000     if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8001         &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
8002         return;
8003 
8004     ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
8005     if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8006         return;
8007 
8008     /* check if this is end device */
8009     device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8010     if (!(_scsih_is_nvme_pciescsi_device(device_info)))
8011         return;
8012 
8013     wwid = le64_to_cpu(pcie_device_pg0.WWID);
8014     spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8015     pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
8016 
8017     if (!pcie_device) {
8018         spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8019         return;
8020     }
8021 
8022     if (unlikely(pcie_device->handle != handle)) {
8023         starget = pcie_device->starget;
8024         sas_target_priv_data = starget->hostdata;
8025         pcie_device->access_status = pcie_device_pg0.AccessStatus;
8026         starget_printk(KERN_INFO, starget,
8027             "handle changed from(0x%04x) to (0x%04x)!!!\n",
8028             pcie_device->handle, handle);
8029         sas_target_priv_data->handle = handle;
8030         pcie_device->handle = handle;
8031 
8032         if (le32_to_cpu(pcie_device_pg0.Flags) &
8033             MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8034             pcie_device->enclosure_level =
8035                 pcie_device_pg0.EnclosureLevel;
8036             memcpy(&pcie_device->connector_name[0],
8037                 &pcie_device_pg0.ConnectorName[0], 4);
8038         } else {
8039             pcie_device->enclosure_level = 0;
8040             pcie_device->connector_name[0] = '\0';
8041         }
8042     }
8043 
8044     /* check if device is present */
8045     if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8046         MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
8047         ioc_info(ioc, "device is not present: handle(0x%04x)\n",
8048              handle);
8049         spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8050         pcie_device_put(pcie_device);
8051         return;
8052     }
8053 
8054     /* check if there were any issues with discovery */
8055     if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8056         pcie_device_pg0.AccessStatus)) {
8057         spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8058         pcie_device_put(pcie_device);
8059         return;
8060     }
8061 
8062     spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8063     pcie_device_put(pcie_device);
8064 
8065     _scsih_ublock_io_device(ioc, wwid, NULL);
8066 
8067     return;
8068 }
8069 
8070 /**
8071  * _scsih_pcie_add_device -  creating pcie device object
8072  * @ioc: per adapter object
8073  * @handle: pcie device handle
8074  *
8075  * Creating end device object, stored in ioc->pcie_device_list.
8076  *
8077  * Return: 1 means queue the event later, 0 means complete the event
8078  */
8079 static int
8080 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
8081 {
8082     Mpi26PCIeDevicePage0_t pcie_device_pg0;
8083     Mpi26PCIeDevicePage2_t pcie_device_pg2;
8084     Mpi2ConfigReply_t mpi_reply;
8085     struct _pcie_device *pcie_device;
8086     struct _enclosure_node *enclosure_dev;
8087     u32 ioc_status;
8088     u64 wwid;
8089 
8090     if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8091         &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
8092         ioc_err(ioc, "failure at %s:%d/%s()!\n",
8093             __FILE__, __LINE__, __func__);
8094         return 0;
8095     }
8096     ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8097         MPI2_IOCSTATUS_MASK;
8098     if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8099         ioc_err(ioc, "failure at %s:%d/%s()!\n",
8100             __FILE__, __LINE__, __func__);
8101         return 0;
8102     }
8103 
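    /* Mark this handle as pending an OS-level device add; the bit is
     * cleared below if the device turns out to be known already.
     */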
8104     set_bit(handle, ioc->pend_os_device_add);
8105     wwid = le64_to_cpu(pcie_device_pg0.WWID);
8106 
8107     /* check if device is present */
8108     if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8109         MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
8110         ioc_err(ioc, "device is not present handle(0x%04x)!!!\n",
8111             handle);
8112         return 0;
8113     }
8114 
8115     /* check if there were any issues with discovery */
8116     if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8117         pcie_device_pg0.AccessStatus))
8118         return 0;
8119 
8120     if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
8121         (pcie_device_pg0.DeviceInfo))))
8122         return 0;
8123 
8124     pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
8125     if (pcie_device) {
8126         clear_bit(handle, ioc->pend_os_device_add);
8127         pcie_device_put(pcie_device);
8128         return 0;
8129     }
8130 
8131     /* PCIe Device Page 2 contains read-only information about a
8132      * specific NVMe device; therefore, this page is only valid for
8133      * NVMe devices and is skipped for PCIe devices of type SCSI.
8134      */
8135     if (!(mpt3sas_scsih_is_pcie_scsi_device(
8136         le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8137         if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
8138             &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8139             handle)) {
8140             ioc_err(ioc,
8141                 "failure at %s:%d/%s()!\n", __FILE__,
8142                 __LINE__, __func__);
8143             return 0;
8144         }
8145 
8146         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8147                     MPI2_IOCSTATUS_MASK;
8148         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8149             ioc_err(ioc,
8150                 "failure at %s:%d/%s()!\n", __FILE__,
8151                 __LINE__, __func__);
8152             return 0;
8153         }
8154     }
8155 
8156     pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
8157     if (!pcie_device) {
8158         ioc_err(ioc, "failure at %s:%d/%s()!\n",
8159             __FILE__, __LINE__, __func__);
8160         return 0;
8161     }
8162 
8163     kref_init(&pcie_device->refcount);
8164     pcie_device->id = ioc->pcie_target_id++;
8165     pcie_device->channel = PCIE_CHANNEL;
8166     pcie_device->handle = handle;
8167     pcie_device->access_status = pcie_device_pg0.AccessStatus;
8168     pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8169     pcie_device->wwid = wwid;
8170     pcie_device->port_num = pcie_device_pg0.PortNum;
8171     pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
8172         MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
8173 
8174     pcie_device->enclosure_handle =
8175         le16_to_cpu(pcie_device_pg0.EnclosureHandle);
8176     if (pcie_device->enclosure_handle != 0)
8177         pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
8178 
8179     if (le32_to_cpu(pcie_device_pg0.Flags) &
8180         MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8181         pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
8182         memcpy(&pcie_device->connector_name[0],
8183             &pcie_device_pg0.ConnectorName[0], 4);
8184     } else {
8185         pcie_device->enclosure_level = 0;
8186         pcie_device->connector_name[0] = '\0';
8187     }
8188 
8189     /* get enclosure_logical_id */
8190     if (pcie_device->enclosure_handle) {
8191         enclosure_dev =
8192             mpt3sas_scsih_enclosure_find_by_handle(ioc,
8193                         pcie_device->enclosure_handle);
8194         if (enclosure_dev)
8195             pcie_device->enclosure_logical_id =
8196                 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8197     }
8198     /* TODO -- Add device name once FW supports it */
8199     if (!(mpt3sas_scsih_is_pcie_scsi_device(
8200         le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8201         pcie_device->nvme_mdts =
8202             le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
8203         pcie_device->shutdown_latency =
8204             le16_to_cpu(pcie_device_pg2.ShutdownLatency);
8205         /*
8206          * Set the IOC's max_shutdown_latency to the drive's RTD3 Entry
8207          * Latency if the drive's RTD3 Entry Latency is greater than the
8208          * IOC's max_shutdown_latency.
8209          */
8210         if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
8211             ioc->max_shutdown_latency =
8212                 pcie_device->shutdown_latency;
8213         if (pcie_device_pg2.ControllerResetTO)
8214             pcie_device->reset_timeout =
8215                 pcie_device_pg2.ControllerResetTO;
8216         else
8217             pcie_device->reset_timeout = 30;
8218     } else
8219         pcie_device->reset_timeout = 30;
8220 
8221     if (ioc->wait_for_discovery_to_complete)
8222         _scsih_pcie_device_init_add(ioc, pcie_device);
8223     else
8224         _scsih_pcie_device_add(ioc, pcie_device);
8225 
8226     pcie_device_put(pcie_device);
8227     return 0;
8228 }
8229 
8230 /**
8231  * _scsih_pcie_topology_change_event_debug - debug for topology
8232  * event
8233  * @ioc: per adapter object
8234  * @event_data: event data payload
8235  * Context: user.
8236  */
8237 static void
8238 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8239     Mpi26EventDataPCIeTopologyChangeList_t *event_data)
8240 {
8241     int i;
8242     u16 handle;
8243     u16 reason_code;
8244     u8 port_number;
8245     char *status_str = NULL;
8246     u8 link_rate, prev_link_rate;
8247 
8248     switch (event_data->SwitchStatus) {
8249     case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
8250         status_str = "add";
8251         break;
8252     case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
8253         status_str = "remove";
8254         break;
8255     case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
8256     case 0:
8257         status_str =  "responding";
8258         break;
8259     case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
8260         status_str = "remove delay";
8261         break;
8262     default:
8263         status_str = "unknown status";
8264         break;
8265     }
8266     ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
8267     pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x) "
8268         "start_port(%02d), count(%d)\n",
8269         le16_to_cpu(event_data->SwitchDevHandle),
8270         le16_to_cpu(event_data->EnclosureHandle),
8271         event_data->StartPortNum, event_data->NumEntries);
8272     for (i = 0; i < event_data->NumEntries; i++) {
8273         handle =
8274             le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8275         if (!handle)
8276             continue;
8277         port_number = event_data->StartPortNum + i;
8278         reason_code = event_data->PortEntry[i].PortStatus;
8279         switch (reason_code) {
8280         case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8281             status_str = "target add";
8282             break;
8283         case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8284             status_str = "target remove";
8285             break;
8286         case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
8287             status_str = "delay target remove";
8288             break;
8289         case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8290             status_str = "link rate change";
8291             break;
8292         case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
8293             status_str = "target responding";
8294             break;
8295         default:
8296             status_str = "unknown";
8297             break;
8298         }
8299         link_rate = event_data->PortEntry[i].CurrentPortInfo &
8300             MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8301         prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
8302             MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8303         pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
8304             " link rate: new(0x%02x), old(0x%02x)\n", port_number,
8305             handle, status_str, link_rate, prev_link_rate);
8306     }
8307 }
8308 
8309 /**
8310  * _scsih_pcie_topology_change_event - handle PCIe topology
8311  *  changes
8312  * @ioc: per adapter object
8313  * @fw_event: The fw_event_work object
8314  * Context: user.
8315  *
8316  */
8317 static void
8318 _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
8319     struct fw_event_work *fw_event)
8320 {
8321     int i;
8322     u16 handle;
8323     u16 reason_code;
8324     u8 link_rate, prev_link_rate;
8325     unsigned long flags;
8326     int rc;
8327     Mpi26EventDataPCIeTopologyChangeList_t *event_data =
8328         (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
8329     struct _pcie_device *pcie_device;
8330 
8331     if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8332         _scsih_pcie_topology_change_event_debug(ioc, event_data);
8333 
8334     if (ioc->shost_recovery || ioc->remove_host ||
8335         ioc->pci_error_recovery)
8336         return;
8337 
8338     if (fw_event->ignore) {
8339         dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
8340         return;
8341     }
8342 
8343     /* handle siblings events */
8344     for (i = 0; i < event_data->NumEntries; i++) {
8345         if (fw_event->ignore) {
8346             dewtprintk(ioc,
8347                    ioc_info(ioc, "ignoring switch event\n"));
8348             return;
8349         }
8350         if (ioc->remove_host || ioc->pci_error_recovery)
8351             return;
8352         reason_code = event_data->PortEntry[i].PortStatus;
8353         handle =
8354             le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8355         if (!handle)
8356             continue;
8357 
8358         link_rate = event_data->PortEntry[i].CurrentPortInfo
8359             & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8360         prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
8361             & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8362 
8363         switch (reason_code) {
8364         case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8365             if (ioc->shost_recovery)
8366                 break;
8367             if (link_rate == prev_link_rate)
8368                 break;
8369             if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
8370                 break;
8371 
8372             _scsih_pcie_check_device(ioc, handle);
8373 
8374             /* The code after this point handles the case where a
8375              * device has been added but is returning BUSY for some
8376              * time.  Then, before the Device Missing Delay expires
8377              * and the device becomes READY, the device is removed
8378              * and added back.
8379              */
8380             spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8381             pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
8382             spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8383 
8384             if (pcie_device) {
8385                 pcie_device_put(pcie_device);
8386                 break;
8387             }
8388 
8389             if (!test_bit(handle, ioc->pend_os_device_add))
8390                 break;
8391 
8392             dewtprintk(ioc,
8393                    ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
8394                         handle));
8395             event_data->PortEntry[i].PortStatus &= 0xF0;
8396             event_data->PortEntry[i].PortStatus |=
8397                 MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
8398             fallthrough;
8399         case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8400             if (ioc->shost_recovery)
8401                 break;
8402             if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
8403                 break;
8404 
8405             rc = _scsih_pcie_add_device(ioc, handle);
8406             if (!rc) {
8407                 /* mark entry vacant */
8408                 /* TODO: this needs to be reviewed and fixed;
8409                  * we don't have an entry status
8410                  * to make the event void, like vacant.
8411                  */
8412                 event_data->PortEntry[i].PortStatus |=
8413                     MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
8414             }
8415             break;
8416         case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8417             _scsih_pcie_device_remove_by_handle(ioc, handle);
8418             break;
8419         }
8420     }
8421 }
8422 
8423 /**
8424  * _scsih_pcie_device_status_change_event_debug - debug for device event
8425  * @ioc: per adapter object
8426  * @event_data: event data payload
8427  * Context: user.
8428  */
8429 static void
8430 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8431     Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
8432 {
8433     char *reason_str = NULL;
8434 
8435     switch (event_data->ReasonCode) {
8436     case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
8437         reason_str = "smart data";
8438         break;
8439     case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
8440         reason_str = "unsupported device discovered";
8441         break;
8442     case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
8443         reason_str = "internal device reset";
8444         break;
8445     case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
8446         reason_str = "internal task abort";
8447         break;
8448     case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
8449         reason_str = "internal task abort set";
8450         break;
8451     case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
8452         reason_str = "internal clear task set";
8453         break;
8454     case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
8455         reason_str = "internal query task";
8456         break;
8457     case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
8458         reason_str = "device init failure";
8459         break;
8460     case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
8461         reason_str = "internal device reset complete";
8462         break;
8463     case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
8464         reason_str = "internal task abort complete";
8465         break;
8466     case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
8467         reason_str = "internal async notification";
8468         break;
8469     case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
8470         reason_str = "pcie hot reset failed";
8471         break;
8472     default:
8473         reason_str = "unknown reason";
8474         break;
8475     }
8476 
8477     ioc_info(ioc, "PCIE device status change: (%s)\n"
8478          "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
8479          reason_str, le16_to_cpu(event_data->DevHandle),
8480          (u64)le64_to_cpu(event_data->WWID),
8481          le16_to_cpu(event_data->TaskTag));
8482     if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
8483         pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
8484             event_data->ASC, event_data->ASCQ);
8485     pr_cont("\n");
8486 }
8487 
8488 /**
8489  * _scsih_pcie_device_status_change_event - handle device status
8490  * change
8491  * @ioc: per adapter object
8492  * @fw_event: The fw_event_work object
8493  * Context: user.
8494  */
8495 static void
8496 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8497     struct fw_event_work *fw_event)
8498 {
8499     struct MPT3SAS_TARGET *target_priv_data;
8500     struct _pcie_device *pcie_device;
8501     u64 wwid;
8502     unsigned long flags;
8503     Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
8504         (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
8505     if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8506         _scsih_pcie_device_status_change_event_debug(ioc,
8507             event_data);
8508 
8509     if (event_data->ReasonCode !=
8510         MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
8511         event_data->ReasonCode !=
8512         MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
8513         return;
8514 
8515     spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8516     wwid = le64_to_cpu(event_data->WWID);
8517     pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
8518 
8519     if (!pcie_device || !pcie_device->starget)
8520         goto out;
8521 
8522     target_priv_data = pcie_device->starget->hostdata;
8523     if (!target_priv_data)
8524         goto out;
8525 
8526     if (event_data->ReasonCode ==
8527         MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
8528         target_priv_data->tm_busy = 1;
8529     else
8530         target_priv_data->tm_busy = 0;
8531 out:
8532     if (pcie_device)
8533         pcie_device_put(pcie_device);
8534 
8535     spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8536 }
8537 
8538 /**
8539  * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
8540  * event
8541  * @ioc: per adapter object
8542  * @event_data: event data payload
8543  * Context: user.
8544  */
8545 static void
8546 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8547     Mpi2EventDataSasEnclDevStatusChange_t *event_data)
8548 {
8549     char *reason_str = NULL;
8550 
8551     switch (event_data->ReasonCode) {
8552     case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8553         reason_str = "enclosure add";
8554         break;
8555     case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8556         reason_str = "enclosure remove";
8557         break;
8558     default:
8559         reason_str = "unknown reason";
8560         break;
8561     }
8562 
8563     ioc_info(ioc, "enclosure status change: (%s)\n"
8564          "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
8565          reason_str,
8566          le16_to_cpu(event_data->EnclosureHandle),
8567          (u64)le64_to_cpu(event_data->EnclosureLogicalID),
8568          le16_to_cpu(event_data->StartSlot));
8569 }
8570 
8571 /**
8572  * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
8573  * @ioc: per adapter object
8574  * @fw_event: The fw_event_work object
8575  * Context: user.
8576  */
8577 static void
8578 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8579     struct fw_event_work *fw_event)
8580 {
8581     Mpi2ConfigReply_t mpi_reply;
8582     struct _enclosure_node *enclosure_dev = NULL;
8583     Mpi2EventDataSasEnclDevStatusChange_t *event_data =
8584         (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
8585     int rc;
8586     u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
8587 
8588     if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8589         _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
8590              (Mpi2EventDataSasEnclDevStatusChange_t *)
8591              fw_event->event_data);
8592     if (ioc->shost_recovery)
8593         return;
8594 
8595     if (enclosure_handle)
8596         enclosure_dev =
8597             mpt3sas_scsih_enclosure_find_by_handle(ioc,
8598                         enclosure_handle);
8599     switch (event_data->ReasonCode) {
8600     case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8601         if (!enclosure_dev) {
8602             enclosure_dev =
8603                 kzalloc(sizeof(struct _enclosure_node),
8604                     GFP_KERNEL);
8605             if (!enclosure_dev) {
8606                 ioc_info(ioc, "failure at %s:%d/%s()!\n",
8607                      __FILE__, __LINE__, __func__);
8608                 return;
8609             }
8610             rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8611                 &enclosure_dev->pg0,
8612                 MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
8613                 enclosure_handle);
8614 
8615             if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8616                         MPI2_IOCSTATUS_MASK)) {
8617                 kfree(enclosure_dev);
8618                 return;
8619             }
8620 
8621             list_add_tail(&enclosure_dev->list,
8622                             &ioc->enclosure_list);
8623         }
8624         break;
8625     case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8626         if (enclosure_dev) {
8627             list_del(&enclosure_dev->list);
8628             kfree(enclosure_dev);
8629         }
8630         break;
8631     default:
8632         break;
8633     }
8634 }
8635 
8636 /**
8637  * _scsih_sas_broadcast_primitive_event - handle broadcast events
8638  * @ioc: per adapter object
8639  * @fw_event: The fw_event_work object
8640  * Context: user.
8641  */
8642 static void
8643 _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
8644     struct fw_event_work *fw_event)
8645 {
8646     struct scsi_cmnd *scmd;
8647     struct scsi_device *sdev;
8648     struct scsiio_tracker *st;
8649     u16 smid, handle;
8650     u32 lun;
8651     struct MPT3SAS_DEVICE *sas_device_priv_data;
8652     u32 termination_count;
8653     u32 query_count;
8654     Mpi2SCSITaskManagementReply_t *mpi_reply;
8655     Mpi2EventDataSasBroadcastPrimitive_t *event_data =
8656         (Mpi2EventDataSasBroadcastPrimitive_t *)
8657         fw_event->event_data;
8658     u16 ioc_status;
8659     unsigned long flags;
8660     int r;
8661     u8 max_retries = 0;
8662     u8 task_abort_retries;
8663 
8664     mutex_lock(&ioc->tm_cmds.mutex);
8665     ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
8666          __func__, event_data->PhyNum, event_data->PortWidth);
8667 
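    /* Block new IO to all devices while outstanding commands are
     * queried and, where necessary, aborted.
     */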
8668     _scsih_block_io_all_device(ioc);
8669 
8670     spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8671     mpi_reply = ioc->tm_cmds.reply;
8672  broadcast_aen_retry:
8673 
8674     /* sanity checks for retrying this loop */
8675     if (max_retries++ == 5) {
8676         dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
8677         goto out;
8678     } else if (max_retries > 1)
8679         dewtprintk(ioc,
8680                ioc_info(ioc, "%s: %d retry\n",
8681                     __func__, max_retries - 1));
8682 
8683     termination_count = 0;
8684     query_count = 0;
8685     for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
8686         if (ioc->shost_recovery)
8687             goto out;
8688         scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
8689         if (!scmd)
8690             continue;
8691         st = scsi_cmd_priv(scmd);
8692         sdev = scmd->device;
8693         sas_device_priv_data = sdev->hostdata;
8694         if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
8695             continue;
8696          /* skip hidden raid components */
8697         if (sas_device_priv_data->sas_target->flags &
8698             MPT_TARGET_FLAGS_RAID_COMPONENT)
8699             continue;
8700          /* skip volumes */
8701         if (sas_device_priv_data->sas_target->flags &
8702             MPT_TARGET_FLAGS_VOLUME)
8703             continue;
8704          /* skip PCIe devices */
8705         if (sas_device_priv_data->sas_target->flags &
8706             MPT_TARGET_FLAGS_PCIE_DEVICE)
8707             continue;
8708 
8709         handle = sas_device_priv_data->sas_target->handle;
8710         lun = sas_device_priv_data->lun;
8711         query_count++;
8712 
8713         if (ioc->shost_recovery)
8714             goto out;
8715 
8716         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8717         r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
8718             MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
8719             st->msix_io, 30, 0);
8720         if (r == FAILED) {
8721             sdev_printk(KERN_WARNING, sdev,
8722                 "mpt3sas_scsih_issue_tm: FAILED when sending "
8723                 "QUERY_TASK: scmd(%p)\n", scmd);
8724             spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8725             goto broadcast_aen_retry;
8726         }
8727         ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
8728             & MPI2_IOCSTATUS_MASK;
8729         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8730             sdev_printk(KERN_WARNING, sdev,
8731                 "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
8732                 ioc_status, scmd);
8733             spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8734             goto broadcast_aen_retry;
8735         }
8736 
8737         /* see if IO is still owned by IOC and target */
8738         if (mpi_reply->ResponseCode ==
8739              MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
8740              mpi_reply->ResponseCode ==
8741              MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
8742             spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8743             continue;
8744         }
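        /* The IO is no longer owned by the IOC or target, so abort it;
         * the abort is retried up to 60 times before the whole scan is
         * restarted.
         */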
8745         task_abort_retries = 0;
8746  tm_retry:
8747         if (task_abort_retries++ == 60) {
8748             dewtprintk(ioc,
8749                    ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
8750                         __func__));
8751             spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8752             goto broadcast_aen_retry;
8753         }
8754 
8755         if (ioc->shost_recovery)
8756             goto out_no_lock;
8757 
8758         r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
8759             sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
8760             st->smid, st->msix_io, 30, 0);
8761         if (r == FAILED || st->cb_idx != 0xFF) {
8762             sdev_printk(KERN_WARNING, sdev,
8763                 "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
8764                 "scmd(%p)\n", scmd);
8765             goto tm_retry;
8766         }
8767 
8768         if (task_abort_retries > 1)
8769             sdev_printk(KERN_WARNING, sdev,
8770                 "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
8771                 " scmd(%p)\n",
8772                 task_abort_retries - 1, scmd);
8773 
8774         termination_count += le32_to_cpu(mpi_reply->TerminationCount);
8775         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8776     }
8777 
8778     if (ioc->broadcast_aen_pending) {
8779         dewtprintk(ioc,
8780                ioc_info(ioc,
8781                     "%s: loop back due to pending AEN\n",
8782                     __func__));
8783          ioc->broadcast_aen_pending = 0;
8784          goto broadcast_aen_retry;
8785     }
8786 
8787  out:
8788     spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8789  out_no_lock:
8790 
8791     dewtprintk(ioc,
8792            ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
8793                 __func__, query_count, termination_count));
8794 
8795     ioc->broadcast_aen_busy = 0;
8796     if (!ioc->shost_recovery)
8797         _scsih_ublock_io_all_device(ioc);
8798     mutex_unlock(&ioc->tm_cmds.mutex);
8799 }
8800 
8801 /**
8802  * _scsih_sas_discovery_event - handle discovery events
8803  * @ioc: per adapter object
8804  * @fw_event: The fw_event_work object
8805  * Context: user.
8806  */
8807 static void
8808 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
8809     struct fw_event_work *fw_event)
8810 {
8811     Mpi2EventDataSasDiscovery_t *event_data =
8812         (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
8813 
8814     if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
8815         ioc_info(ioc, "discovery event: (%s)",
8816              event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
8817              "start" : "stop");
8818         if (event_data->DiscoveryStatus)
8819             pr_cont("discovery_status(0x%08x)",
8820                 le32_to_cpu(event_data->DiscoveryStatus));
8821         pr_cont("\n");
8822     }
8823 
8824     if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
8825         !ioc->sas_hba.num_phys) {
8826         if (disable_discovery > 0 && ioc->shost_recovery) {
8827             /* Wait for the reset to complete */
8828             while (ioc->shost_recovery)
8829                 ssleep(1);
8830         }
8831         _scsih_sas_host_add(ioc);
8832     }
8833 }
8834 
8835 /**
8836  * _scsih_sas_device_discovery_error_event - display SAS device discovery error
8837  *                      events
8838  * @ioc: per adapter object
8839  * @fw_event: The fw_event_work object
8840  * Context: user.
8841  */
8842 static void
8843 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
8844     struct fw_event_work *fw_event)
8845 {
8846     Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
8847         (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
8848 
8849     switch (event_data->ReasonCode) {
8850     case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
8851         ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
8852              le16_to_cpu(event_data->DevHandle),
8853              (u64)le64_to_cpu(event_data->SASAddress),
8854              event_data->PhysicalPort);
8855         break;
8856     case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
8857         ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
8858              le16_to_cpu(event_data->DevHandle),
8859              (u64)le64_to_cpu(event_data->SASAddress),
8860              event_data->PhysicalPort);
8861         break;
8862     default:
8863         break;
8864     }
8865 }
8866 
8867 /**
8868  * _scsih_pcie_enumeration_event - handle enumeration events
8869  * @ioc: per adapter object
8870  * @fw_event: The fw_event_work object
8871  * Context: user.
8872  */
8873 static void
8874 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
8875     struct fw_event_work *fw_event)
8876 {
8877     Mpi26EventDataPCIeEnumeration_t *event_data =
8878         (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
8879 
8880     if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
8881         return;
8882 
8883     ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
8884          (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
8885          "started" : "completed",
8886          event_data->Flags);
8887     if (event_data->EnumerationStatus)
8888         pr_cont("enumeration_status(0x%08x)",
8889             le32_to_cpu(event_data->EnumerationStatus));
8890     pr_cont("\n");
8891 }
8892 
8893 /**
8894  * _scsih_ir_fastpath - turn on fastpath for IR physdisk
8895  * @ioc: per adapter object
8896  * @handle: device handle for physical disk
8897  * @phys_disk_num: physical disk number
8898  *
8899  * Return: 0 for success, else failure.
8900  */
8901 static int
8902 _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
8903 {
8904     Mpi2RaidActionRequest_t *mpi_request;
8905     Mpi2RaidActionReply_t *mpi_reply;
8906     u16 smid;
8907     u8 issue_reset = 0;
8908     int rc = 0;
8909     u16 ioc_status;
8910     u32 log_info;
8911 
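    /* The fast-path RAID_ACTION is only issued on MPI 2.5/2.6 (SAS 3.0
     * and later) controllers; MPI 2.0 HBAs return immediately.
     */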
8912     if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
8913         return rc;
8914 
8915     mutex_lock(&ioc->scsih_cmds.mutex);
8916 
8917     if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
8918         ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
8919         rc = -EAGAIN;
8920         goto out;
8921     }
8922     ioc->scsih_cmds.status = MPT3_CMD_PENDING;
8923 
8924     smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
8925     if (!smid) {
8926         ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
8927         ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8928         rc = -EAGAIN;
8929         goto out;
8930     }
8931 
8932     mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
8933     ioc->scsih_cmds.smid = smid;
8934     memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
8935 
8936     mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
8937     mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
8938     mpi_request->PhysDiskNum = phys_disk_num;
8939 
8940     dewtprintk(ioc,
8941            ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
8942                 handle, phys_disk_num));
8943 
8944     init_completion(&ioc->scsih_cmds.done);
8945     ioc->put_smid_default(ioc, smid);
8946     wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
8947 
8948     if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
8949         mpt3sas_check_cmd_timeout(ioc,
8950             ioc->scsih_cmds.status, mpi_request,
8951             sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
8952         rc = -EFAULT;
8953         goto out;
8954     }
8955 
8956     if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
8957 
8958         mpi_reply = ioc->scsih_cmds.reply;
8959         ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
8960         if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
8961             log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
8962         else
8963             log_info = 0;
8964         ioc_status &= MPI2_IOCSTATUS_MASK;
8965         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8966             dewtprintk(ioc,
8967                    ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
8968                         ioc_status, log_info));
8969             rc = -EFAULT;
8970         } else
8971             dewtprintk(ioc,
8972                    ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
8973     }
8974 
8975  out:
8976     ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8977     mutex_unlock(&ioc->scsih_cmds.mutex);
8978 
8979     if (issue_reset)
8980         mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
8981     return rc;
8982 }
8983 
8984 /**
8985  * _scsih_reprobe_lun - reprobing lun
8986  * @sdev: scsi device struct
8987  * @no_uld_attach: sdev->no_uld_attach flag setting
8988  *
8989  **/
8990 static void
8991 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
8992 {
8993     sdev->no_uld_attach = no_uld_attach ? 1 : 0;
8994     sdev_printk(KERN_INFO, sdev, "%s raid component\n",
8995         sdev->no_uld_attach ? "hiding" : "exposing");
8996     WARN_ON(scsi_device_reprobe(sdev));
8997 }
8998 
8999 /**
9000  * _scsih_sas_volume_add - add new volume
9001  * @ioc: per adapter object
9002  * @element: IR config element data
9003  * Context: user.
9004  */
9005 static void
9006 _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
9007     Mpi2EventIrConfigElement_t *element)
9008 {
9009     struct _raid_device *raid_device;
9010     unsigned long flags;
9011     u64 wwid;
9012     u16 handle = le16_to_cpu(element->VolDevHandle);
9013     int rc;
9014 
9015     mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
9016     if (!wwid) {
9017         ioc_err(ioc, "failure at %s:%d/%s()!\n",
9018             __FILE__, __LINE__, __func__);
9019         return;
9020     }
9021 
9022     spin_lock_irqsave(&ioc->raid_device_lock, flags);
9023     raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
9024     spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9025 
9026     if (raid_device)
9027         return;
9028 
9029     raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
9030     if (!raid_device) {
9031         ioc_err(ioc, "failure at %s:%d/%s()!\n",
9032             __FILE__, __LINE__, __func__);
9033         return;
9034     }
9035 
9036     raid_device->id = ioc->sas_id++;
9037     raid_device->channel = RAID_CHANNEL;
9038     raid_device->handle = handle;
9039     raid_device->wwid = wwid;
9040     _scsih_raid_device_add(ioc, raid_device);
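    /*
     * While initial discovery is still pending the volume is only
     * recorded and evaluated as a boot device candidate; scsi_add_device()
     * for it is issued later, once discovery completes.
     */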
9041     if (!ioc->wait_for_discovery_to_complete) {
9042         rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9043             raid_device->id, 0);
9044         if (rc)
9045             _scsih_raid_device_remove(ioc, raid_device);
9046     } else {
9047         spin_lock_irqsave(&ioc->raid_device_lock, flags);
9048         _scsih_determine_boot_device(ioc, raid_device, 1);
9049         spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9050     }
9051 }
9052 
9053 /**
9054  * _scsih_sas_volume_delete - delete volume
9055  * @ioc: per adapter object
9056  * @handle: volume device handle
9057  * Context: user.
9058  */
9059 static void
9060 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
9061 {
9062     struct _raid_device *raid_device;
9063     unsigned long flags;
9064     struct MPT3SAS_TARGET *sas_target_priv_data;
9065     struct scsi_target *starget = NULL;
9066 
9067     spin_lock_irqsave(&ioc->raid_device_lock, flags);
9068     raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9069     if (raid_device) {
9070         if (raid_device->starget) {
9071             starget = raid_device->starget;
9072             sas_target_priv_data = starget->hostdata;
9073             sas_target_priv_data->deleted = 1;
9074         }
9075         ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
9076              raid_device->handle, (u64)raid_device->wwid);
9077         list_del(&raid_device->list);
9078         kfree(raid_device);
9079     }
9080     spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9081     if (starget)
9082         scsi_remove_target(&starget->dev);
9083 }
9084 
9085 /**
9086  * _scsih_sas_pd_expose - expose pd component to /dev/sdX
9087  * @ioc: per adapter object
9088  * @element: IR config element data
9089  * Context: user.
9090  */
9091 static void
9092 _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
9093     Mpi2EventIrConfigElement_t *element)
9094 {
9095     struct _sas_device *sas_device;
9096     struct scsi_target *starget = NULL;
9097     struct MPT3SAS_TARGET *sas_target_priv_data;
9098     unsigned long flags;
9099     u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9100 
9101     spin_lock_irqsave(&ioc->sas_device_lock, flags);
9102     sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
9103     if (sas_device) {
9104         sas_device->volume_handle = 0;
9105         sas_device->volume_wwid = 0;
9106         clear_bit(handle, ioc->pd_handles);
9107         if (sas_device->starget && sas_device->starget->hostdata) {
9108             starget = sas_device->starget;
9109             sas_target_priv_data = starget->hostdata;
9110             sas_target_priv_data->flags &=
9111                 ~MPT_TARGET_FLAGS_RAID_COMPONENT;
9112         }
9113     }
9114     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9115     if (!sas_device)
9116         return;
9117 
9118     /* exposing raid component */
9119     if (starget)
9120         starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
9121 
9122     sas_device_put(sas_device);
9123 }
9124 
9125 /**
9126  * _scsih_sas_pd_hide - hide pd component from /dev/sdX
9127  * @ioc: per adapter object
9128  * @element: IR config element data
9129  * Context: user.
9130  */
9131 static void
9132 _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
9133     Mpi2EventIrConfigElement_t *element)
9134 {
9135     struct _sas_device *sas_device;
9136     struct scsi_target *starget = NULL;
9137     struct MPT3SAS_TARGET *sas_target_priv_data;
9138     unsigned long flags;
9139     u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9140     u16 volume_handle = 0;
9141     u64 volume_wwid = 0;
9142 
9143     mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
9144     if (volume_handle)
9145         mpt3sas_config_get_volume_wwid(ioc, volume_handle,
9146             &volume_wwid);
9147 
9148     spin_lock_irqsave(&ioc->sas_device_lock, flags);
9149     sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
9150     if (sas_device) {
9151         set_bit(handle, ioc->pd_handles);
9152         if (sas_device->starget && sas_device->starget->hostdata) {
9153             starget = sas_device->starget;
9154             sas_target_priv_data = starget->hostdata;
9155             sas_target_priv_data->flags |=
9156                 MPT_TARGET_FLAGS_RAID_COMPONENT;
9157             sas_device->volume_handle = volume_handle;
9158             sas_device->volume_wwid = volume_wwid;
9159         }
9160     }
9161     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9162     if (!sas_device)
9163         return;
9164 
9165     /* hiding raid component */
9166     _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9167 
9168     if (starget)
9169         starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
9170 
9171     sas_device_put(sas_device);
9172 }
9173 
9174 /**
9175  * _scsih_sas_pd_delete - delete pd component
9176  * @ioc: per adapter object
9177  * @element: IR config element data
9178  * Context: user.
9179  */
9180 static void
9181 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
9182     Mpi2EventIrConfigElement_t *element)
9183 {
9184     u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9185 
9186     _scsih_device_remove_by_handle(ioc, handle);
9187 }
9188 
9189 /**
9190  * _scsih_sas_pd_add - add pd component
9191  * @ioc: per adapter object
9192  * @element: IR config element data
9193  * Context: user.
9194  */
9195 static void
9196 _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
9197     Mpi2EventIrConfigElement_t *element)
9198 {
9199     struct _sas_device *sas_device;
9200     u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9201     Mpi2ConfigReply_t mpi_reply;
9202     Mpi2SasDevicePage0_t sas_device_pg0;
9203     u32 ioc_status;
9204     u64 sas_address;
9205     u16 parent_handle;
9206 
9207     set_bit(handle, ioc->pd_handles);
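    /*
     * If the phys disk is already known, only the IR fast path needs to
     * be refreshed; otherwise the parent link is updated and the device
     * is added as a hidden RAID component (is_pd = 1).
     */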
9208 
9209     sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9210     if (sas_device) {
9211         _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9212         sas_device_put(sas_device);
9213         return;
9214     }
9215 
9216     if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
9217         MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
9218         ioc_err(ioc, "failure at %s:%d/%s()!\n",
9219             __FILE__, __LINE__, __func__);
9220         return;
9221     }
9222 
9223     ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9224         MPI2_IOCSTATUS_MASK;
9225     if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9226         ioc_err(ioc, "failure at %s:%d/%s()!\n",
9227             __FILE__, __LINE__, __func__);
9228         return;
9229     }
9230 
9231     parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9232     if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
9233         mpt3sas_transport_update_links(ioc, sas_address, handle,
9234             sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
9235             mpt3sas_get_port_by_id(ioc,
9236             sas_device_pg0.PhysicalPort, 0));
9237 
9238     _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9239     _scsih_add_device(ioc, handle, 0, 1);
9240 }
9241 
9242 /**
9243  * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
9244  * @ioc: per adapter object
9245  * @event_data: event data payload
9246  * Context: user.
9247  */
9248 static void
9249 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
9250     Mpi2EventDataIrConfigChangeList_t *event_data)
9251 {
9252     Mpi2EventIrConfigElement_t *element;
9253     u8 element_type;
9254     int i;
9255     char *reason_str = NULL, *element_str = NULL;
9256 
9257     element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
9258 
9259     ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
9260          le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
9261          "foreign" : "native",
9262          event_data->NumElements);
9263     for (i = 0; i < event_data->NumElements; i++, element++) {
9264         switch (element->ReasonCode) {
9265         case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9266             reason_str = "add";
9267             break;
9268         case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9269             reason_str = "remove";
9270             break;
9271         case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
9272             reason_str = "no change";
9273             break;
9274         case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9275             reason_str = "hide";
9276             break;
9277         case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9278             reason_str = "unhide";
9279             break;
9280         case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9281             reason_str = "volume_created";
9282             break;
9283         case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9284             reason_str = "volume_deleted";
9285             break;
9286         case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9287             reason_str = "pd_created";
9288             break;
9289         case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9290             reason_str = "pd_deleted";
9291             break;
9292         default:
9293             reason_str = "unknown reason";
9294             break;
9295         }
9296         element_type = le16_to_cpu(element->ElementFlags) &
9297             MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
9298         switch (element_type) {
9299         case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
9300             element_str = "volume";
9301             break;
9302         case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
9303             element_str = "phys disk";
9304             break;
9305         case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
9306             element_str = "hot spare";
9307             break;
9308         default:
9309             element_str = "unknown element";
9310             break;
9311         }
9312         pr_info("\t(%s:%s), vol handle(0x%04x), " \
9313             "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
9314             reason_str, le16_to_cpu(element->VolDevHandle),
9315             le16_to_cpu(element->PhysDiskDevHandle),
9316             element->PhysDiskNum);
9317     }
9318 }
9319 
9320 /**
9321  * _scsih_sas_ir_config_change_event - handle ir configuration change events
9322  * @ioc: per adapter object
9323  * @fw_event: The fw_event_work object
9324  * Context: user.
9325  */
9326 static void
9327 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
9328     struct fw_event_work *fw_event)
9329 {
9330     Mpi2EventIrConfigElement_t *element;
9331     int i;
9332     u8 foreign_config;
9333     Mpi2EventDataIrConfigChangeList_t *event_data =
9334         (Mpi2EventDataIrConfigChangeList_t *)
9335         fw_event->event_data;
9336 
9337     if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9338          (!ioc->hide_ir_msg))
9339         _scsih_sas_ir_config_change_event_debug(ioc, event_data);
9340 
9341     foreign_config = (le32_to_cpu(event_data->Flags) &
9342         MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
9343 
9344     element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
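    /*
     * While host recovery is in progress only the HIDE elements are acted
     * upon (refreshing the IR fast path); the remaining add/remove work is
     * picked up by the device rescan that follows the reset.
     */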
9345     if (ioc->shost_recovery &&
9346         ioc->hba_mpi_version_belonged != MPI2_VERSION) {
9347         for (i = 0; i < event_data->NumElements; i++, element++) {
9348             if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
9349                 _scsih_ir_fastpath(ioc,
9350                     le16_to_cpu(element->PhysDiskDevHandle),
9351                     element->PhysDiskNum);
9352         }
9353         return;
9354     }
9355 
9356     for (i = 0; i < event_data->NumElements; i++, element++) {
9357 
9358         switch (element->ReasonCode) {
9359         case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9360         case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9361             if (!foreign_config)
9362                 _scsih_sas_volume_add(ioc, element);
9363             break;
9364         case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9365         case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9366             if (!foreign_config)
9367                 _scsih_sas_volume_delete(ioc,
9368                     le16_to_cpu(element->VolDevHandle));
9369             break;
9370         case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9371             if (!ioc->is_warpdrive)
9372                 _scsih_sas_pd_hide(ioc, element);
9373             break;
9374         case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9375             if (!ioc->is_warpdrive)
9376                 _scsih_sas_pd_expose(ioc, element);
9377             break;
9378         case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9379             if (!ioc->is_warpdrive)
9380                 _scsih_sas_pd_add(ioc, element);
9381             break;
9382         case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9383             if (!ioc->is_warpdrive)
9384                 _scsih_sas_pd_delete(ioc, element);
9385             break;
9386         }
9387     }
9388 }
9389 
9390 /**
9391  * _scsih_sas_ir_volume_event - IR volume event
9392  * @ioc: per adapter object
9393  * @fw_event: The fw_event_work object
9394  * Context: user.
9395  */
9396 static void
9397 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
9398     struct fw_event_work *fw_event)
9399 {
9400     u64 wwid;
9401     unsigned long flags;
9402     struct _raid_device *raid_device;
9403     u16 handle;
9404     u32 state;
9405     int rc;
9406     Mpi2EventDataIrVolume_t *event_data =
9407         (Mpi2EventDataIrVolume_t *) fw_event->event_data;
9408 
9409     if (ioc->shost_recovery)
9410         return;
9411 
9412     if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
9413         return;
9414 
9415     handle = le16_to_cpu(event_data->VolDevHandle);
9416     state = le32_to_cpu(event_data->NewValue);
9417     if (!ioc->hide_ir_msg)
9418         dewtprintk(ioc,
9419                ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
9420                     __func__, handle,
9421                     le32_to_cpu(event_data->PreviousValue),
9422                     state));
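    /*
     * MISSING/FAILED volumes are torn down; ONLINE, DEGRADED and OPTIMAL
     * volumes are registered with the SCSI midlayer if not already known.
     */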
9423     switch (state) {
9424     case MPI2_RAID_VOL_STATE_MISSING:
9425     case MPI2_RAID_VOL_STATE_FAILED:
9426         _scsih_sas_volume_delete(ioc, handle);
9427         break;
9428 
9429     case MPI2_RAID_VOL_STATE_ONLINE:
9430     case MPI2_RAID_VOL_STATE_DEGRADED:
9431     case MPI2_RAID_VOL_STATE_OPTIMAL:
9432 
9433         spin_lock_irqsave(&ioc->raid_device_lock, flags);
9434         raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9435         spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9436 
9437         if (raid_device)
9438             break;
9439 
9440         mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
9441         if (!wwid) {
9442             ioc_err(ioc, "failure at %s:%d/%s()!\n",
9443                 __FILE__, __LINE__, __func__);
9444             break;
9445         }
9446 
9447         raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
9448         if (!raid_device) {
9449             ioc_err(ioc, "failure at %s:%d/%s()!\n",
9450                 __FILE__, __LINE__, __func__);
9451             break;
9452         }
9453 
9454         raid_device->id = ioc->sas_id++;
9455         raid_device->channel = RAID_CHANNEL;
9456         raid_device->handle = handle;
9457         raid_device->wwid = wwid;
9458         _scsih_raid_device_add(ioc, raid_device);
9459         rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9460             raid_device->id, 0);
9461         if (rc)
9462             _scsih_raid_device_remove(ioc, raid_device);
9463         break;
9464 
9465     case MPI2_RAID_VOL_STATE_INITIALIZING:
9466     default:
9467         break;
9468     }
9469 }
9470 
9471 /**
9472  * _scsih_sas_ir_physical_disk_event - PD event
9473  * @ioc: per adapter object
9474  * @fw_event: The fw_event_work object
9475  * Context: user.
9476  */
9477 static void
9478 _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
9479     struct fw_event_work *fw_event)
9480 {
9481     u16 handle, parent_handle;
9482     u32 state;
9483     struct _sas_device *sas_device;
9484     Mpi2ConfigReply_t mpi_reply;
9485     Mpi2SasDevicePage0_t sas_device_pg0;
9486     u32 ioc_status;
9487     Mpi2EventDataIrPhysicalDisk_t *event_data =
9488         (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
9489     u64 sas_address;
9490 
9491     if (ioc->shost_recovery)
9492         return;
9493 
9494     if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
9495         return;
9496 
9497     handle = le16_to_cpu(event_data->PhysDiskDevHandle);
9498     state = le32_to_cpu(event_data->NewValue);
9499 
9500     if (!ioc->hide_ir_msg)
9501         dewtprintk(ioc,
9502                ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
9503                     __func__, handle,
9504                     le32_to_cpu(event_data->PreviousValue),
9505                     state));
9506 
9507     switch (state) {
9508     case MPI2_RAID_PD_STATE_ONLINE:
9509     case MPI2_RAID_PD_STATE_DEGRADED:
9510     case MPI2_RAID_PD_STATE_REBUILDING:
9511     case MPI2_RAID_PD_STATE_OPTIMAL:
9512     case MPI2_RAID_PD_STATE_HOT_SPARE:
9513 
9514         if (!ioc->is_warpdrive)
9515             set_bit(handle, ioc->pd_handles);
9516 
9517         sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9518         if (sas_device) {
9519             sas_device_put(sas_device);
9520             return;
9521         }
9522 
9523         if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9524             &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
9525             handle))) {
9526             ioc_err(ioc, "failure at %s:%d/%s()!\n",
9527                 __FILE__, __LINE__, __func__);
9528             return;
9529         }
9530 
9531         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9532             MPI2_IOCSTATUS_MASK;
9533         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9534             ioc_err(ioc, "failure at %s:%d/%s()!\n",
9535                 __FILE__, __LINE__, __func__);
9536             return;
9537         }
9538 
9539         parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9540         if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
9541             mpt3sas_transport_update_links(ioc, sas_address, handle,
9542                 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
9543                 mpt3sas_get_port_by_id(ioc,
9544                 sas_device_pg0.PhysicalPort, 0));
9545 
9546         _scsih_add_device(ioc, handle, 0, 1);
9547 
9548         break;
9549 
9550     case MPI2_RAID_PD_STATE_OFFLINE:
9551     case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
9552     case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
9553     default:
9554         break;
9555     }
9556 }
9557 
9558 /**
9559  * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
9560  * @ioc: per adapter object
9561  * @event_data: event data payload
9562  * Context: user.
9563  */
9564 static void
9565 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
9566     Mpi2EventDataIrOperationStatus_t *event_data)
9567 {
9568     char *reason_str = NULL;
9569 
9570     switch (event_data->RAIDOperation) {
9571     case MPI2_EVENT_IR_RAIDOP_RESYNC:
9572         reason_str = "resync";
9573         break;
9574     case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
9575         reason_str = "online capacity expansion";
9576         break;
9577     case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
9578         reason_str = "consistency check";
9579         break;
9580     case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
9581         reason_str = "background init";
9582         break;
9583     case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
9584         reason_str = "make data consistent";
9585         break;
9586     }
9587 
9588     if (!reason_str)
9589         return;
9590 
9591     ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
9592          reason_str,
9593          le16_to_cpu(event_data->VolDevHandle),
9594          event_data->PercentComplete);
9595 }
9596 
9597 /**
9598  * _scsih_sas_ir_operation_status_event - handle RAID operation events
9599  * @ioc: per adapter object
9600  * @fw_event: The fw_event_work object
9601  * Context: user.
9602  */
9603 static void
9604 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
9605     struct fw_event_work *fw_event)
9606 {
9607     Mpi2EventDataIrOperationStatus_t *event_data =
9608         (Mpi2EventDataIrOperationStatus_t *)
9609         fw_event->event_data;
9610     static struct _raid_device *raid_device;
9611     unsigned long flags;
9612     u16 handle;
9613 
9614     if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9615         (!ioc->hide_ir_msg))
9616         _scsih_sas_ir_operation_status_event_debug(ioc,
9617              event_data);
9618 
9619     /* code added for raid transport support */
9620     if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
9621 
9622         spin_lock_irqsave(&ioc->raid_device_lock, flags);
9623         handle = le16_to_cpu(event_data->VolDevHandle);
9624         raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9625         if (raid_device)
9626             raid_device->percent_complete =
9627                 event_data->PercentComplete;
9628         spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9629     }
9630 }
9631 
9632 /**
9633  * _scsih_prep_device_scan - initialize parameters prior to device scan
9634  * @ioc: per adapter object
9635  *
9636  * Set the deleted flag prior to device scan.  If the device is found during
9637  * the scan, then we clear the deleted flag.
9638  */
9639 static void
9640 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
9641 {
9642     struct MPT3SAS_DEVICE *sas_device_priv_data;
9643     struct scsi_device *sdev;
9644 
9645     shost_for_each_device(sdev, ioc->shost) {
9646         sas_device_priv_data = sdev->hostdata;
9647         if (sas_device_priv_data && sas_device_priv_data->sas_target)
9648             sas_device_priv_data->sas_target->deleted = 1;
9649     }
9650 }
9651 
9652 /**
9653  * _scsih_update_device_qdepth - Update QD during Reset.
9654  * @ioc: per adapter object
9655  *
9656  */
9657 static void
9658 _scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc)
9659 {
9660     struct MPT3SAS_DEVICE *sas_device_priv_data;
9661     struct MPT3SAS_TARGET *sas_target_priv_data;
9662     struct _sas_device *sas_device;
9663     struct scsi_device *sdev;
9664     u16 qdepth;
9665 
9666     ioc_info(ioc, "Update devices with firmware reported queue depth\n");
9667     shost_for_each_device(sdev, ioc->shost) {
9668         sas_device_priv_data = sdev->hostdata;
9669         if (sas_device_priv_data && sas_device_priv_data->sas_target) {
9670             sas_target_priv_data = sas_device_priv_data->sas_target;
9671             sas_device = sas_device_priv_data->sas_target->sas_dev;
9672             if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE)
9673                 qdepth = ioc->max_nvme_qd;
9674             else if (sas_device &&
9675                 sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
9676                 qdepth = (sas_device->port_type > 1) ?
9677                     ioc->max_wideport_qd : ioc->max_narrowport_qd;
9678             else if (sas_device &&
9679                 sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
9680                 qdepth = ioc->max_sata_qd;
9681             else
9682                 continue;
9683             mpt3sas_scsih_change_queue_depth(sdev, qdepth);
9684         }
9685     }
9686 }
9687 
9688 /**
9689  * _scsih_mark_responding_sas_device - mark a sas_device as responding
9690  * @ioc: per adapter object
9691  * @sas_device_pg0: SAS Device page 0
9692  *
9693  * After host reset, find out whether devices are still responding.
9694  * Used in _scsih_remove_unresponding_devices.
9695  */
9696 static void
9697 _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
9698 Mpi2SasDevicePage0_t *sas_device_pg0)
9699 {
9700     struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9701     struct scsi_target *starget;
9702     struct _sas_device *sas_device = NULL;
9703     struct _enclosure_node *enclosure_dev = NULL;
9704     unsigned long flags;
9705     struct hba_port *port = mpt3sas_get_port_by_id(
9706         ioc, sas_device_pg0->PhysicalPort, 0);
9707 
9708     if (sas_device_pg0->EnclosureHandle) {
9709         enclosure_dev =
9710             mpt3sas_scsih_enclosure_find_by_handle(ioc,
9711                 le16_to_cpu(sas_device_pg0->EnclosureHandle));
9712         if (enclosure_dev == NULL)
9713             ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
9714                  le16_to_cpu(sas_device_pg0->EnclosureHandle));
9715     }
9716     spin_lock_irqsave(&ioc->sas_device_lock, flags);
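    /*
     * A device is treated as the same end device (and marked responding)
     * only if SAS address, slot and hba_port all match; if firmware handed
     * out a new handle across the reset, the cached handle is refreshed.
     */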
9717     list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
9718         if (sas_device->sas_address != le64_to_cpu(
9719             sas_device_pg0->SASAddress))
9720             continue;
9721         if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
9722             continue;
9723         if (sas_device->port != port)
9724             continue;
9725         sas_device->responding = 1;
9726         starget = sas_device->starget;
9727         if (starget && starget->hostdata) {
9728             sas_target_priv_data = starget->hostdata;
9729             sas_target_priv_data->tm_busy = 0;
9730             sas_target_priv_data->deleted = 0;
9731         } else
9732             sas_target_priv_data = NULL;
9733         if (starget) {
9734             starget_printk(KERN_INFO, starget,
9735                 "handle(0x%04x), sas_addr(0x%016llx)\n",
9736                 le16_to_cpu(sas_device_pg0->DevHandle),
9737                 (unsigned long long)
9738                 sas_device->sas_address);
9739 
9740             if (sas_device->enclosure_handle != 0)
9741                 starget_printk(KERN_INFO, starget,
9742                  "enclosure logical id(0x%016llx), slot(%d)\n",
9743                  (unsigned long long)
9744                  sas_device->enclosure_logical_id,
9745                  sas_device->slot);
9746         }
9747         if (le16_to_cpu(sas_device_pg0->Flags) &
9748               MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
9749             sas_device->enclosure_level =
9750                sas_device_pg0->EnclosureLevel;
9751             memcpy(&sas_device->connector_name[0],
9752                 &sas_device_pg0->ConnectorName[0], 4);
9753         } else {
9754             sas_device->enclosure_level = 0;
9755             sas_device->connector_name[0] = '\0';
9756         }
9757 
9758         sas_device->enclosure_handle =
9759             le16_to_cpu(sas_device_pg0->EnclosureHandle);
9760         sas_device->is_chassis_slot_valid = 0;
9761         if (enclosure_dev) {
9762             sas_device->enclosure_logical_id = le64_to_cpu(
9763                 enclosure_dev->pg0.EnclosureLogicalID);
9764             if (le16_to_cpu(enclosure_dev->pg0.Flags) &
9765                 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
9766                 sas_device->is_chassis_slot_valid = 1;
9767                 sas_device->chassis_slot =
9768                     enclosure_dev->pg0.ChassisSlot;
9769             }
9770         }
9771 
9772         if (sas_device->handle == le16_to_cpu(
9773             sas_device_pg0->DevHandle))
9774             goto out;
9775         pr_info("\thandle changed from(0x%04x)!!!\n",
9776             sas_device->handle);
9777         sas_device->handle = le16_to_cpu(
9778             sas_device_pg0->DevHandle);
9779         if (sas_target_priv_data)
9780             sas_target_priv_data->handle =
9781                 le16_to_cpu(sas_device_pg0->DevHandle);
9782         goto out;
9783     }
9784  out:
9785     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9786 }
9787 
9788 /**
9789  * _scsih_create_enclosure_list_after_reset - Free Existing list,
9790  *  And create enclosure list by scanning all Enclosure Page(0)s
9791  * @ioc: per adapter object
9792  */
9793 static void
9794 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
9795 {
9796     struct _enclosure_node *enclosure_dev;
9797     Mpi2ConfigReply_t mpi_reply;
9798     u16 enclosure_handle;
9799     int rc;
9800 
9801     /* Free existing enclosure list */
9802     mpt3sas_free_enclosure_list(ioc);
9803 
9804     /* Reconstructing enclosure list after reset */
9805     enclosure_handle = 0xFFFF;
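    /*
     * Walk every Enclosure Page(0) using the GET_NEXT_HANDLE form; the
     * starting handle 0xFFFF yields the first enclosure and the loop ends
     * when the config read fails or IOCStatus is non-zero.
     */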
9806     do {
9807         enclosure_dev =
9808             kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
9809         if (!enclosure_dev) {
9810             ioc_err(ioc, "failure at %s:%d/%s()!\n",
9811                 __FILE__, __LINE__, __func__);
9812             return;
9813         }
9814         rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
9815                 &enclosure_dev->pg0,
9816                 MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
9817                 enclosure_handle);
9818 
9819         if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
9820                         MPI2_IOCSTATUS_MASK)) {
9821             kfree(enclosure_dev);
9822             return;
9823         }
9824         list_add_tail(&enclosure_dev->list,
9825                         &ioc->enclosure_list);
9826         enclosure_handle =
9827             le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
9828     } while (1);
9829 }
9830 
9831 /**
9832  * _scsih_search_responding_sas_devices - search for responding SAS end devices
9833  * @ioc: per adapter object
9834  *
9835  * After host reset, find out whether devices are still responding.
9836  * If not, remove.
9837  */
9838 static void
9839 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
9840 {
9841     Mpi2SasDevicePage0_t sas_device_pg0;
9842     Mpi2ConfigReply_t mpi_reply;
9843     u16 ioc_status;
9844     u16 handle;
9845     u32 device_info;
9846 
9847     ioc_info(ioc, "search for end-devices: start\n");
9848 
9849     if (list_empty(&ioc->sas_device_list))
9850         goto out;
9851 
9852     handle = 0xFFFF;
9853     while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9854         &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9855         handle))) {
9856         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9857             MPI2_IOCSTATUS_MASK;
9858         if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9859             break;
9860         handle = le16_to_cpu(sas_device_pg0.DevHandle);
9861         device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
9862         if (!(_scsih_is_end_device(device_info)))
9863             continue;
9864         _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
9865     }
9866 
9867  out:
9868     ioc_info(ioc, "search for end-devices: complete\n");
9869 }
9870 
9871 /**
9872  * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
9873  * @ioc: per adapter object
9874  * @pcie_device_pg0: PCIe Device page 0
9875  *
9876  * After host reset, find out whether devices are still responding.
9877  * Used in _scsih_remove_unresponding_devices.
9878  */
9879 static void
9880 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
9881     Mpi26PCIeDevicePage0_t *pcie_device_pg0)
9882 {
9883     struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9884     struct scsi_target *starget;
9885     struct _pcie_device *pcie_device;
9886     unsigned long flags;
9887 
9888     spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9889     list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
9890         if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
9891             && (pcie_device->slot == le16_to_cpu(
9892             pcie_device_pg0->Slot))) {
9893             pcie_device->access_status =
9894                     pcie_device_pg0->AccessStatus;
9895             pcie_device->responding = 1;
9896             starget = pcie_device->starget;
9897             if (starget && starget->hostdata) {
9898                 sas_target_priv_data = starget->hostdata;
9899                 sas_target_priv_data->tm_busy = 0;
9900                 sas_target_priv_data->deleted = 0;
9901             } else
9902                 sas_target_priv_data = NULL;
9903             if (starget) {
9904                 starget_printk(KERN_INFO, starget,
9905                     "handle(0x%04x), wwid(0x%016llx) ",
9906                     pcie_device->handle,
9907                     (unsigned long long)pcie_device->wwid);
9908                 if (pcie_device->enclosure_handle != 0)
9909                     starget_printk(KERN_INFO, starget,
9910                         "enclosure logical id(0x%016llx), "
9911                         "slot(%d)\n",
9912                         (unsigned long long)
9913                         pcie_device->enclosure_logical_id,
9914                         pcie_device->slot);
9915             }
9916 
9917             if (((le32_to_cpu(pcie_device_pg0->Flags)) &
9918                 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
9919                 (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
9920                 pcie_device->enclosure_level =
9921                     pcie_device_pg0->EnclosureLevel;
9922                 memcpy(&pcie_device->connector_name[0],
9923                     &pcie_device_pg0->ConnectorName[0], 4);
9924             } else {
9925                 pcie_device->enclosure_level = 0;
9926                 pcie_device->connector_name[0] = '\0';
9927             }
9928 
9929             if (pcie_device->handle == le16_to_cpu(
9930                 pcie_device_pg0->DevHandle))
9931                 goto out;
9932             pr_info("\thandle changed from(0x%04x)!!!\n",
9933                 pcie_device->handle);
9934             pcie_device->handle = le16_to_cpu(
9935                 pcie_device_pg0->DevHandle);
9936             if (sas_target_priv_data)
9937                 sas_target_priv_data->handle =
9938                     le16_to_cpu(pcie_device_pg0->DevHandle);
9939             goto out;
9940         }
9941     }
9942 
9943  out:
9944     spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9945 }
9946 
9947 /**
9948  * _scsih_search_responding_pcie_devices - search for responding PCIe devices
9949  * @ioc: per adapter object
9950  *
9951  * After host reset, find out whether devices are still responding.
9952  * If not, remove.
9953  */
9954 static void
9955 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
9956 {
9957     Mpi26PCIeDevicePage0_t pcie_device_pg0;
9958     Mpi2ConfigReply_t mpi_reply;
9959     u16 ioc_status;
9960     u16 handle;
9961     u32 device_info;
9962 
9963     ioc_info(ioc, "search for end-devices: start\n");
9964 
9965     if (list_empty(&ioc->pcie_device_list))
9966         goto out;
9967 
9968     handle = 0xFFFF;
9969     while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9970         &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9971         handle))) {
9972         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9973             MPI2_IOCSTATUS_MASK;
9974         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9975             ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
9976                  __func__, ioc_status,
9977                  le32_to_cpu(mpi_reply.IOCLogInfo));
9978             break;
9979         }
9980         handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9981         device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
9982         if (!(_scsih_is_nvme_pciescsi_device(device_info)))
9983             continue;
9984         _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
9985     }
9986 out:
9987     ioc_info(ioc, "search for PCIe end-devices: complete\n");
9988 }
9989 
9990 /**
9991  * _scsih_mark_responding_raid_device - mark a raid_device as responding
9992  * @ioc: per adapter object
9993  * @wwid: world wide identifier for raid volume
9994  * @handle: device handle
9995  *
9996  * After host reset, find out whether devices are still responding.
9997  * Used in _scsih_remove_unresponding_devices.
9998  */
9999 static void
10000 _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
10001     u16 handle)
10002 {
10003     struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
10004     struct scsi_target *starget;
10005     struct _raid_device *raid_device;
10006     unsigned long flags;
10007 
10008     spin_lock_irqsave(&ioc->raid_device_lock, flags);
10009     list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
10010         if (raid_device->wwid == wwid && raid_device->starget) {
10011             starget = raid_device->starget;
10012             if (starget && starget->hostdata) {
10013                 sas_target_priv_data = starget->hostdata;
10014                 sas_target_priv_data->deleted = 0;
10015             } else
10016                 sas_target_priv_data = NULL;
10017             raid_device->responding = 1;
10018             spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10019             starget_printk(KERN_INFO, raid_device->starget,
10020                 "handle(0x%04x), wwid(0x%016llx)\n", handle,
10021                 (unsigned long long)raid_device->wwid);
10022 
10023             /*
10024              * WARPDRIVE: The handles of the PDs might have changed
10025              * across the host reset so re-initialize the
10026              * required data for Direct IO
10027              */
10028             mpt3sas_init_warpdrive_properties(ioc, raid_device);
10029             spin_lock_irqsave(&ioc->raid_device_lock, flags);
10030             if (raid_device->handle == handle) {
10031                 spin_unlock_irqrestore(&ioc->raid_device_lock,
10032                     flags);
10033                 return;
10034             }
10035             pr_info("\thandle changed from(0x%04x)!!!\n",
10036                 raid_device->handle);
10037             raid_device->handle = handle;
10038             if (sas_target_priv_data)
10039                 sas_target_priv_data->handle = handle;
10040             spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10041             return;
10042         }
10043     }
10044     spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10045 }
10046 
10047 /**
10048  * _scsih_search_responding_raid_devices - search for responding raid volumes
10049  * @ioc: per adapter object
10050  *
10051  * After host reset, find out whether devices are still responding.
10052  * If not, remove.
10053  */
10054 static void
10055 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
10056 {
10057     Mpi2RaidVolPage1_t volume_pg1;
10058     Mpi2RaidVolPage0_t volume_pg0;
10059     Mpi2RaidPhysDiskPage0_t pd_pg0;
10060     Mpi2ConfigReply_t mpi_reply;
10061     u16 ioc_status;
10062     u16 handle;
10063     u8 phys_disk_num;
10064 
10065     if (!ioc->ir_firmware)
10066         return;
10067 
10068     ioc_info(ioc, "search for raid volumes: start\n");
10069 
10070     if (list_empty(&ioc->raid_device_list))
10071         goto out;
10072 
10073     handle = 0xFFFF;
10074     while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
10075         &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
10076         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10077             MPI2_IOCSTATUS_MASK;
10078         if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10079             break;
10080         handle = le16_to_cpu(volume_pg1.DevHandle);
10081 
10082         if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10083             &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10084              sizeof(Mpi2RaidVolPage0_t)))
10085             continue;
10086 
10087         if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10088             volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10089             volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
10090             _scsih_mark_responding_raid_device(ioc,
10091                 le64_to_cpu(volume_pg1.WWID), handle);
10092     }
10093 
10094     /* refresh the pd_handles */
10095     if (!ioc->is_warpdrive) {
10096         phys_disk_num = 0xFF;
10097         memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
10098         while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10099             &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10100             phys_disk_num))) {
10101             ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10102                 MPI2_IOCSTATUS_MASK;
10103             if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10104                 break;
10105             phys_disk_num = pd_pg0.PhysDiskNum;
10106             handle = le16_to_cpu(pd_pg0.DevHandle);
10107             set_bit(handle, ioc->pd_handles);
10108         }
10109     }
10110  out:
10111     ioc_info(ioc, "search for responding raid volumes: complete\n");
10112 }
10113 
10114 /**
10115  * _scsih_mark_responding_expander - mark an expander as responding
10116  * @ioc: per adapter object
10117  * @expander_pg0: SAS Expander Config Page 0
10118  *
10119  * After host reset, find out whether devices are still responding.
10120  * Used in _scsih_remove_unresponding_devices.
10121  */
10122 static void
10123 _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
10124     Mpi2ExpanderPage0_t *expander_pg0)
10125 {
10126     struct _sas_node *sas_expander = NULL;
10127     unsigned long flags;
10128     int i;
10129     struct _enclosure_node *enclosure_dev = NULL;
10130     u16 handle = le16_to_cpu(expander_pg0->DevHandle);
10131     u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
10132     u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
10133     struct hba_port *port = mpt3sas_get_port_by_id(
10134         ioc, expander_pg0->PhysicalPort, 0);
10135 
10136     if (enclosure_handle)
10137         enclosure_dev =
10138             mpt3sas_scsih_enclosure_find_by_handle(ioc,
10139                             enclosure_handle);
10140 
10141     spin_lock_irqsave(&ioc->sas_node_lock, flags);
10142     list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
10143         if (sas_expander->sas_address != sas_address)
10144             continue;
10145         if (sas_expander->port != port)
10146             continue;
10147         sas_expander->responding = 1;
10148 
10149         if (enclosure_dev) {
10150             sas_expander->enclosure_logical_id =
10151                 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
10152             sas_expander->enclosure_handle =
10153                 le16_to_cpu(expander_pg0->EnclosureHandle);
10154         }
10155 
10156         if (sas_expander->handle == handle)
10157             goto out;
10158         pr_info("\texpander(0x%016llx): handle changed" \
10159             " from(0x%04x) to (0x%04x)!!!\n",
10160             (unsigned long long)sas_expander->sas_address,
10161             sas_expander->handle, handle);
10162         sas_expander->handle = handle;
10163         for (i = 0 ; i < sas_expander->num_phys ; i++)
10164             sas_expander->phy[i].handle = handle;
10165         goto out;
10166     }
10167  out:
10168     spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10169 }
10170 
10171 /**
10172  * _scsih_search_responding_expanders - search for responding expanders
10173  * @ioc: per adapter object
10174  *
10175  * After host reset, find out whether devices are still responding.
10176  * If not, remove.
10177  */
10178 static void
10179 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
10180 {
10181     Mpi2ExpanderPage0_t expander_pg0;
10182     Mpi2ConfigReply_t mpi_reply;
10183     u16 ioc_status;
10184     u64 sas_address;
10185     u16 handle;
10186     u8 port;
10187 
10188     ioc_info(ioc, "search for expanders: start\n");
10189 
10190     if (list_empty(&ioc->sas_expander_list))
10191         goto out;
10192 
10193     handle = 0xFFFF;
10194     while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10195         MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10196 
10197         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10198             MPI2_IOCSTATUS_MASK;
10199         if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10200             break;
10201 
10202         handle = le16_to_cpu(expander_pg0.DevHandle);
10203         sas_address = le64_to_cpu(expander_pg0.SASAddress);
10204         port = expander_pg0.PhysicalPort;
10205         pr_info(
10206             "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
10207             handle, (unsigned long long)sas_address,
10208             (ioc->multipath_on_hba ?
10209             port : MULTIPATH_DISABLED_PORT_ID));
10210         _scsih_mark_responding_expander(ioc, &expander_pg0);
10211     }
10212 
10213  out:
10214     ioc_info(ioc, "search for expanders: complete\n");
10215 }
10216 
10217 /**
10218  * _scsih_remove_unresponding_devices - removing unresponding devices
10219  * @ioc: per adapter object
10220  */
10221 static void
10222 _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
10223 {
10224     struct _sas_device *sas_device, *sas_device_next;
10225     struct _sas_node *sas_expander, *sas_expander_next;
10226     struct _raid_device *raid_device, *raid_device_next;
10227     struct _pcie_device *pcie_device, *pcie_device_next;
10228     struct list_head tmp_list;
10229     unsigned long flags;
10230     LIST_HEAD(head);
10231 
10232     ioc_info(ioc, "removing unresponding devices: start\n");
10233 
10234     /* removing unresponding end devices */
10235     ioc_info(ioc, "removing unresponding devices: end-devices\n");
10236     /*
10237      * Iterate, pulling off devices marked as non-responding. We become the
10238      * owner for the reference the list had on any object we prune.
10239      */
10240     spin_lock_irqsave(&ioc->sas_device_lock, flags);
10241 
10242     /*
10243      * Clean up the sas_device_init_list list as
10244      * driver goes for fresh scan as part of diag reset.
10245      */
10246     list_for_each_entry_safe(sas_device, sas_device_next,
10247         &ioc->sas_device_init_list, list) {
10248         list_del_init(&sas_device->list);
10249         sas_device_put(sas_device);
10250     }
10251 
10252     list_for_each_entry_safe(sas_device, sas_device_next,
10253         &ioc->sas_device_list, list) {
10254         if (!sas_device->responding)
10255             list_move_tail(&sas_device->list, &head);
10256         else
10257             sas_device->responding = 0;
10258     }
10259     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10260 
10261     /*
10262      * Now, uninitialize and remove the unresponding devices we pruned.
10263      */
10264     list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
10265         _scsih_remove_device(ioc, sas_device);
10266         list_del_init(&sas_device->list);
10267         sas_device_put(sas_device);
10268     }
10269 
10270     ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
10271     INIT_LIST_HEAD(&head);
10272     spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10273     /*
10274      * Clean up the pcie_device_init_list list as
10275      * driver goes for fresh scan as part of diag reset.
10276      */
10277     list_for_each_entry_safe(pcie_device, pcie_device_next,
10278         &ioc->pcie_device_init_list, list) {
10279         list_del_init(&pcie_device->list);
10280         pcie_device_put(pcie_device);
10281     }
10282 
10283     list_for_each_entry_safe(pcie_device, pcie_device_next,
10284         &ioc->pcie_device_list, list) {
10285         if (!pcie_device->responding)
10286             list_move_tail(&pcie_device->list, &head);
10287         else
10288             pcie_device->responding = 0;
10289     }
10290     spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10291 
10292     list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
10293         _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
10294         list_del_init(&pcie_device->list);
10295         pcie_device_put(pcie_device);
10296     }
10297 
10298     /* removing unresponding volumes */
10299     if (ioc->ir_firmware) {
10300         ioc_info(ioc, "removing unresponding devices: volumes\n");
10301         list_for_each_entry_safe(raid_device, raid_device_next,
10302             &ioc->raid_device_list, list) {
10303             if (!raid_device->responding)
10304                 _scsih_sas_volume_delete(ioc,
10305                     raid_device->handle);
10306             else
10307                 raid_device->responding = 0;
10308         }
10309     }
10310 
10311     /* removing unresponding expanders */
10312     ioc_info(ioc, "removing unresponding devices: expanders\n");
10313     spin_lock_irqsave(&ioc->sas_node_lock, flags);
10314     INIT_LIST_HEAD(&tmp_list);
10315     list_for_each_entry_safe(sas_expander, sas_expander_next,
10316         &ioc->sas_expander_list, list) {
10317         if (!sas_expander->responding)
10318             list_move_tail(&sas_expander->list, &tmp_list);
10319         else
10320             sas_expander->responding = 0;
10321     }
10322     spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10323     list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
10324         list) {
10325         _scsih_expander_node_remove(ioc, sas_expander);
10326     }
10327 
10328     ioc_info(ioc, "removing unresponding devices: complete\n");
10329 
10330     /* unblock devices */
10331     _scsih_ublock_io_all_device(ioc);
10332 }
10333 
10334 static void
10335 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
10336     struct _sas_node *sas_expander, u16 handle)
10337 {
10338     Mpi2ExpanderPage1_t expander_pg1;
10339     Mpi2ConfigReply_t mpi_reply;
10340     int i;
10341 
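    /*
     * Re-read Expander Page 1 for each phy and push the attached device
     * handle and negotiated link rate to the SAS transport layer.
     */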
10342     for (i = 0 ; i < sas_expander->num_phys ; i++) {
10343         if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
10344             &expander_pg1, i, handle))) {
10345             ioc_err(ioc, "failure at %s:%d/%s()!\n",
10346                 __FILE__, __LINE__, __func__);
10347             return;
10348         }
10349 
10350         mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
10351             le16_to_cpu(expander_pg1.AttachedDevHandle), i,
10352             expander_pg1.NegotiatedLinkRate >> 4,
10353             sas_expander->port);
10354     }
10355 }
10356 
10357 /**
10358  * _scsih_scan_for_devices_after_reset - scan for devices after host reset
10359  * @ioc: per adapter object
10360  */
10361 static void
10362 _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
10363 {
10364     Mpi2ExpanderPage0_t expander_pg0;
10365     Mpi2SasDevicePage0_t sas_device_pg0;
10366     Mpi26PCIeDevicePage0_t pcie_device_pg0;
10367     Mpi2RaidVolPage1_t *volume_pg1;
10368     Mpi2RaidVolPage0_t *volume_pg0;
10369     Mpi2RaidPhysDiskPage0_t pd_pg0;
10370     Mpi2EventIrConfigElement_t element;
10371     Mpi2ConfigReply_t mpi_reply;
10372     u8 phys_disk_num, port_id;
10373     u16 ioc_status;
10374     u16 handle, parent_handle;
10375     u64 sas_address;
10376     struct _sas_device *sas_device;
10377     struct _pcie_device *pcie_device;
10378     struct _sas_node *expander_device;
10379     static struct _raid_device *raid_device;
10380     u8 retry_count;
10381     unsigned long flags;
10382 
10383     volume_pg0 = kzalloc(sizeof(*volume_pg0), GFP_KERNEL);
10384     if (!volume_pg0)
10385         return;
10386 
10387     volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL);
10388     if (!volume_pg1) {
10389         kfree(volume_pg0);
10390         return;
10391     }
10392 
10393     ioc_info(ioc, "scan devices: start\n");
10394 
10395     _scsih_sas_host_refresh(ioc);
10396 
10397     ioc_info(ioc, "\tscan devices: expanders start\n");
10398 
10399     /* expanders */
10400     handle = 0xFFFF;
10401     while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10402         MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10403         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10404             MPI2_IOCSTATUS_MASK;
10405         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10406             ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10407                  ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10408             break;
10409         }
10410         handle = le16_to_cpu(expander_pg0.DevHandle);
10411         spin_lock_irqsave(&ioc->sas_node_lock, flags);
10412         port_id = expander_pg0.PhysicalPort;
10413         expander_device = mpt3sas_scsih_expander_find_by_sas_address(
10414             ioc, le64_to_cpu(expander_pg0.SASAddress),
10415             mpt3sas_get_port_by_id(ioc, port_id, 0));
10416         spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10417         if (expander_device)
10418             _scsih_refresh_expander_links(ioc, expander_device,
10419                 handle);
10420         else {
10421             ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10422                  handle,
10423                  (u64)le64_to_cpu(expander_pg0.SASAddress));
10424             _scsih_expander_add(ioc, handle);
10425             ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10426                  handle,
10427                  (u64)le64_to_cpu(expander_pg0.SASAddress));
10428         }
10429     }
10430 
10431     ioc_info(ioc, "\tscan devices: expanders complete\n");
10432 
10433     if (!ioc->ir_firmware)
10434         goto skip_to_sas;
10435 
10436     ioc_info(ioc, "\tscan devices: phys disk start\n");
10437 
10438     /* phys disk */
10439     phys_disk_num = 0xFF;
10440     while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10441         &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10442         phys_disk_num))) {
10443         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10444             MPI2_IOCSTATUS_MASK;
10445         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10446             ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10447                  ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10448             break;
10449         }
10450         phys_disk_num = pd_pg0.PhysDiskNum;
10451         handle = le16_to_cpu(pd_pg0.DevHandle);
10452         sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
10453         if (sas_device) {
10454             sas_device_put(sas_device);
10455             continue;
10456         }
10457         if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10458             &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
10459             handle) != 0)
10460             continue;
10461         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10462             MPI2_IOCSTATUS_MASK;
10463         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10464             ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
10465                  ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10466             break;
10467         }
10468         parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10469         if (!_scsih_get_sas_address(ioc, parent_handle,
10470             &sas_address)) {
10471             ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10472                  handle,
10473                  (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10474             port_id = sas_device_pg0.PhysicalPort;
10475             mpt3sas_transport_update_links(ioc, sas_address,
10476                 handle, sas_device_pg0.PhyNum,
10477                 MPI2_SAS_NEG_LINK_RATE_1_5,
10478                 mpt3sas_get_port_by_id(ioc, port_id, 0));
10479             set_bit(handle, ioc->pd_handles);
10480             retry_count = 0;
10481             /* This will retry adding the end device.
10482              * _scsih_add_device() decides on retries and
10483              * returns "1" when the add should be retried.
10484              */
10485             while (_scsih_add_device(ioc, handle, retry_count++,
10486                 1)) {
10487                 ssleep(1);
10488             }
10489             ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10490                  handle,
10491                  (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10492         }
10493     }
10494 
10495     ioc_info(ioc, "\tscan devices: phys disk complete\n");
10496 
10497     ioc_info(ioc, "\tscan devices: volumes start\n");
10498 
10499     /* volumes */
10500     handle = 0xFFFF;
10501     while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
10502         volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
10503         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10504             MPI2_IOCSTATUS_MASK;
10505         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10506             ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10507                  ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10508             break;
10509         }
10510         handle = le16_to_cpu(volume_pg1->DevHandle);
10511         spin_lock_irqsave(&ioc->raid_device_lock, flags);
10512         raid_device = _scsih_raid_device_find_by_wwid(ioc,
10513             le64_to_cpu(volume_pg1->WWID));
10514         spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10515         if (raid_device)
10516             continue;
10517         if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10518             volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10519              sizeof(Mpi2RaidVolPage0_t)))
10520             continue;
10521         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10522             MPI2_IOCSTATUS_MASK;
10523         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10524             ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10525                  ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10526             break;
10527         }
10528         if (volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10529             volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10530             volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
10531             memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
10532             element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
10533             element.VolDevHandle = volume_pg1->DevHandle;
10534             ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
10535                  volume_pg1->DevHandle);
10536             _scsih_sas_volume_add(ioc, &element);
10537             ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
10538                  volume_pg1->DevHandle);
10539         }
10540     }
10541 
10542     ioc_info(ioc, "\tscan devices: volumes complete\n");
10543 
10544  skip_to_sas:
10545 
10546     ioc_info(ioc, "\tscan devices: end devices start\n");
10547 
10548     /* sas devices */
10549     handle = 0xFFFF;
10550     while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10551         &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10552         handle))) {
10553         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10554             MPI2_IOCSTATUS_MASK;
10555         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10556             ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10557                  ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10558             break;
10559         }
10560         handle = le16_to_cpu(sas_device_pg0.DevHandle);
10561         if (!(_scsih_is_end_device(
10562             le32_to_cpu(sas_device_pg0.DeviceInfo))))
10563             continue;
10564         port_id = sas_device_pg0.PhysicalPort;
10565         sas_device = mpt3sas_get_sdev_by_addr(ioc,
10566             le64_to_cpu(sas_device_pg0.SASAddress),
10567             mpt3sas_get_port_by_id(ioc, port_id, 0));
10568         if (sas_device) {
10569             sas_device_put(sas_device);
10570             continue;
10571         }
10572         parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10573         if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
10574             ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10575                  handle,
10576                  (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10577             mpt3sas_transport_update_links(ioc, sas_address, handle,
10578                 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
10579                 mpt3sas_get_port_by_id(ioc, port_id, 0));
10580             retry_count = 0;
10581             /* This will retry adding the end device.
10582              * _scsih_add_device() decides on retries and
10583              * returns "1" when the add should be retried.
10584              */
10585             while (_scsih_add_device(ioc, handle, retry_count++,
10586                 0)) {
10587                 ssleep(1);
10588             }
10589             ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10590                  handle,
10591                  (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10592         }
10593     }
10594     ioc_info(ioc, "\tscan devices: end devices complete\n");
10595     ioc_info(ioc, "\tscan devices: pcie end devices start\n");
10596 
10597     /* pcie devices */
10598     handle = 0xFFFF;
10599     while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
10600         &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10601         handle))) {
10602         ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
10603                 & MPI2_IOCSTATUS_MASK;
10604         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10605             ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10606                  ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10607             break;
10608         }
10609         handle = le16_to_cpu(pcie_device_pg0.DevHandle);
10610         if (!(_scsih_is_nvme_pciescsi_device(
10611             le32_to_cpu(pcie_device_pg0.DeviceInfo))))
10612             continue;
10613         pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
10614                 le64_to_cpu(pcie_device_pg0.WWID));
10615         if (pcie_device) {
10616             pcie_device_put(pcie_device);
10617             continue;
10618         }
10619         retry_count = 0;
10620         parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
10621         _scsih_pcie_add_device(ioc, handle);
10622 
10623         ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
10624              handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
10625     }
10626 
10627     kfree(volume_pg0);
10628     kfree(volume_pg1);
10629 
10630     ioc_info(ioc, "\tscan devices: pcie end devices complete\n");
10631     ioc_info(ioc, "scan devices: complete\n");
10632 }
10633 
10634 /**
10635  * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
10636  * @ioc: per adapter object
10637  *
10638  * The handler for doing any required cleanup or initialization.
10639  */
10640 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
10641 {
10642     dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
10643 }
10644 
10645 /**
10646  * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
10647  *                          scsi & tm cmds.
10648  * @ioc: per adapter object
10649  *
10650  * The handler for doing any required cleanup or initialization.
10651  */
10652 void
10653 mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
10654 {
10655     dtmprintk(ioc,
10656         ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
10657     if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
10658         ioc->scsih_cmds.status |= MPT3_CMD_RESET;
10659         mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
10660         complete(&ioc->scsih_cmds.done);
10661     }
10662     if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
10663         ioc->tm_cmds.status |= MPT3_CMD_RESET;
10664         mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
10665         complete(&ioc->tm_cmds.done);
10666     }
10667 
10668     memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
10669     memset(ioc->device_remove_in_progress, 0,
10670            ioc->device_remove_in_progress_sz);
10671     _scsih_fw_event_cleanup_queue(ioc);
10672     _scsih_flush_running_cmds(ioc);
10673 }
10674 
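/*
 * Editor's note (not part of the upstream source): flagging pending
 * internal commands with MPT3_CMD_RESET before calling complete() above
 * wakes any thread waiting on scsih_cmds.done or tm_cmds.done and lets it
 * tell a reset-induced completion apart from a normal firmware reply.
 */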
10675 /**
10676  * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
10677  * @ioc: per adapter object
10678  *
10679  * The handler for doing any required cleanup or initialization.
10680  */
10681 void
10682 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
10683 {
10684     dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
10685     if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) {
10686         if (ioc->multipath_on_hba) {
10687             _scsih_sas_port_refresh(ioc);
10688             _scsih_update_vphys_after_reset(ioc);
10689         }
10690         _scsih_prep_device_scan(ioc);
10691         _scsih_create_enclosure_list_after_reset(ioc);
10692         _scsih_search_responding_sas_devices(ioc);
10693         _scsih_search_responding_pcie_devices(ioc);
10694         _scsih_search_responding_raid_devices(ioc);
10695         _scsih_search_responding_expanders(ioc);
10696         _scsih_error_recovery_delete_devices(ioc);
10697     }
10698 }
10699 
10700 /**
10701  * _mpt3sas_fw_work - delayed task for processing firmware events
10702  * @ioc: per adapter object
10703  * @fw_event: The fw_event_work object
10704  * Context: user.
10705  */
10706 static void
10707 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
10708 {
10709     ioc->current_event = fw_event;
10710     _scsih_fw_event_del_from_list(ioc, fw_event);
10711 
10712     /* the queue is being flushed so ignore this event */
10713     if (ioc->remove_host || ioc->pci_error_recovery) {
10714         fw_event_work_put(fw_event);
10715         ioc->current_event = NULL;
10716         return;
10717     }
10718 
10719     switch (fw_event->event) {
10720     case MPT3SAS_PROCESS_TRIGGER_DIAG:
10721         mpt3sas_process_trigger_data(ioc,
10722             (struct SL_WH_TRIGGERS_EVENT_DATA_T *)
10723             fw_event->event_data);
10724         break;
10725     case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
10726         while (scsi_host_in_recovery(ioc->shost) ||
10727                      ioc->shost_recovery) {
10728             /*
10729              * If we're unloading or cancelling the work, bail.
10730              * Otherwise, this can become an infinite loop.
10731              */
10732             if (ioc->remove_host || ioc->fw_events_cleanup)
10733                 goto out;
10734             ssleep(1);
10735         }
10736         _scsih_remove_unresponding_devices(ioc);
10737         _scsih_del_dirty_vphy(ioc);
10738         _scsih_del_dirty_port_entries(ioc);
10739         if (ioc->is_gen35_ioc)
10740             _scsih_update_device_qdepth(ioc);
10741         _scsih_scan_for_devices_after_reset(ioc);
10742         /*
10743          * If a diag reset has occurred during the driver load, the
10744          * driver has to complete the driver load operation by
10745          * executing the following items:
10746          * - register the devices from sas_device_init_list to SML,
10747          * - clear the is_driver_loading flag,
10748          * - start the watchdog thread.
10749          * In the happy driver-load path, these items are taken care of
10750          * when the driver executes scsih_scan_finished().
10751          */
10752         if (ioc->is_driver_loading)
10753             _scsih_complete_devices_scanning(ioc);
10754         _scsih_set_nvme_max_shutdown_latency(ioc);
10755         break;
10756     case MPT3SAS_PORT_ENABLE_COMPLETE:
10757         ioc->start_scan = 0;
10758         if (missing_delay[0] != -1 && missing_delay[1] != -1)
10759             mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
10760                 missing_delay[1]);
10761         dewtprintk(ioc,
10762                ioc_info(ioc, "port enable: complete from worker thread\n"));
10763         break;
10764     case MPT3SAS_TURN_ON_PFA_LED:
10765         _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
10766         break;
10767     case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10768         _scsih_sas_topology_change_event(ioc, fw_event);
10769         break;
10770     case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
10771         if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
10772             _scsih_sas_device_status_change_event_debug(ioc,
10773                 (Mpi2EventDataSasDeviceStatusChange_t *)
10774                 fw_event->event_data);
10775         break;
10776     case MPI2_EVENT_SAS_DISCOVERY:
10777         _scsih_sas_discovery_event(ioc, fw_event);
10778         break;
10779     case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10780         _scsih_sas_device_discovery_error_event(ioc, fw_event);
10781         break;
10782     case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10783         _scsih_sas_broadcast_primitive_event(ioc, fw_event);
10784         break;
10785     case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10786         _scsih_sas_enclosure_dev_status_change_event(ioc,
10787             fw_event);
10788         break;
10789     case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10790         _scsih_sas_ir_config_change_event(ioc, fw_event);
10791         break;
10792     case MPI2_EVENT_IR_VOLUME:
10793         _scsih_sas_ir_volume_event(ioc, fw_event);
10794         break;
10795     case MPI2_EVENT_IR_PHYSICAL_DISK:
10796         _scsih_sas_ir_physical_disk_event(ioc, fw_event);
10797         break;
10798     case MPI2_EVENT_IR_OPERATION_STATUS:
10799         _scsih_sas_ir_operation_status_event(ioc, fw_event);
10800         break;
10801     case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10802         _scsih_pcie_device_status_change_event(ioc, fw_event);
10803         break;
10804     case MPI2_EVENT_PCIE_ENUMERATION:
10805         _scsih_pcie_enumeration_event(ioc, fw_event);
10806         break;
10807     case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10808         _scsih_pcie_topology_change_event(ioc, fw_event);
10809         ioc->current_event = NULL;
10810         return;
10811     }
10812 out:
10813     fw_event_work_put(fw_event);
10814     ioc->current_event = NULL;
10815 }
10816 
10817 /**
10818  * _firmware_event_work - work queue callback for processing firmware events
10819  * @work: The fw_event_work object
10820  * Context: user.
10821  *
10822  * Wrapper for the work thread handling firmware events.
10823  */
10824 
10825 static void
10826 _firmware_event_work(struct work_struct *work)
10827 {
10828     struct fw_event_work *fw_event = container_of(work,
10829         struct fw_event_work, work);
10830 
10831     _mpt3sas_fw_work(fw_event->ioc, fw_event);
10832 }
10833 
10834 /**
10835  * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
10836  * @ioc: per adapter object
10837  * @msix_index: MSIX table index supplied by the OS
10838  * @reply: reply message frame(lower 32bit addr)
10839  * Context: interrupt.
10840  *
10841  * This function merely adds a new work task into ioc->firmware_event_thread.
10842  * The tasks are worked from _firmware_event_work in user context.
10843  *
10844  * Return: 1 meaning mf should be freed from _base_interrupt
10845  *         0 means the mf is freed from this function.
10846  */
10847 u8
10848 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
10849     u32 reply)
10850 {
10851     struct fw_event_work *fw_event;
10852     Mpi2EventNotificationReply_t *mpi_reply;
10853     u16 event;
10854     u16 sz;
10855     Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
10856 
10857     /* events turned off due to host reset */
10858     if (ioc->pci_error_recovery)
10859         return 1;
10860 
10861     mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
10862 
10863     if (unlikely(!mpi_reply)) {
10864         ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
10865             __FILE__, __LINE__, __func__);
10866         return 1;
10867     }
10868 
10869     event = le16_to_cpu(mpi_reply->Event);
10870 
10871     if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
10872         mpt3sas_trigger_event(ioc, event, 0);
10873 
10874     switch (event) {
10875     /* handle these */
10876     case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10877     {
10878         Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
10879             (Mpi2EventDataSasBroadcastPrimitive_t *)
10880             mpi_reply->EventData;
10881 
10882         if (baen_data->Primitive !=
10883             MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
10884             return 1;
10885 
10886         if (ioc->broadcast_aen_busy) {
10887             ioc->broadcast_aen_pending++;
10888             return 1;
10889         } else
10890             ioc->broadcast_aen_busy = 1;
10891         break;
10892     }
10893 
10894     case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10895         _scsih_check_topo_delete_events(ioc,
10896             (Mpi2EventDataSasTopologyChangeList_t *)
10897             mpi_reply->EventData);
10898         /*
10899          * There is no need to add the topology change list
10900          * event to the fw event work queue while a diag
10901          * reset is going on, since during a diag reset the
10902          * driver scans the devices by reading SAS Device
10903          * Page 0 rather than by processing the topology
10904          * change events.
10905          */
10906         if (ioc->shost_recovery)
10907             return 1;
10908         break;
10909     case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10910         _scsih_check_pcie_topo_remove_events(ioc,
10911             (Mpi26EventDataPCIeTopologyChangeList_t *)
10912             mpi_reply->EventData);
10913         if (ioc->shost_recovery)
10914             return 1;
10915         break;
10916     case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10917         _scsih_check_ir_config_unhide_events(ioc,
10918             (Mpi2EventDataIrConfigChangeList_t *)
10919             mpi_reply->EventData);
10920         break;
10921     case MPI2_EVENT_IR_VOLUME:
10922         _scsih_check_volume_delete_events(ioc,
10923             (Mpi2EventDataIrVolume_t *)
10924             mpi_reply->EventData);
10925         break;
10926     case MPI2_EVENT_LOG_ENTRY_ADDED:
10927     {
10928         Mpi2EventDataLogEntryAdded_t *log_entry;
10929         u32 log_code;
10930 
10931         if (!ioc->is_warpdrive)
10932             break;
10933 
10934         log_entry = (Mpi2EventDataLogEntryAdded_t *)
10935             mpi_reply->EventData;
10936         log_code = le32_to_cpu(*(__le32 *)log_entry->LogData);
10937 
10938         if (le16_to_cpu(log_entry->LogEntryQualifier)
10939             != MPT2_WARPDRIVE_LOGENTRY)
10940             break;
10941 
10942         switch (log_code) {
10943         case MPT2_WARPDRIVE_LC_SSDT:
10944             ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
10945             break;
10946         case MPT2_WARPDRIVE_LC_SSDLW:
10947             ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
10948             break;
10949         case MPT2_WARPDRIVE_LC_SSDLF:
10950             ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
10951             break;
10952         case MPT2_WARPDRIVE_LC_BRMF:
10953             ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
10954             break;
10955         }
10956 
10957         break;
10958     }
10959     case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
10960         _scsih_sas_device_status_change_event(ioc,
10961             (Mpi2EventDataSasDeviceStatusChange_t *)
10962             mpi_reply->EventData);
10963         break;
10964     case MPI2_EVENT_IR_OPERATION_STATUS:
10965     case MPI2_EVENT_SAS_DISCOVERY:
10966     case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10967     case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10968     case MPI2_EVENT_IR_PHYSICAL_DISK:
10969     case MPI2_EVENT_PCIE_ENUMERATION:
10970     case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10971         break;
10972 
10973     case MPI2_EVENT_TEMP_THRESHOLD:
10974         _scsih_temp_threshold_events(ioc,
10975             (Mpi2EventDataTemperature_t *)
10976             mpi_reply->EventData);
10977         break;
10978     case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
10979         ActiveCableEventData =
10980             (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
10981         switch (ActiveCableEventData->ReasonCode) {
10982         case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
10983             ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
10984                    ActiveCableEventData->ReceptacleID);
10985             pr_notice("cannot be powered and devices connected\n");
10986             pr_notice("to this active cable will not be seen\n");
10987             pr_notice("This active cable requires %d mW of power\n",
10988                 le32_to_cpu(
10989                 ActiveCableEventData->ActiveCablePowerRequirement));
10990             break;
10991 
10992         case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
10993             ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
10994                    ActiveCableEventData->ReceptacleID);
10995             pr_notice(
10996                 "is not running at optimal speed (12 Gb/s rate)\n");
10997             break;
10998         }
10999 
11000         break;
11001 
11002     default: /* ignore the rest */
11003         return 1;
11004     }
11005 
11006     sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
11007     fw_event = alloc_fw_event_work(sz);
11008     if (!fw_event) {
11009         ioc_err(ioc, "failure at %s:%d/%s()!\n",
11010             __FILE__, __LINE__, __func__);
11011         return 1;
11012     }
11013 
11014     memcpy(fw_event->event_data, mpi_reply->EventData, sz);
11015     fw_event->ioc = ioc;
11016     fw_event->VF_ID = mpi_reply->VF_ID;
11017     fw_event->VP_ID = mpi_reply->VP_ID;
11018     fw_event->event = event;
11019     _scsih_fw_event_add(ioc, fw_event);
11020     fw_event_work_put(fw_event);
11021     return 1;
11022 }
11023 
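/*
 * Editor's note (not part of the upstream source): summarizing the event
 * path visible above - mpt3sas_scsih_event_callback() runs in interrupt
 * context, copies the event data into a fw_event_work object and queues it
 * on ioc->firmware_event_thread via _scsih_fw_event_add(); the workqueue
 * later invokes _firmware_event_work(), which hands the event to
 * _mpt3sas_fw_work() in user (process) context.
 */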
11024 /**
11025  * _scsih_expander_node_remove - removing expander device from list.
11026  * @ioc: per adapter object
11027  * @sas_expander: the sas expander (_sas_node) object
11028  *
11029  * Removing object and freeing associated memory from the
11030  * ioc->sas_expander_list.
11031  */
11032 static void
11033 _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
11034     struct _sas_node *sas_expander)
11035 {
11036     struct _sas_port *mpt3sas_port, *next;
11037     unsigned long flags;
11038     int port_id;
11039 
11040     /* remove sibling ports attached to this expander */
11041     list_for_each_entry_safe(mpt3sas_port, next,
11042        &sas_expander->sas_port_list, port_list) {
11043         if (ioc->shost_recovery)
11044             return;
11045         if (mpt3sas_port->remote_identify.device_type ==
11046             SAS_END_DEVICE)
11047             mpt3sas_device_remove_by_sas_address(ioc,
11048                 mpt3sas_port->remote_identify.sas_address,
11049                 mpt3sas_port->hba_port);
11050         else if (mpt3sas_port->remote_identify.device_type ==
11051             SAS_EDGE_EXPANDER_DEVICE ||
11052             mpt3sas_port->remote_identify.device_type ==
11053             SAS_FANOUT_EXPANDER_DEVICE)
11054             mpt3sas_expander_remove(ioc,
11055                 mpt3sas_port->remote_identify.sas_address,
11056                 mpt3sas_port->hba_port);
11057     }
11058 
11059     port_id = sas_expander->port->port_id;
11060 
11061     mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
11062         sas_expander->sas_address_parent, sas_expander->port);
11063 
11064     ioc_info(ioc,
11065         "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
11066         sas_expander->handle, (unsigned long long)
11067         sas_expander->sas_address,
11068         port_id);
11069 
11070     spin_lock_irqsave(&ioc->sas_node_lock, flags);
11071     list_del(&sas_expander->list);
11072     spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
11073 
11074     kfree(sas_expander->phy);
11075     kfree(sas_expander);
11076 }
11077 
11078 /**
11079  * _scsih_nvme_shutdown - NVMe shutdown notification
11080  * @ioc: per adapter object
11081  *
11082  * Send an IoUnitControl request with the shutdown operation code to alert
11083  * the IOC that the host system is shutting down, so that the IOC can issue
11084  * an NVMe shutdown to the NVMe drives attached to it.
11085  */
11086 static void
11087 _scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
11088 {
11089     Mpi26IoUnitControlRequest_t *mpi_request;
11090     Mpi26IoUnitControlReply_t *mpi_reply;
11091     u16 smid;
11092 
11093     /* are there any NVMe devices ? */
11094     if (list_empty(&ioc->pcie_device_list))
11095         return;
11096 
11097     mutex_lock(&ioc->scsih_cmds.mutex);
11098 
11099     if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
11100         ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
11101         goto out;
11102     }
11103 
11104     ioc->scsih_cmds.status = MPT3_CMD_PENDING;
11105 
11106     smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
11107     if (!smid) {
11108         ioc_err(ioc,
11109             "%s: failed obtaining a smid\n", __func__);
11110         ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11111         goto out;
11112     }
11113 
11114     mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
11115     ioc->scsih_cmds.smid = smid;
11116     memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
11117     mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
11118     mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;
11119 
11120     init_completion(&ioc->scsih_cmds.done);
11121     ioc->put_smid_default(ioc, smid);
11122     /* Wait for max_shutdown_latency seconds */
11123     ioc_info(ioc,
11124         "Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
11125         ioc->max_shutdown_latency);
11126     wait_for_completion_timeout(&ioc->scsih_cmds.done,
11127             ioc->max_shutdown_latency*HZ);
11128 
11129     if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
11130         ioc_err(ioc, "%s: timeout\n", __func__);
11131         goto out;
11132     }
11133 
11134     if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
11135         mpi_reply = ioc->scsih_cmds.reply;
11136         ioc_info(ioc, "Io Unit Control shutdown (complete): "
11137             "ioc_status(0x%04x), loginfo(0x%08x)\n",
11138             le16_to_cpu(mpi_reply->IOCStatus),
11139             le32_to_cpu(mpi_reply->IOCLogInfo));
11140     }
11141  out:
11142     ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11143     mutex_unlock(&ioc->scsih_cmds.mutex);
11144 }
11145 
11146 
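/*
 * Editor's note (not part of the upstream source): _scsih_nvme_shutdown()
 * above and _scsih_ir_shutdown() below both use the driver's internal
 * command channel: take ioc->scsih_cmds.mutex, mark the slot
 * MPT3_CMD_PENDING, obtain a smid, build the request frame, post it with
 * ioc->put_smid_default() and wait on scsih_cmds.done; the completion
 * callback sets MPT3_CMD_COMPLETE (and MPT3_CMD_REPLY_VALID when a reply
 * frame is available) before completing.
 */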
11147 /**
11148  * _scsih_ir_shutdown - IR shutdown notification
11149  * @ioc: per adapter object
11150  *
11151  * Send a RAID Action request to alert the Integrated RAID subsystem of the
11152  * IOC that the host system is shutting down.
11153  */
11154 static void
11155 _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
11156 {
11157     Mpi2RaidActionRequest_t *mpi_request;
11158     Mpi2RaidActionReply_t *mpi_reply;
11159     u16 smid;
11160 
11161     /* is IR firmware build loaded ? */
11162     if (!ioc->ir_firmware)
11163         return;
11164 
11165     /* are there any volumes ? */
11166     if (list_empty(&ioc->raid_device_list))
11167         return;
11168 
11169     mutex_lock(&ioc->scsih_cmds.mutex);
11170 
11171     if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
11172         ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
11173         goto out;
11174     }
11175     ioc->scsih_cmds.status = MPT3_CMD_PENDING;
11176 
11177     smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
11178     if (!smid) {
11179         ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
11180         ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11181         goto out;
11182     }
11183 
11184     mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
11185     ioc->scsih_cmds.smid = smid;
11186     memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
11187 
11188     mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
11189     mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
11190 
11191     if (!ioc->hide_ir_msg)
11192         ioc_info(ioc, "IR shutdown (sending)\n");
11193     init_completion(&ioc->scsih_cmds.done);
11194     ioc->put_smid_default(ioc, smid);
11195     wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
11196 
11197     if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
11198         ioc_err(ioc, "%s: timeout\n", __func__);
11199         goto out;
11200     }
11201 
11202     if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
11203         mpi_reply = ioc->scsih_cmds.reply;
11204         if (!ioc->hide_ir_msg)
11205             ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
11206                  le16_to_cpu(mpi_reply->IOCStatus),
11207                  le32_to_cpu(mpi_reply->IOCLogInfo));
11208     }
11209 
11210  out:
11211     ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11212     mutex_unlock(&ioc->scsih_cmds.mutex);
11213 }
11214 
11215 /**
11216  * _scsih_get_shost_and_ioc - get shost and ioc
11217  *          and verify whether they are NULL or not
11218  * @pdev: PCI device struct
11219  * @shost: address of scsi host pointer
11220  * @ioc: address of HBA adapter pointer
11221  *
11222  * Return: zero if *shost and *ioc are not NULL, otherwise an error number.
11223  */
11224 static int
11225 _scsih_get_shost_and_ioc(struct pci_dev *pdev,
11226     struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
11227 {
11228     *shost = pci_get_drvdata(pdev);
11229     if (*shost == NULL) {
11230         dev_err(&pdev->dev, "pdev's driver data is null\n");
11231         return -ENXIO;
11232     }
11233 
11234     *ioc = shost_priv(*shost);
11235     if (*ioc == NULL) {
11236         dev_err(&pdev->dev, "shost's private data is null\n");
11237         return -ENXIO;
11238     }
11239 
11240     return 0;
11241 }
11242 
11243 /**
11244  * scsih_remove - detach and remove the host
11245  * @pdev: PCI device struct
11246  *
11247  * Routine called when unloading the driver.
11248  */
11249 static void scsih_remove(struct pci_dev *pdev)
11250 {
11251     struct Scsi_Host *shost;
11252     struct MPT3SAS_ADAPTER *ioc;
11253     struct _sas_port *mpt3sas_port, *next_port;
11254     struct _raid_device *raid_device, *next;
11255     struct MPT3SAS_TARGET *sas_target_priv_data;
11256     struct _pcie_device *pcie_device, *pcienext;
11257     struct workqueue_struct *wq;
11258     unsigned long flags;
11259     Mpi2ConfigReply_t mpi_reply;
11260     struct hba_port *port, *port_next;
11261 
11262     if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11263         return;
11264 
11265     ioc->remove_host = 1;
11266 
11267     if (!pci_device_is_present(pdev)) {
11268         mpt3sas_base_pause_mq_polling(ioc);
11269         _scsih_flush_running_cmds(ioc);
11270     }
11271 
11272     _scsih_fw_event_cleanup_queue(ioc);
11273 
11274     spin_lock_irqsave(&ioc->fw_event_lock, flags);
11275     wq = ioc->firmware_event_thread;
11276     ioc->firmware_event_thread = NULL;
11277     spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11278     if (wq)
11279         destroy_workqueue(wq);
11280     /*
11281      * Copy back the unmodified ioc page1 so that on the next driver
11282      * load, the current modified changes on ioc page1 won't take effect.
11283      */
11284     if (ioc->is_aero_ioc)
11285         mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11286                 &ioc->ioc_pg1_copy);
11287     /* release all the volumes */
11288     _scsih_ir_shutdown(ioc);
11289     mpt3sas_destroy_debugfs(ioc);
11290     sas_remove_host(shost);
11291     list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
11292         list) {
11293         if (raid_device->starget) {
11294             sas_target_priv_data =
11295                 raid_device->starget->hostdata;
11296             sas_target_priv_data->deleted = 1;
11297             scsi_remove_target(&raid_device->starget->dev);
11298         }
11299         ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
11300              raid_device->handle, (u64)raid_device->wwid);
11301         _scsih_raid_device_remove(ioc, raid_device);
11302     }
11303     list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
11304         list) {
11305         _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
11306         list_del_init(&pcie_device->list);
11307         pcie_device_put(pcie_device);
11308     }
11309 
11310     /* free ports attached to the sas_host */
11311     list_for_each_entry_safe(mpt3sas_port, next_port,
11312        &ioc->sas_hba.sas_port_list, port_list) {
11313         if (mpt3sas_port->remote_identify.device_type ==
11314             SAS_END_DEVICE)
11315             mpt3sas_device_remove_by_sas_address(ioc,
11316                 mpt3sas_port->remote_identify.sas_address,
11317                 mpt3sas_port->hba_port);
11318         else if (mpt3sas_port->remote_identify.device_type ==
11319             SAS_EDGE_EXPANDER_DEVICE ||
11320             mpt3sas_port->remote_identify.device_type ==
11321             SAS_FANOUT_EXPANDER_DEVICE)
11322             mpt3sas_expander_remove(ioc,
11323                 mpt3sas_port->remote_identify.sas_address,
11324                 mpt3sas_port->hba_port);
11325     }
11326 
11327     list_for_each_entry_safe(port, port_next,
11328         &ioc->port_table_list, list) {
11329         list_del(&port->list);
11330         kfree(port);
11331     }
11332 
11333     /* free phys attached to the sas_host */
11334     if (ioc->sas_hba.num_phys) {
11335         kfree(ioc->sas_hba.phy);
11336         ioc->sas_hba.phy = NULL;
11337         ioc->sas_hba.num_phys = 0;
11338     }
11339 
11340     mpt3sas_base_detach(ioc);
11341     spin_lock(&gioc_lock);
11342     list_del(&ioc->list);
11343     spin_unlock(&gioc_lock);
11344     scsi_host_put(shost);
11345 }
11346 
11347 /**
11348  * scsih_shutdown - routine call during system shutdown
11349  * @pdev: PCI device struct
11350  */
11351 static void
11352 scsih_shutdown(struct pci_dev *pdev)
11353 {
11354     struct Scsi_Host *shost;
11355     struct MPT3SAS_ADAPTER *ioc;
11356     struct workqueue_struct *wq;
11357     unsigned long flags;
11358     Mpi2ConfigReply_t mpi_reply;
11359 
11360     if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11361         return;
11362 
11363     ioc->remove_host = 1;
11364 
11365     if (!pci_device_is_present(pdev)) {
11366         mpt3sas_base_pause_mq_polling(ioc);
11367         _scsih_flush_running_cmds(ioc);
11368     }
11369 
11370     _scsih_fw_event_cleanup_queue(ioc);
11371 
11372     spin_lock_irqsave(&ioc->fw_event_lock, flags);
11373     wq = ioc->firmware_event_thread;
11374     ioc->firmware_event_thread = NULL;
11375     spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11376     if (wq)
11377         destroy_workqueue(wq);
11378     /*
11379      * Copy back the unmodified ioc page1 so that on next driver load,
11380      * current modified changes on ioc page1 won't take effect.
11381      */
11382     if (ioc->is_aero_ioc)
11383         mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11384                 &ioc->ioc_pg1_copy);
11385 
11386     _scsih_ir_shutdown(ioc);
11387     _scsih_nvme_shutdown(ioc);
11388     mpt3sas_base_mask_interrupts(ioc);
11389     mpt3sas_base_stop_watchdog(ioc);
11390     ioc->shost_recovery = 1;
11391     mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
11392     ioc->shost_recovery = 0;
11393     mpt3sas_base_free_irq(ioc);
11394     mpt3sas_base_disable_msix(ioc);
11395 }
11396 
11397 
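/*
 * Editor's note (not part of the upstream source): scsih_shutdown() above
 * mirrors the front half of scsih_remove() - it quiesces firmware event
 * processing and restores ioc page1 - and then notifies the IR and NVMe
 * subsystems of the impending shutdown before masking interrupts, stopping
 * the watchdog and putting the IOC through a soft reset.
 */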
11398 /**
11399  * _scsih_probe_boot_devices - reports 1st device
11400  * @ioc: per adapter object
11401  *
11402  * If specified in BIOS page 2, this routine reports the 1st device
11403  * to scsi-ml or the sas transport layer for persistent boot device
11404  * purposes.  Please refer to _scsih_determine_boot_device().
11405  */
11406 static void
11407 _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
11408 {
11409     u32 channel;
11410     void *device;
11411     struct _sas_device *sas_device;
11412     struct _raid_device *raid_device;
11413     struct _pcie_device *pcie_device;
11414     u16 handle;
11415     u64 sas_address_parent;
11416     u64 sas_address;
11417     unsigned long flags;
11418     int rc;
11419     int tid;
11420     struct hba_port *port;
11421 
11422      /* no Bios, return immediately */
11423     if (!ioc->bios_pg3.BiosVersion)
11424         return;
11425 
11426     device = NULL;
11427     if (ioc->req_boot_device.device) {
11428         device =  ioc->req_boot_device.device;
11429         channel = ioc->req_boot_device.channel;
11430     } else if (ioc->req_alt_boot_device.device) {
11431         device =  ioc->req_alt_boot_device.device;
11432         channel = ioc->req_alt_boot_device.channel;
11433     } else if (ioc->current_boot_device.device) {
11434         device =  ioc->current_boot_device.device;
11435         channel = ioc->current_boot_device.channel;
11436     }
11437 
11438     if (!device)
11439         return;
11440 
11441     if (channel == RAID_CHANNEL) {
11442         raid_device = device;
11443         /*
11444          * If this boot vd is already registered with SML then
11445          * no need to register it again as part of device scanning
11446          * after diag reset during driver load operation.
11447          */
11448         if (raid_device->starget)
11449             return;
11450         rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11451             raid_device->id, 0);
11452         if (rc)
11453             _scsih_raid_device_remove(ioc, raid_device);
11454     } else if (channel == PCIE_CHANNEL) {
11455         pcie_device = device;
11456         /*
11457          * If this boot NVMe device is already registered with SML then
11458          * no need to register it again as part of device scanning
11459          * after diag reset during driver load operation.
11460          */
11461         if (pcie_device->starget)
11462             return;
11463         spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11464         tid = pcie_device->id;
11465         list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
11466         spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11467         rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
11468         if (rc)
11469             _scsih_pcie_device_remove(ioc, pcie_device);
11470     } else {
11471         sas_device = device;
11472         /*
11473          * If this boot sas/sata device is already registered with SML
11474          * then no need to register it again as part of device scanning
11475          * after diag reset during driver load operation.
11476          */
11477         if (sas_device->starget)
11478             return;
11479         spin_lock_irqsave(&ioc->sas_device_lock, flags);
11480         handle = sas_device->handle;
11481         sas_address_parent = sas_device->sas_address_parent;
11482         sas_address = sas_device->sas_address;
11483         port = sas_device->port;
11484         list_move_tail(&sas_device->list, &ioc->sas_device_list);
11485         spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11486 
11487         if (ioc->hide_drives)
11488             return;
11489 
11490         if (!port)
11491             return;
11492 
11493         if (!mpt3sas_transport_port_add(ioc, handle,
11494             sas_address_parent, port)) {
11495             _scsih_sas_device_remove(ioc, sas_device);
11496         } else if (!sas_device->starget) {
11497             if (!ioc->is_driver_loading) {
11498                 mpt3sas_transport_port_remove(ioc,
11499                     sas_address,
11500                     sas_address_parent, port);
11501                 _scsih_sas_device_remove(ioc, sas_device);
11502             }
11503         }
11504     }
11505 }
11506 
11507 /**
11508  * _scsih_probe_raid - reporting raid volumes to scsi-ml
11509  * @ioc: per adapter object
11510  *
11511  * Called during initial loading of the driver.
11512  */
11513 static void
11514 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
11515 {
11516     struct _raid_device *raid_device, *raid_next;
11517     int rc;
11518 
11519     list_for_each_entry_safe(raid_device, raid_next,
11520         &ioc->raid_device_list, list) {
11521         if (raid_device->starget)
11522             continue;
11523         rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11524             raid_device->id, 0);
11525         if (rc)
11526             _scsih_raid_device_remove(ioc, raid_device);
11527     }
11528 }
11529 
11530 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
11531 {
11532     struct _sas_device *sas_device = NULL;
11533     unsigned long flags;
11534 
11535     spin_lock_irqsave(&ioc->sas_device_lock, flags);
11536     if (!list_empty(&ioc->sas_device_init_list)) {
11537         sas_device = list_first_entry(&ioc->sas_device_init_list,
11538                 struct _sas_device, list);
11539         sas_device_get(sas_device);
11540     }
11541     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11542 
11543     return sas_device;
11544 }
11545 
11546 static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11547         struct _sas_device *sas_device)
11548 {
11549     unsigned long flags;
11550 
11551     spin_lock_irqsave(&ioc->sas_device_lock, flags);
11552 
11553     /*
11554      * Since we dropped the lock during the call to port_add(), we need to
11555      * be careful here that somebody else didn't move or delete this item
11556      * while we were busy with other things.
11557      *
11558      * If it was on the list, we need a put() for the reference the list
11559      * had. Either way, we need a get() for the destination list.
11560      */
11561     if (!list_empty(&sas_device->list)) {
11562         list_del_init(&sas_device->list);
11563         sas_device_put(sas_device);
11564     }
11565 
11566     sas_device_get(sas_device);
11567     list_add_tail(&sas_device->list, &ioc->sas_device_list);
11568 
11569     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11570 }
11571 
11572 /**
11573  * _scsih_probe_sas - reporting sas devices to sas transport
11574  * @ioc: per adapter object
11575  *
11576  * Called during initial loading of the driver.
11577  */
11578 static void
11579 _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
11580 {
11581     struct _sas_device *sas_device;
11582 
11583     if (ioc->hide_drives)
11584         return;
11585 
11586     while ((sas_device = get_next_sas_device(ioc))) {
11587         if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
11588             sas_device->sas_address_parent, sas_device->port)) {
11589             _scsih_sas_device_remove(ioc, sas_device);
11590             sas_device_put(sas_device);
11591             continue;
11592         } else if (!sas_device->starget) {
11593             /*
11594              * When async scanning is enabled, it's not possible to
11595              * remove devices while scanning is turned on, due to an
11596              * oops in scsi_sysfs_add_sdev()->add_device()->
11597              * sysfs_addrm_start().
11598              */
11599             if (!ioc->is_driver_loading) {
11600                 mpt3sas_transport_port_remove(ioc,
11601                     sas_device->sas_address,
11602                     sas_device->sas_address_parent,
11603                     sas_device->port);
11604                 _scsih_sas_device_remove(ioc, sas_device);
11605                 sas_device_put(sas_device);
11606                 continue;
11607             }
11608         }
11609         sas_device_make_active(ioc, sas_device);
11610         sas_device_put(sas_device);
11611     }
11612 }
11613 
11614 /**
11615  * get_next_pcie_device - Get the next pcie device
11616  * @ioc: per adapter object
11617  *
11618  * Get the next pcie device from pcie_device_init_list list.
11619  *
11620  * Return: pcie device structure if the pcie_device_init_list is not empty,
11621  * otherwise NULL.
11622  */
11623 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
11624 {
11625     struct _pcie_device *pcie_device = NULL;
11626     unsigned long flags;
11627 
11628     spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11629     if (!list_empty(&ioc->pcie_device_init_list)) {
11630         pcie_device = list_first_entry(&ioc->pcie_device_init_list,
11631                 struct _pcie_device, list);
11632         pcie_device_get(pcie_device);
11633     }
11634     spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11635 
11636     return pcie_device;
11637 }
11638 
11639 /**
11640  * pcie_device_make_active - Add pcie device to pcie_device_list list
11641  * @ioc: per adapter object
11642  * @pcie_device: pcie device object
11643  *
11644  * Add the pcie device, which has registered with the SCSI Transport Layer,
11645  * to the pcie_device_list.
11646  */
11647 static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11648         struct _pcie_device *pcie_device)
11649 {
11650     unsigned long flags;
11651 
11652     spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11653 
11654     if (!list_empty(&pcie_device->list)) {
11655         list_del_init(&pcie_device->list);
11656         pcie_device_put(pcie_device);
11657     }
11658     pcie_device_get(pcie_device);
11659     list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
11660 
11661     spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11662 }
11663 
11664 /**
11665  * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
11666  * @ioc: per adapter object
11667  *
11668  * Called during initial loading of the driver.
11669  */
11670 static void
11671 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
11672 {
11673     struct _pcie_device *pcie_device;
11674     int rc;
11675 
11676     /* PCIe Device List */
11677     while ((pcie_device = get_next_pcie_device(ioc))) {
11678         if (pcie_device->starget) {
11679             pcie_device_put(pcie_device);
11680             continue;
11681         }
11682         if (pcie_device->access_status ==
11683             MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
11684             pcie_device_make_active(ioc, pcie_device);
11685             pcie_device_put(pcie_device);
11686             continue;
11687         }
11688         rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
11689             pcie_device->id, 0);
11690         if (rc) {
11691             _scsih_pcie_device_remove(ioc, pcie_device);
11692             pcie_device_put(pcie_device);
11693             continue;
11694         } else if (!pcie_device->starget) {
11695             /*
11696              * When async scanning is enabled, it's not possible to
11697              * remove devices while scanning is turned on, due to an
11698              * oops in scsi_sysfs_add_sdev()->add_device()->
11699              * sysfs_addrm_start().
11700              */
11701             if (!ioc->is_driver_loading) {
11702                 /* TODO: need to find out whether this condition
11703                  * can occur or not.
11704                  */
11705                 _scsih_pcie_device_remove(ioc, pcie_device);
11706                 pcie_device_put(pcie_device);
11707                 continue;
11708             }
11709         }
11710         pcie_device_make_active(ioc, pcie_device);
11711         pcie_device_put(pcie_device);
11712     }
11713 }
11714 
11715 /**
11716  * _scsih_probe_devices - probing for devices
11717  * @ioc: per adapter object
11718  *
11719  * Called during initial loading of the driver.
11720  */
11721 static void
11722 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
11723 {
11724     u16 volume_mapping_flags;
11725 
11726     if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
11727         return;  /* return when IOC doesn't support initiator mode */
11728 
11729     _scsih_probe_boot_devices(ioc);
11730 
11731     if (ioc->ir_firmware) {
11732         volume_mapping_flags =
11733             le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
11734             MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
11735         if (volume_mapping_flags ==
11736             MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
11737             _scsih_probe_raid(ioc);
11738             _scsih_probe_sas(ioc);
11739         } else {
11740             _scsih_probe_sas(ioc);
11741             _scsih_probe_raid(ioc);
11742         }
11743     } else {
11744         _scsih_probe_sas(ioc);
11745         _scsih_probe_pcie(ioc);
11746     }
11747 }
11748 
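/*
 * Editor's note (not part of the upstream source): the probe order above
 * follows IOC page 8's volume mapping mode.  With LOW_VOLUME_MAPPING the
 * RAID volumes are presumably expected to occupy the lowest target IDs, so
 * they are registered before the bare SAS devices; otherwise the SAS
 * devices go first.  This keeps target numbering consistent with the
 * firmware's device mapping.
 */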
11749 /**
11750  * scsih_scan_start - scsi lld callback for .scan_start
11751  * @shost: SCSI host pointer
11752  *
11753  * The shost has the ability to discover targets on its own instead
11754  * of scanning the entire bus.  In our implementation, we will kick off
11755  * firmware discovery.
11756  */
11757 static void
11758 scsih_scan_start(struct Scsi_Host *shost)
11759 {
11760     struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
11761     int rc;
11762     if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
11763         mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
11764     else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
11765         mpt3sas_enable_diag_buffer(ioc, 1);
11766 
11767     if (disable_discovery > 0)
11768         return;
11769 
11770     ioc->start_scan = 1;
11771     rc = mpt3sas_port_enable(ioc);
11772 
11773     if (rc != 0)
11774         ioc_info(ioc, "port enable: FAILED\n");
11775 }
11776 
11777 /**
11778  * _scsih_complete_devices_scanning - add the devices to sml and
11779  * complete ioc initialization.
11780  * @ioc: per adapter object
11781  *
11782  * Return nothing.
11783  */
11784 static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc)
11785 {
11786 
11787     if (ioc->wait_for_discovery_to_complete) {
11788         ioc->wait_for_discovery_to_complete = 0;
11789         _scsih_probe_devices(ioc);
11790     }
11791 
11792     mpt3sas_base_start_watchdog(ioc);
11793     ioc->is_driver_loading = 0;
11794 }
11795 
11796 /**
11797  * scsih_scan_finished - scsi lld callback for .scan_finished
11798  * @shost: SCSI host pointer
11799  * @time: elapsed time of the scan in jiffies
11800  *
11801  * This function will be called periodically until it returns 1 with the
11802  * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
11803  * we wait for firmware discovery to complete, then return 1.
11804  */
11805 static int
11806 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
11807 {
11808     struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
11809     u32 ioc_state;
11810     int issue_hard_reset = 0;
11811 
11812     if (disable_discovery > 0) {
11813         ioc->is_driver_loading = 0;
11814         ioc->wait_for_discovery_to_complete = 0;
11815         return 1;
11816     }
11817 
11818     if (time >= (300 * HZ)) {
11819         ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11820         ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
11821         ioc->is_driver_loading = 0;
11822         return 1;
11823     }
11824 
11825     if (ioc->start_scan) {
11826         ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
11827         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
11828             mpt3sas_print_fault_code(ioc, ioc_state &
11829                 MPI2_DOORBELL_DATA_MASK);
11830             issue_hard_reset = 1;
11831             goto out;
11832         } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
11833                 MPI2_IOC_STATE_COREDUMP) {
11834             mpt3sas_base_coredump_info(ioc, ioc_state &
11835                 MPI2_DOORBELL_DATA_MASK);
11836             mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
11837             issue_hard_reset = 1;
11838             goto out;
11839         }
11840         return 0;
11841     }
11842 
11843     if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) {
11844         ioc_info(ioc,
11845             "port enable: aborted due to diag reset\n");
11846         ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11847         goto out;
11848     }
11849     if (ioc->start_scan_failed) {
11850         ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
11851              ioc->start_scan_failed);
11852         ioc->is_driver_loading = 0;
11853         ioc->wait_for_discovery_to_complete = 0;
11854         ioc->remove_host = 1;
11855         return 1;
11856     }
11857 
11858     ioc_info(ioc, "port enable: SUCCESS\n");
11859     ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11860     _scsih_complete_devices_scanning(ioc);
11861 
11862 out:
11863     if (issue_hard_reset) {
11864         ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11865         if (mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET))
11866             ioc->is_driver_loading = 0;
11867     }
11868     return 1;
11869 }
11870 
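/*
 * Editor's note (not part of the upstream source): scan_start/scan_finished
 * implement the SCSI midlayer's asynchronous scan handshake.  Roughly, the
 * midlayer does something like the sketch below (a paraphrase of
 * scsi_scan.c, not the actual code):
 *
 *	shost->hostt->scan_start(shost);
 *	while (!shost->hostt->scan_finished(shost, jiffies - start))
 *		msleep(10);
 *
 * Here scan_start() kicks off firmware port enable, and scan_finished()
 * returns 1 once port enable completes (or fails/times out after 300s).
 */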
11871 /**
11872  * scsih_map_queues - map reply queues with request queues
11873  * @shost: SCSI host pointer
11874  */
11875 static int scsih_map_queues(struct Scsi_Host *shost)
11876 {
11877     struct MPT3SAS_ADAPTER *ioc =
11878         (struct MPT3SAS_ADAPTER *)shost->hostdata;
11879     struct blk_mq_queue_map *map;
11880     int i, qoff, offset;
11881     int nr_msix_vectors = ioc->iopoll_q_start_index;
11882     int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;
11883 
11884     if (shost->nr_hw_queues == 1)
11885         return 0;
11886 
11887     for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
11888         map = &shost->tag_set.map[i];
11889         map->nr_queues = 0;
11890         offset = 0;
11891         if (i == HCTX_TYPE_DEFAULT) {
11892             map->nr_queues =
11893                 nr_msix_vectors - ioc->high_iops_queues;
11894             offset = ioc->high_iops_queues;
11895         } else if (i == HCTX_TYPE_POLL)
11896             map->nr_queues = iopoll_q_count;
11897 
11898         if (!map->nr_queues)
11899             BUG_ON(i == HCTX_TYPE_DEFAULT);
11900 
11901         /*
11902          * The poll queue(s) doesn't have an IRQ (and hence IRQ
11903          * affinity), so use the regular blk-mq cpu mapping
11904          */
11905         map->queue_offset = qoff;
11906         if (i != HCTX_TYPE_POLL)
11907             blk_mq_pci_map_queues(map, ioc->pdev, offset);
11908         else
11909             blk_mq_map_queues(map);
11910 
11911         qoff += map->nr_queues;
11912     }
11913     return 0;
11914 }
11915 
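/*
 * Editor's note (not part of the upstream source): with multiple hardware
 * queues, scsih_map_queues() above lays the reply queues out as
 * [high_iops queues][remaining MSI-X queues -> HCTX_TYPE_DEFAULT]
 * [io-poll queues -> HCTX_TYPE_POLL].  The default map is derived from the
 * PCI MSI-X affinity via blk_mq_pci_map_queues(), while the poll queues
 * have no IRQ and fall back to the generic blk_mq_map_queues() mapping.
 */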
11916 /* shost template for SAS 2.0 HBA devices */
11917 static struct scsi_host_template mpt2sas_driver_template = {
11918     .module             = THIS_MODULE,
11919     .name               = "Fusion MPT SAS Host",
11920     .proc_name          = MPT2SAS_DRIVER_NAME,
11921     .queuecommand           = scsih_qcmd,
11922     .target_alloc           = scsih_target_alloc,
11923     .slave_alloc            = scsih_slave_alloc,
11924     .slave_configure        = scsih_slave_configure,
11925     .target_destroy         = scsih_target_destroy,
11926     .slave_destroy          = scsih_slave_destroy,
11927     .scan_finished          = scsih_scan_finished,
11928     .scan_start         = scsih_scan_start,
11929     .change_queue_depth     = scsih_change_queue_depth,
11930     .eh_abort_handler       = scsih_abort,
11931     .eh_device_reset_handler    = scsih_dev_reset,
11932     .eh_target_reset_handler    = scsih_target_reset,
11933     .eh_host_reset_handler      = scsih_host_reset,
11934     .bios_param         = scsih_bios_param,
11935     .can_queue          = 1,
11936     .this_id            = -1,
11937     .sg_tablesize           = MPT2SAS_SG_DEPTH,
11938     .max_sectors            = 32767,
11939     .cmd_per_lun            = 7,
11940     .shost_groups           = mpt3sas_host_groups,
11941     .sdev_groups            = mpt3sas_dev_groups,
11942     .track_queue_depth      = 1,
11943     .cmd_size           = sizeof(struct scsiio_tracker),
11944 };
11945 
11946 /* raid transport support for SAS 2.0 HBA devices */
11947 static struct raid_function_template mpt2sas_raid_functions = {
11948     .cookie     = &mpt2sas_driver_template,
11949     .is_raid    = scsih_is_raid,
11950     .get_resync = scsih_get_resync,
11951     .get_state  = scsih_get_state,
11952 };
11953 
11954 /* shost template for SAS 3.0 HBA devices */
11955 static struct scsi_host_template mpt3sas_driver_template = {
11956     .module             = THIS_MODULE,
11957     .name               = "Fusion MPT SAS Host",
11958     .proc_name          = MPT3SAS_DRIVER_NAME,
11959     .queuecommand           = scsih_qcmd,
11960     .target_alloc           = scsih_target_alloc,
11961     .slave_alloc            = scsih_slave_alloc,
11962     .slave_configure        = scsih_slave_configure,
11963     .target_destroy         = scsih_target_destroy,
11964     .slave_destroy          = scsih_slave_destroy,
11965     .scan_finished          = scsih_scan_finished,
11966     .scan_start         = scsih_scan_start,
11967     .change_queue_depth     = scsih_change_queue_depth,
11968     .eh_abort_handler       = scsih_abort,
11969     .eh_device_reset_handler    = scsih_dev_reset,
11970     .eh_target_reset_handler    = scsih_target_reset,
11971     .eh_host_reset_handler      = scsih_host_reset,
11972     .bios_param         = scsih_bios_param,
11973     .can_queue          = 1,
11974     .this_id            = -1,
11975     .sg_tablesize           = MPT3SAS_SG_DEPTH,
11976     .max_sectors            = 32767,
11977     .max_segment_size       = 0xffffffff,
11978     .cmd_per_lun            = 7,
11979     .shost_groups           = mpt3sas_host_groups,
11980     .sdev_groups            = mpt3sas_dev_groups,
11981     .track_queue_depth      = 1,
11982     .cmd_size           = sizeof(struct scsiio_tracker),
11983     .map_queues         = scsih_map_queues,
11984     .mq_poll            = mpt3sas_blk_mq_poll,
11985 };
11986 
11987 /* raid transport support for SAS 3.0 HBA devices */
11988 static struct raid_function_template mpt3sas_raid_functions = {
11989     .cookie     = &mpt3sas_driver_template,
11990     .is_raid    = scsih_is_raid,
11991     .get_resync = scsih_get_resync,
11992     .get_state  = scsih_get_state,
11993 };
11994 
11995 /**
11996  * _scsih_determine_hba_mpi_version - determine in which MPI version class
11997  *                  this device belongs to.
11998  * @pdev: PCI device struct
11999  *
12000  * Return: MPI2_VERSION for SAS 2.0 HBA devices,
12001  *  MPI25_VERSION for SAS 3.0 HBA devices, and
12002  *  MPI26_VERSION for Cutlass, Intruder and later SAS 3.0/3.5 HBA devices.
12003  */
12004 static u16
12005 _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
12006 {
12007 
12008     switch (pdev->device) {
12009     case MPI2_MFGPAGE_DEVID_SSS6200:
12010     case MPI2_MFGPAGE_DEVID_SAS2004:
12011     case MPI2_MFGPAGE_DEVID_SAS2008:
12012     case MPI2_MFGPAGE_DEVID_SAS2108_1:
12013     case MPI2_MFGPAGE_DEVID_SAS2108_2:
12014     case MPI2_MFGPAGE_DEVID_SAS2108_3:
12015     case MPI2_MFGPAGE_DEVID_SAS2116_1:
12016     case MPI2_MFGPAGE_DEVID_SAS2116_2:
12017     case MPI2_MFGPAGE_DEVID_SAS2208_1:
12018     case MPI2_MFGPAGE_DEVID_SAS2208_2:
12019     case MPI2_MFGPAGE_DEVID_SAS2208_3:
12020     case MPI2_MFGPAGE_DEVID_SAS2208_4:
12021     case MPI2_MFGPAGE_DEVID_SAS2208_5:
12022     case MPI2_MFGPAGE_DEVID_SAS2208_6:
12023     case MPI2_MFGPAGE_DEVID_SAS2308_1:
12024     case MPI2_MFGPAGE_DEVID_SAS2308_2:
12025     case MPI2_MFGPAGE_DEVID_SAS2308_3:
12026     case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
12027     case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
12028         return MPI2_VERSION;
12029     case MPI25_MFGPAGE_DEVID_SAS3004:
12030     case MPI25_MFGPAGE_DEVID_SAS3008:
12031     case MPI25_MFGPAGE_DEVID_SAS3108_1:
12032     case MPI25_MFGPAGE_DEVID_SAS3108_2:
12033     case MPI25_MFGPAGE_DEVID_SAS3108_5:
12034     case MPI25_MFGPAGE_DEVID_SAS3108_6:
12035         return MPI25_VERSION;
12036     case MPI26_MFGPAGE_DEVID_SAS3216:
12037     case MPI26_MFGPAGE_DEVID_SAS3224:
12038     case MPI26_MFGPAGE_DEVID_SAS3316_1:
12039     case MPI26_MFGPAGE_DEVID_SAS3316_2:
12040     case MPI26_MFGPAGE_DEVID_SAS3316_3:
12041     case MPI26_MFGPAGE_DEVID_SAS3316_4:
12042     case MPI26_MFGPAGE_DEVID_SAS3324_1:
12043     case MPI26_MFGPAGE_DEVID_SAS3324_2:
12044     case MPI26_MFGPAGE_DEVID_SAS3324_3:
12045     case MPI26_MFGPAGE_DEVID_SAS3324_4:
12046     case MPI26_MFGPAGE_DEVID_SAS3508:
12047     case MPI26_MFGPAGE_DEVID_SAS3508_1:
12048     case MPI26_MFGPAGE_DEVID_SAS3408:
12049     case MPI26_MFGPAGE_DEVID_SAS3516:
12050     case MPI26_MFGPAGE_DEVID_SAS3516_1:
12051     case MPI26_MFGPAGE_DEVID_SAS3416:
12052     case MPI26_MFGPAGE_DEVID_SAS3616:
12053     case MPI26_ATLAS_PCIe_SWITCH_DEVID:
12054     case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
12055     case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
12056     case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
12057     case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
12058     case MPI26_MFGPAGE_DEVID_INVALID0_3916:
12059     case MPI26_MFGPAGE_DEVID_INVALID1_3916:
12060     case MPI26_MFGPAGE_DEVID_INVALID0_3816:
12061     case MPI26_MFGPAGE_DEVID_INVALID1_3816:
12062         return MPI26_VERSION;
12063     }
12064     return 0;
12065 }
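
_scsih_probe() below calls the classifier above and then gates enumeration on the hbas_to_enumerate module parameter: 1 keeps only MPI2 (SAS 2.0) adapters, 2 keeps only MPI25/MPI26 (SAS 3.0 and newer) adapters, and any other value keeps everything the classifier recognizes. A minimal sketch of that decision, with stand-in constants in place of the real MPI2*_VERSION macros:

#include <stdbool.h>

/* Stand-ins for MPI2_VERSION, MPI25_VERSION and MPI26_VERSION. */
enum { SKETCH_MPI2 = 2, SKETCH_MPI25 = 25, SKETCH_MPI26 = 26 };

bool sketch_should_enumerate(unsigned int mpi_version, int hbas_to_enumerate)
{
        if (mpi_version == 0)                   /* device id not recognized */
                return false;
        if (hbas_to_enumerate == 1)             /* SAS 2.0 HBAs only */
                return mpi_version == SKETCH_MPI2;
        if (hbas_to_enumerate == 2)             /* SAS 3.0+ HBAs only */
                return mpi_version == SKETCH_MPI25 ||
                       mpi_version == SKETCH_MPI26;
        return true;                            /* default: enumerate all */
}
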
12066 
12067 /**
12068  * _scsih_probe - attach and add scsi host
12069  * @pdev: PCI device struct
12070  * @id: pci device id
12071  *
12072  * Return: 0 success, anything else error.
12073  */
12074 static int
12075 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
12076 {
12077     struct MPT3SAS_ADAPTER *ioc;
12078     struct Scsi_Host *shost = NULL;
12079     int rv;
12080     u16 hba_mpi_version;
12081     int iopoll_q_count = 0;
12082 
12083     /* Determine in which MPI version class this pci device belongs */
12084     hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
12085     if (hba_mpi_version == 0)
12086         return -ENODEV;
12087 
12088     /* Enumerate only SAS 2.0 HBAs if hbas_to_enumerate is one;
12089      * for other generation HBAs, return -ENODEV.
12090      */
12091     if ((hbas_to_enumerate == 1) && (hba_mpi_version !=  MPI2_VERSION))
12092         return -ENODEV;
12093 
12094     /* Enumerate only SAS 3.0 HBAs if hbas_to_enumerate is two;
12095      * for other generation HBAs, return -ENODEV.
12096      */
12097     if ((hbas_to_enumerate == 2) && (!(hba_mpi_version ==  MPI25_VERSION
12098         || hba_mpi_version ==  MPI26_VERSION)))
12099         return -ENODEV;
12100 
12101     switch (hba_mpi_version) {
12102     case MPI2_VERSION:
12103         pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
12104             PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
12105         /* Use mpt2sas driver host template for SAS 2.0 HBA's */
12106         shost = scsi_host_alloc(&mpt2sas_driver_template,
12107           sizeof(struct MPT3SAS_ADAPTER));
12108         if (!shost)
12109             return -ENODEV;
12110         ioc = shost_priv(shost);
12111         memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
12112         ioc->hba_mpi_version_belonged = hba_mpi_version;
12113         ioc->id = mpt2_ids++;
12114         sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
12115         switch (pdev->device) {
12116         case MPI2_MFGPAGE_DEVID_SSS6200:
12117             ioc->is_warpdrive = 1;
12118             ioc->hide_ir_msg = 1;
12119             break;
12120         case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
12121         case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
12122             ioc->is_mcpu_endpoint = 1;
12123             break;
12124         default:
12125             ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
12126             break;
12127         }
12128 
12129         if (multipath_on_hba == -1 || multipath_on_hba == 0)
12130             ioc->multipath_on_hba = 0;
12131         else
12132             ioc->multipath_on_hba = 1;
12133 
12134         break;
12135     case MPI25_VERSION:
12136     case MPI26_VERSION:
12137         /* Use mpt3sas driver host template for SAS 3.0 HBA's */
12138         shost = scsi_host_alloc(&mpt3sas_driver_template,
12139           sizeof(struct MPT3SAS_ADAPTER));
12140         if (!shost)
12141             return -ENODEV;
12142         ioc = shost_priv(shost);
12143         memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
12144         ioc->hba_mpi_version_belonged = hba_mpi_version;
12145         ioc->id = mpt3_ids++;
12146         sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
12147         switch (pdev->device) {
12148         case MPI26_MFGPAGE_DEVID_SAS3508:
12149         case MPI26_MFGPAGE_DEVID_SAS3508_1:
12150         case MPI26_MFGPAGE_DEVID_SAS3408:
12151         case MPI26_MFGPAGE_DEVID_SAS3516:
12152         case MPI26_MFGPAGE_DEVID_SAS3516_1:
12153         case MPI26_MFGPAGE_DEVID_SAS3416:
12154         case MPI26_MFGPAGE_DEVID_SAS3616:
12155         case MPI26_ATLAS_PCIe_SWITCH_DEVID:
12156             ioc->is_gen35_ioc = 1;
12157             break;
12158         case MPI26_MFGPAGE_DEVID_INVALID0_3816:
12159         case MPI26_MFGPAGE_DEVID_INVALID0_3916:
12160             dev_err(&pdev->dev,
12161                 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid\n",
12162                 pdev->device, pdev->subsystem_vendor,
12163                 pdev->subsystem_device);
12164             return 1;
12165         case MPI26_MFGPAGE_DEVID_INVALID1_3816:
12166         case MPI26_MFGPAGE_DEVID_INVALID1_3916:
12167             dev_err(&pdev->dev,
12168                 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered\n",
12169                 pdev->device, pdev->subsystem_vendor,
12170                 pdev->subsystem_device);
12171             return 1;
12172         case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
12173         case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
12174             dev_info(&pdev->dev,
12175                 "HBA is in Configurable Secure mode\n");
12176             fallthrough;
12177         case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
12178         case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
12179             ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
12180             break;
12181         default:
12182             ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
12183         }
12184         if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
12185             pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
12186             (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
12187             ioc->combined_reply_queue = 1;
12188             if (ioc->is_gen35_ioc)
12189                 ioc->combined_reply_index_count =
12190                  MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
12191             else
12192                 ioc->combined_reply_index_count =
12193                  MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
12194         }
12195 
12196         switch (ioc->is_gen35_ioc) {
12197         case 0:
12198             if (multipath_on_hba == -1 || multipath_on_hba == 0)
12199                 ioc->multipath_on_hba = 0;
12200             else
12201                 ioc->multipath_on_hba = 1;
12202             break;
12203         case 1:
12204             if (multipath_on_hba == -1 || multipath_on_hba > 0)
12205                 ioc->multipath_on_hba = 1;
12206             else
12207                 ioc->multipath_on_hba = 0;
12208             break;
12209         default:
12210             break;
12211         }
12212 
12213         break;
12214     default:
12215         return -ENODEV;
12216     }
12217 
12218     INIT_LIST_HEAD(&ioc->list);
12219     spin_lock(&gioc_lock);
12220     list_add_tail(&ioc->list, &mpt3sas_ioc_list);
12221     spin_unlock(&gioc_lock);
12222     ioc->shost = shost;
12223     ioc->pdev = pdev;
12224     ioc->scsi_io_cb_idx = scsi_io_cb_idx;
12225     ioc->tm_cb_idx = tm_cb_idx;
12226     ioc->ctl_cb_idx = ctl_cb_idx;
12227     ioc->base_cb_idx = base_cb_idx;
12228     ioc->port_enable_cb_idx = port_enable_cb_idx;
12229     ioc->transport_cb_idx = transport_cb_idx;
12230     ioc->scsih_cb_idx = scsih_cb_idx;
12231     ioc->config_cb_idx = config_cb_idx;
12232     ioc->tm_tr_cb_idx = tm_tr_cb_idx;
12233     ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
12234     ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
12235     ioc->logging_level = logging_level;
12236     ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
12237     /* Host waits for minimum of six seconds */
12238     ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
12239     /*
12240      * Enable MEMORY MOVE support flag.
12241      */
12242     ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
12243     /* Enable ADDITIONAL QUERY support flag. */
12244     ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY;
12245 
12246     ioc->enable_sdev_max_qd = enable_sdev_max_qd;
12247 
12248     /* misc semaphores and spin locks */
12249     mutex_init(&ioc->reset_in_progress_mutex);
12250     /* initializing pci_access_mutex lock */
12251     mutex_init(&ioc->pci_access_mutex);
12252     spin_lock_init(&ioc->ioc_reset_in_progress_lock);
12253     spin_lock_init(&ioc->scsi_lookup_lock);
12254     spin_lock_init(&ioc->sas_device_lock);
12255     spin_lock_init(&ioc->sas_node_lock);
12256     spin_lock_init(&ioc->fw_event_lock);
12257     spin_lock_init(&ioc->raid_device_lock);
12258     spin_lock_init(&ioc->pcie_device_lock);
12259     spin_lock_init(&ioc->diag_trigger_lock);
12260 
12261     INIT_LIST_HEAD(&ioc->sas_device_list);
12262     INIT_LIST_HEAD(&ioc->sas_device_init_list);
12263     INIT_LIST_HEAD(&ioc->sas_expander_list);
12264     INIT_LIST_HEAD(&ioc->enclosure_list);
12265     INIT_LIST_HEAD(&ioc->pcie_device_list);
12266     INIT_LIST_HEAD(&ioc->pcie_device_init_list);
12267     INIT_LIST_HEAD(&ioc->fw_event_list);
12268     INIT_LIST_HEAD(&ioc->raid_device_list);
12269     INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
12270     INIT_LIST_HEAD(&ioc->delayed_tr_list);
12271     INIT_LIST_HEAD(&ioc->delayed_sc_list);
12272     INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
12273     INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
12274     INIT_LIST_HEAD(&ioc->reply_queue_list);
12275     INIT_LIST_HEAD(&ioc->port_table_list);
12276 
12277     sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
12278 
12279     /* init shost parameters */
12280     shost->max_cmd_len = 32;
12281     shost->max_lun = max_lun;
12282     shost->transportt = mpt3sas_transport_template;
12283     shost->unique_id = ioc->id;
12284 
12285     if (ioc->is_mcpu_endpoint) {
12286         /* mCPU MPI supports 64K max IO */
12287         shost->max_sectors = 128;
12288         ioc_info(ioc, "The max_sectors value is set to %d\n",
12289              shost->max_sectors);
12290     } else {
12291         if (max_sectors != 0xFFFF) {
12292             if (max_sectors < 64) {
12293                 shost->max_sectors = 64;
12294                 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
12295                      max_sectors);
12296             } else if (max_sectors > 32767) {
12297                 shost->max_sectors = 32767;
12298                 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning default value of 32767.\n",
12299                      max_sectors);
12300             } else {
12301                 shost->max_sectors = max_sectors & 0xFFFE;
12302                 ioc_info(ioc, "The max_sectors value is set to %d\n",
12303                      shost->max_sectors);
12304             }
12305         }
12306     }
12307     /* register EEDP capabilities with SCSI layer */
12308     if (prot_mask >= 0)
12309         scsi_host_set_prot(shost, (prot_mask & 0x07));
12310     else
12311         scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
12312                    | SHOST_DIF_TYPE2_PROTECTION
12313                    | SHOST_DIF_TYPE3_PROTECTION);
12314 
12315     scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
12316 
12317     /* event thread */
12318     snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
12319         "fw_event_%s%d", ioc->driver_name, ioc->id);
12320     ioc->firmware_event_thread = alloc_ordered_workqueue(
12321         ioc->firmware_event_name, 0);
12322     if (!ioc->firmware_event_thread) {
12323         ioc_err(ioc, "failure at %s:%d/%s()!\n",
12324             __FILE__, __LINE__, __func__);
12325         rv = -ENODEV;
12326         goto out_thread_fail;
12327     }
12328 
12329     shost->host_tagset = 0;
12330 
12331     if (ioc->is_gen35_ioc && host_tagset_enable)
12332         shost->host_tagset = 1;
12333 
12334     ioc->is_driver_loading = 1;
12335     if ((mpt3sas_base_attach(ioc))) {
12336         ioc_err(ioc, "failure at %s:%d/%s()!\n",
12337             __FILE__, __LINE__, __func__);
12338         rv = -ENODEV;
12339         goto out_attach_fail;
12340     }
12341 
12342     if (ioc->is_warpdrive) {
12343         if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_EXPOSE_ALL_DISKS)
12344             ioc->hide_drives = 0;
12345         else if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_HIDE_ALL_DISKS)
12346             ioc->hide_drives = 1;
12347         else {
12348             if (mpt3sas_get_num_volumes(ioc))
12349                 ioc->hide_drives = 1;
12350             else
12351                 ioc->hide_drives = 0;
12352         }
12353     } else
12354         ioc->hide_drives = 0;
12355 
12356     shost->nr_hw_queues = 1;
12357 
12358     if (shost->host_tagset) {
12359         shost->nr_hw_queues =
12360             ioc->reply_queue_count - ioc->high_iops_queues;
12361 
12362         iopoll_q_count =
12363             ioc->reply_queue_count - ioc->iopoll_q_start_index;
12364 
12365         shost->nr_maps = iopoll_q_count ? 3 : 1;
12366 
12367         dev_info(&ioc->pdev->dev,
12368             "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
12369             shost->can_queue, shost->nr_hw_queues);
12370     }
12371 
12372     rv = scsi_add_host(shost, &pdev->dev);
12373     if (rv) {
12374         ioc_err(ioc, "failure at %s:%d/%s()!\n",
12375             __FILE__, __LINE__, __func__);
12376         goto out_add_shost_fail;
12377     }
12378 
12379     scsi_scan_host(shost);
12380     mpt3sas_setup_debugfs(ioc);
12381     return 0;
12382 out_add_shost_fail:
12383     mpt3sas_base_detach(ioc);
12384  out_attach_fail:
12385     destroy_workqueue(ioc->firmware_event_thread);
12386  out_thread_fail:
12387     spin_lock(&gioc_lock);
12388     list_del(&ioc->list);
12389     spin_unlock(&gioc_lock);
12390     scsi_host_put(shost);
12391     return rv;
12392 }
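
For non-mCPU adapters the probe path above also sanitizes the max_sectors module parameter: 0xFFFF means "keep the host template default", values below 64 are raised to 64, values above 32767 are capped at 32767, and anything in between is rounded down to an even sector count. A minimal sketch of that policy (the function name is illustrative, not part of the driver):

/* Mirrors the max_sectors clamping performed in _scsih_probe() above. */
unsigned int sketch_clamp_max_sectors(unsigned int requested,
                                      unsigned int template_default)
{
        if (requested == 0xFFFF)        /* module parameter left at default */
                return template_default;
        if (requested < 64)
                return 64;
        if (requested > 32767)
                return 32767;
        return requested & 0xFFFE;      /* force an even sector count */
}
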
12393 
12394 /**
12395  * scsih_suspend - power management suspend main entry point
12396  * @dev: Device struct
12397  *
12398  * Return: 0 success, anything else error.
12399  */
12400 static int __maybe_unused
12401 scsih_suspend(struct device *dev)
12402 {
12403     struct pci_dev *pdev = to_pci_dev(dev);
12404     struct Scsi_Host *shost;
12405     struct MPT3SAS_ADAPTER *ioc;
12406     int rc;
12407 
12408     rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12409     if (rc)
12410         return rc;
12411 
12412     mpt3sas_base_stop_watchdog(ioc);
12413     scsi_block_requests(shost);
12414     _scsih_nvme_shutdown(ioc);
12415     ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
12416          pdev, pci_name(pdev));
12417 
12418     mpt3sas_base_free_resources(ioc);
12419     return 0;
12420 }
12421 
12422 /**
12423  * scsih_resume - power management resume main entry point
12424  * @dev: Device struct
12425  *
12426  * Return: 0 success, anything else error.
12427  */
12428 static int __maybe_unused
12429 scsih_resume(struct device *dev)
12430 {
12431     struct pci_dev *pdev = to_pci_dev(dev);
12432     struct Scsi_Host *shost;
12433     struct MPT3SAS_ADAPTER *ioc;
12434     pci_power_t device_state = pdev->current_state;
12435     int r;
12436 
12437     r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12438     if (r)
12439         return r;
12440 
12441     ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
12442          pdev, pci_name(pdev), device_state);
12443 
12444     ioc->pdev = pdev;
12445     r = mpt3sas_base_map_resources(ioc);
12446     if (r)
12447         return r;
12448     ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
12449     mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
12450     scsi_unblock_requests(shost);
12451     mpt3sas_base_start_watchdog(ioc);
12452     return 0;
12453 }
12454 
12455 /**
12456  * scsih_pci_error_detected - Called when a PCI error is detected.
12457  * @pdev: PCI device struct
12458  * @state: PCI channel state
12459  *
12460  * Description: Called when a PCI error is detected.
12461  *
12462  * Return: PCI_ERS_RESULT_CAN_RECOVER, PCI_ERS_RESULT_NEED_RESET or
12463  *  PCI_ERS_RESULT_DISCONNECT.
12463  */
12464 static pci_ers_result_t
12465 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
12466 {
12467     struct Scsi_Host *shost;
12468     struct MPT3SAS_ADAPTER *ioc;
12469 
12470     if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12471         return PCI_ERS_RESULT_DISCONNECT;
12472 
12473     ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
12474 
12475     switch (state) {
12476     case pci_channel_io_normal:
12477         return PCI_ERS_RESULT_CAN_RECOVER;
12478     case pci_channel_io_frozen:
12479         /* Fatal error, prepare for slot reset */
12480         ioc->pci_error_recovery = 1;
12481         scsi_block_requests(ioc->shost);
12482         mpt3sas_base_stop_watchdog(ioc);
12483         mpt3sas_base_free_resources(ioc);
12484         return PCI_ERS_RESULT_NEED_RESET;
12485     case pci_channel_io_perm_failure:
12486         /* Permanent error, prepare for device removal */
12487         ioc->pci_error_recovery = 1;
12488         mpt3sas_base_stop_watchdog(ioc);
12489         mpt3sas_base_pause_mq_polling(ioc);
12490         _scsih_flush_running_cmds(ioc);
12491         return PCI_ERS_RESULT_DISCONNECT;
12492     }
12493     return PCI_ERS_RESULT_NEED_RESET;
12494 }
12495 
12496 /**
12497  * scsih_pci_slot_reset - Called when PCI slot has been reset.
12498  * @pdev: PCI device struct
12499  *
12500  * Description: This routine is called by the pci error recovery
12501  * code after the PCI slot has been reset, just before we
12502  * should resume normal operations.
12503  */
12504 static pci_ers_result_t
12505 scsih_pci_slot_reset(struct pci_dev *pdev)
12506 {
12507     struct Scsi_Host *shost;
12508     struct MPT3SAS_ADAPTER *ioc;
12509     int rc;
12510 
12511     if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12512         return PCI_ERS_RESULT_DISCONNECT;
12513 
12514     ioc_info(ioc, "PCI error: slot reset callback!!\n");
12515 
12516     ioc->pci_error_recovery = 0;
12517     ioc->pdev = pdev;
12518     pci_restore_state(pdev);
12519     rc = mpt3sas_base_map_resources(ioc);
12520     if (rc)
12521         return PCI_ERS_RESULT_DISCONNECT;
12522 
12523     ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
12524     rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
12525 
12526     ioc_warn(ioc, "hard reset: %s\n",
12527          (rc == 0) ? "success" : "failed");
12528 
12529     if (!rc)
12530         return PCI_ERS_RESULT_RECOVERED;
12531     else
12532         return PCI_ERS_RESULT_DISCONNECT;
12533 }
12534 
12535 /**
12536  * scsih_pci_resume() - resume normal ops after PCI reset
12537  * @pdev: pointer to PCI device
12538  *
12539  * Called when the error recovery driver tells us that it's
12540  * OK to resume normal operation. Use completion to allow
12541  * halted scsi ops to resume.
12542  */
12543 static void
12544 scsih_pci_resume(struct pci_dev *pdev)
12545 {
12546     struct Scsi_Host *shost;
12547     struct MPT3SAS_ADAPTER *ioc;
12548 
12549     if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12550         return;
12551 
12552     ioc_info(ioc, "PCI error: resume callback!!\n");
12553 
12554     mpt3sas_base_start_watchdog(ioc);
12555     scsi_unblock_requests(ioc->shost);
12556 }
12557 
12558 /**
12559  * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
12560  * @pdev: pointer to PCI device
12561  */
12562 static pci_ers_result_t
12563 scsih_pci_mmio_enabled(struct pci_dev *pdev)
12564 {
12565     struct Scsi_Host *shost;
12566     struct MPT3SAS_ADAPTER *ioc;
12567 
12568     if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12569         return PCI_ERS_RESULT_DISCONNECT;
12570 
12571     ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
12572 
12573     /* TODO - dump whatever for debugging purposes */
12574 
12575     /* This is called only if scsih_pci_error_detected returns
12576      * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
12577      * works, so there is no need to reset the slot.
12578      */
12579     return PCI_ERS_RESULT_RECOVERED;
12580 }
12581 
12582 /**
12583  * scsih_ncq_prio_supp - Check for NCQ command priority support
12584  * @sdev: scsi device struct
12585  *
12586  * This is called when a user indicates they would like to enable
12587  * NCQ command priorities. This works only on SATA devices.
12588  */
12589 bool scsih_ncq_prio_supp(struct scsi_device *sdev)
12590 {
12591     struct scsi_vpd *vpd;
12592     bool ncq_prio_supp = false;
12593 
12594     rcu_read_lock();
12595     vpd = rcu_dereference(sdev->vpd_pg89);
12596     if (!vpd || vpd->len < 214)
12597         goto out;
12598 
12599     ncq_prio_supp = (vpd->data[213] >> 4) & 1;
12600 out:
12601     rcu_read_unlock();
12602 
12603     return ncq_prio_supp;
12604 }
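
The check above relies on the ATA Information VPD page (page 0x89) that the SAT layer exposes for SATA devices: byte 213, bit 4 advertises NCQ priority support, so the buffer must hold at least 214 bytes. The same test on a raw VPD buffer, as a small standalone sketch (the function name is illustrative):

#include <stdbool.h>
#include <stddef.h>

/* Returns true if VPD page 0x89 reports NCQ priority support. */
bool sketch_vpd89_ncq_prio_supported(const unsigned char *vpd, size_t len)
{
        if (!vpd || len < 214)          /* need bytes 0..213 present */
                return false;
        return (vpd[213] >> 4) & 1;
}
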
12605 /*
12606  * The pci device ids are defined in mpi/mpi2_cnfg.h.
12607  */
12608 static const struct pci_device_id mpt3sas_pci_table[] = {
12609     /* Spitfire ~ 2004 */
12610     { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
12611         PCI_ANY_ID, PCI_ANY_ID },
12612     /* Falcon ~ 2008 */
12613     { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
12614         PCI_ANY_ID, PCI_ANY_ID },
12615     /* Liberator ~ 2108 */
12616     { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
12617         PCI_ANY_ID, PCI_ANY_ID },
12618     { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
12619         PCI_ANY_ID, PCI_ANY_ID },
12620     { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
12621         PCI_ANY_ID, PCI_ANY_ID },
12622     /* Meteor ~ 2116 */
12623     { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
12624         PCI_ANY_ID, PCI_ANY_ID },
12625     { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
12626         PCI_ANY_ID, PCI_ANY_ID },
12627     /* Thunderbolt ~ 2208 */
12628     { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
12629         PCI_ANY_ID, PCI_ANY_ID },
12630     { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
12631         PCI_ANY_ID, PCI_ANY_ID },
12632     { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
12633         PCI_ANY_ID, PCI_ANY_ID },
12634     { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
12635         PCI_ANY_ID, PCI_ANY_ID },
12636     { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
12637         PCI_ANY_ID, PCI_ANY_ID },
12638     { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
12639         PCI_ANY_ID, PCI_ANY_ID },
12640     /* Mustang ~ 2308 */
12641     { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
12642         PCI_ANY_ID, PCI_ANY_ID },
12643     { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
12644         PCI_ANY_ID, PCI_ANY_ID },
12645     { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
12646         PCI_ANY_ID, PCI_ANY_ID },
12647     { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
12648         PCI_ANY_ID, PCI_ANY_ID },
12649     { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
12650         PCI_ANY_ID, PCI_ANY_ID },
12651     /* SSS6200 */
12652     { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
12653         PCI_ANY_ID, PCI_ANY_ID },
12654     /* Fury ~ 3004 and 3008 */
12655     { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
12656         PCI_ANY_ID, PCI_ANY_ID },
12657     { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
12658         PCI_ANY_ID, PCI_ANY_ID },
12659     /* Invader ~ 3108 */
12660     { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
12661         PCI_ANY_ID, PCI_ANY_ID },
12662     { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
12663         PCI_ANY_ID, PCI_ANY_ID },
12664     { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
12665         PCI_ANY_ID, PCI_ANY_ID },
12666     { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
12667         PCI_ANY_ID, PCI_ANY_ID },
12668     /* Cutlass ~ 3216 and 3224 */
12669     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
12670         PCI_ANY_ID, PCI_ANY_ID },
12671     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
12672         PCI_ANY_ID, PCI_ANY_ID },
12673     /* Intruder ~ 3316 and 3324 */
12674     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
12675         PCI_ANY_ID, PCI_ANY_ID },
12676     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
12677         PCI_ANY_ID, PCI_ANY_ID },
12678     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
12679         PCI_ANY_ID, PCI_ANY_ID },
12680     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
12681         PCI_ANY_ID, PCI_ANY_ID },
12682     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
12683         PCI_ANY_ID, PCI_ANY_ID },
12684     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
12685         PCI_ANY_ID, PCI_ANY_ID },
12686     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
12687         PCI_ANY_ID, PCI_ANY_ID },
12688     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
12689         PCI_ANY_ID, PCI_ANY_ID },
12690     /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
12691     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
12692         PCI_ANY_ID, PCI_ANY_ID },
12693     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
12694         PCI_ANY_ID, PCI_ANY_ID },
12695     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
12696         PCI_ANY_ID, PCI_ANY_ID },
12697     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
12698         PCI_ANY_ID, PCI_ANY_ID },
12699     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
12700         PCI_ANY_ID, PCI_ANY_ID },
12701     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
12702         PCI_ANY_ID, PCI_ANY_ID },
12703     /* Mercator ~ 3616*/
12704     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
12705         PCI_ANY_ID, PCI_ANY_ID },
12706 
12707     /* Aero SI 0x00E1 Configurable Secure
12708      * 0x00E2 Hard Secure
12709      */
12710     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
12711         PCI_ANY_ID, PCI_ANY_ID },
12712     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
12713         PCI_ANY_ID, PCI_ANY_ID },
12714 
12715     /*
12716      *  Aero SI -> 0x00E0 Invalid, 0x00E3 Tampered
12717      */
12718     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
12719         PCI_ANY_ID, PCI_ANY_ID },
12720     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
12721         PCI_ANY_ID, PCI_ANY_ID },
12722 
12723     /* Atlas PCIe Switch Management Port */
12724     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
12725         PCI_ANY_ID, PCI_ANY_ID },
12726 
12727     /* Sea SI 0x00E5 Configurable Secure
12728      * 0x00E6 Hard Secure
12729      */
12730     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
12731         PCI_ANY_ID, PCI_ANY_ID },
12732     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
12733         PCI_ANY_ID, PCI_ANY_ID },
12734 
12735     /*
12736      *  Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered
12737      */
12738     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
12739         PCI_ANY_ID, PCI_ANY_ID },
12740     { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
12741         PCI_ANY_ID, PCI_ANY_ID },
12742 
12743     {0}     /* Terminating entry */
12744 };
12745 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
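
Every entry above matches on the LSI vendor ID with wildcarded subsystem IDs, and MODULE_DEVICE_TABLE(pci, ...) exports the list so udev/modprobe can auto-load the module when a matching PCI function appears. The same pattern for a hypothetical driver, written with the PCI_DEVICE() helper that wildcards the subsystem IDs for you (the numeric IDs below are placeholders, not taken from this driver):

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_pci_table[] = {
        { PCI_DEVICE(0x1234, 0x5678) }, /* placeholder vendor/device IDs */
        { }                             /* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_table);
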
12746 
12747 static struct pci_error_handlers _mpt3sas_err_handler = {
12748     .error_detected = scsih_pci_error_detected,
12749     .mmio_enabled   = scsih_pci_mmio_enabled,
12750     .slot_reset = scsih_pci_slot_reset,
12751     .resume     = scsih_pci_resume,
12752 };
12753 
12754 static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);
12755 
12756 static struct pci_driver mpt3sas_driver = {
12757     .name       = MPT3SAS_DRIVER_NAME,
12758     .id_table   = mpt3sas_pci_table,
12759     .probe      = _scsih_probe,
12760     .remove     = scsih_remove,
12761     .shutdown   = scsih_shutdown,
12762     .err_handler    = &_mpt3sas_err_handler,
12763     .driver.pm  = &scsih_pm_ops,
12764 };
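
This pci_driver is registered and unregistered by hand in _mpt3sas_init() and _mpt3sas_exit() below, because the transport template, RAID class templates, and ctl interface have to be set up around pci_register_driver(). A driver with no such extra work could instead use the module_pci_driver() helper, which expands to the same module_init/module_exit boilerplate; a minimal, hypothetical sketch (all names and IDs are placeholders):

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_ids[] = {
        { PCI_DEVICE(0x1234, 0xabcd) }, /* placeholder IDs */
        { }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        return pcim_enable_device(pdev); /* minimal managed enable */
}

static void example_remove(struct pci_dev *pdev)
{
}

static struct pci_driver example_driver = {
        .name     = "example_pci",
        .id_table = example_ids,
        .probe    = example_probe,
        .remove   = example_remove,
};
module_pci_driver(example_driver);

MODULE_LICENSE("GPL");
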
12765 
12766 /**
12767  * scsih_init - main entry point for this driver.
12768  *
12769  * Return: 0 success, anything else error.
12770  */
12771 static int
12772 scsih_init(void)
12773 {
12774     mpt2_ids = 0;
12775     mpt3_ids = 0;
12776 
12777     mpt3sas_base_initialize_callback_handler();
12778 
12779     /* queuecommand callback handler */
12780     scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
12781 
12782     /* task management callback handler */
12783     tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
12784 
12785     /* base internal commands callback handler */
12786     base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
12787     port_enable_cb_idx = mpt3sas_base_register_callback_handler(
12788         mpt3sas_port_enable_done);
12789 
12790     /* transport internal commands callback handler */
12791     transport_cb_idx = mpt3sas_base_register_callback_handler(
12792         mpt3sas_transport_done);
12793 
12794     /* scsih internal commands callback handler */
12795     scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
12796 
12797     /* configuration page API internal commands callback handler */
12798     config_cb_idx = mpt3sas_base_register_callback_handler(
12799         mpt3sas_config_done);
12800 
12801     /* ctl module callback handler */
12802     ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
12803 
12804     tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
12805         _scsih_tm_tr_complete);
12806 
12807     tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
12808         _scsih_tm_volume_tr_complete);
12809 
12810     tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
12811         _scsih_sas_control_complete);
12812 
12813     mpt3sas_init_debugfs();
12814     return 0;
12815 }
12816 
12817 /**
12818  * scsih_exit - exit point for this driver (when it is a module).
12821  */
12822 static void
12823 scsih_exit(void)
12824 {
12825 
12826     mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
12827     mpt3sas_base_release_callback_handler(tm_cb_idx);
12828     mpt3sas_base_release_callback_handler(base_cb_idx);
12829     mpt3sas_base_release_callback_handler(port_enable_cb_idx);
12830     mpt3sas_base_release_callback_handler(transport_cb_idx);
12831     mpt3sas_base_release_callback_handler(scsih_cb_idx);
12832     mpt3sas_base_release_callback_handler(config_cb_idx);
12833     mpt3sas_base_release_callback_handler(ctl_cb_idx);
12834 
12835     mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
12836     mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
12837     mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
12838 
12839 /* raid transport support */
12840     if (hbas_to_enumerate != 1)
12841         raid_class_release(mpt3sas_raid_template);
12842     if (hbas_to_enumerate != 2)
12843         raid_class_release(mpt2sas_raid_template);
12844     sas_release_transport(mpt3sas_transport_template);
12845     mpt3sas_exit_debugfs();
12846 }
12847 
12848 /**
12849  * _mpt3sas_init - main entry point for this driver.
12850  *
12851  * Return: 0 success, anything else error.
12852  */
12853 static int __init
12854 _mpt3sas_init(void)
12855 {
12856     int error;
12857 
12858     pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
12859                     MPT3SAS_DRIVER_VERSION);
12860 
12861     mpt3sas_transport_template =
12862         sas_attach_transport(&mpt3sas_transport_functions);
12863     if (!mpt3sas_transport_template)
12864         return -ENODEV;
12865 
12866     /* No need to attach the mpt3sas raid functions template
12867      * if hbas_to_enumerate value is one.
12868      */
12869     if (hbas_to_enumerate != 1) {
12870         mpt3sas_raid_template =
12871                 raid_class_attach(&mpt3sas_raid_functions);
12872         if (!mpt3sas_raid_template) {
12873             sas_release_transport(mpt3sas_transport_template);
12874             return -ENODEV;
12875         }
12876     }
12877 
12878     /* No need to attach mpt2sas raid functions template
12879      * if hbas_to_enumerate value is two.
12880      */
12881     if (hbas_to_enumerate != 2) {
12882         mpt2sas_raid_template =
12883                 raid_class_attach(&mpt2sas_raid_functions);
12884         if (!mpt2sas_raid_template) {
12885             sas_release_transport(mpt3sas_transport_template);
12886             return -ENODEV;
12887         }
12888     }
12889 
12890     error = scsih_init();
12891     if (error) {
12892         scsih_exit();
12893         return error;
12894     }
12895 
12896     mpt3sas_ctl_init(hbas_to_enumerate);
12897 
12898     error = pci_register_driver(&mpt3sas_driver);
12899     if (error)
12900         scsih_exit();
12901 
12902     return error;
12903 }
12904 
12905 /**
12906  * _mpt3sas_exit - exit point for this driver (when it is a module).
12907  *
12908  */
12909 static void __exit
12910 _mpt3sas_exit(void)
12911 {
12912     pr_info("mpt3sas version %s unloading\n",
12913                 MPT3SAS_DRIVER_VERSION);
12914 
12915     mpt3sas_ctl_exit(hbas_to_enumerate);
12916 
12917     pci_unregister_driver(&mpt3sas_driver);
12918 
12919     scsih_exit();
12920 }
12921 
12922 module_init(_mpt3sas_init);
12923 module_exit(_mpt3sas_exit);