Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  *  linux/drivers/scsi/esas2r/esas2r_init.c
0003  *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
0004  *
0005  *  Copyright (c) 2001-2013 ATTO Technology, Inc.
0006  *  (mailto:linuxdrivers@attotech.com)
0007  *
0008  * This program is free software; you can redistribute it and/or
0009  * modify it under the terms of the GNU General Public License
0010  * as published by the Free Software Foundation; either version 2
0011  * of the License, or (at your option) any later version.
0012  *
0013  * This program is distributed in the hope that it will be useful,
0014  * but WITHOUT ANY WARRANTY; without even the implied warranty of
0015  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
0016  * GNU General Public License for more details.
0017  *
0018  * NO WARRANTY
0019  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
0020  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
0021  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
0022  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
0023  * solely responsible for determining the appropriateness of using and
0024  * distributing the Program and assumes all risks associated with its
0025  * exercise of rights under this Agreement, including but not limited to
0026  * the risks and costs of program errors, damage to or loss of data,
0027  * programs or equipment, and unavailability or interruption of operations.
0028  *
0029  * DISCLAIMER OF LIABILITY
0030  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
0031  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
0032  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
0033  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
0034  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
0035  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
0036  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
0037  *
0038  * You should have received a copy of the GNU General Public License
0039  * along with this program; if not, write to the Free Software
0040  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
0041  * USA.
0042  */
0043 
0044 #include "esas2r.h"
0045 
/*
 * Allocate a coherent DMA buffer for @mem_desc, over-allocating by @align
 * bytes so both the CPU and bus addresses can be rounded up to the
 * requested alignment.  Returns false on allocation failure.
 *
 * On success the descriptor holds:
 *   esas2r_data  - original (unaligned) CPU address, needed when freeing
 *   esas2r_param - padded allocation size, needed when freeing
 *   virt_addr    - aligned CPU address
 *   phys_addr    - aligned bus address
 */
static bool esas2r_initmem_alloc(struct esas2r_adapter *a,
                 struct esas2r_mem_desc *mem_desc,
                 u32 align)
{
    /* pad the request so there is always room to align upward */
    mem_desc->esas2r_param = mem_desc->size + align;
    mem_desc->virt_addr = NULL;
    mem_desc->phys_addr = 0;
    mem_desc->esas2r_data = dma_alloc_coherent(&a->pcid->dev,
                           (size_t)mem_desc->
                           esas2r_param,
                           (dma_addr_t *)&mem_desc->
                           phys_addr,
                           GFP_KERNEL);

    if (mem_desc->esas2r_data == NULL) {
        esas2r_log(ESAS2R_LOG_CRIT,
               "failed to allocate %lu bytes of consistent memory!",
               (long
                unsigned
                int)mem_desc->esas2r_param);
        return false;
    }

    /*
     * Round both addresses up by the same amount; esas2r_initmem_free()
     * later recovers the original physical address from the difference.
     */
    mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align);
    mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align);
    memset(mem_desc->virt_addr, 0, mem_desc->size);
    return true;
}
0074 
/*
 * Free a buffer obtained from esas2r_initmem_alloc().  A no-op if the
 * descriptor was never successfully allocated (virt_addr == NULL).
 */
static void esas2r_initmem_free(struct esas2r_adapter *a,
                struct esas2r_mem_desc *mem_desc)
{
    if (mem_desc->virt_addr == NULL)
        return;

    /*
     * Careful!  phys_addr and virt_addr may have been adjusted from the
     * original allocation in order to return the desired alignment.  That
     * means we have to use the original address (in esas2r_data) and size
     * (esas2r_param) and calculate the original physical address based on
     * the difference between the requested and actual allocation size.
     */
    if (mem_desc->phys_addr) {
        /* bytes the addresses were advanced by during alignment */
        int unalign = ((u8 *)mem_desc->virt_addr) -
                  ((u8 *)mem_desc->esas2r_data);

        dma_free_coherent(&a->pcid->dev,
                  (size_t)mem_desc->esas2r_param,
                  mem_desc->esas2r_data,
                  (dma_addr_t)(mem_desc->phys_addr - unalign));
    } else {
        /* no DMA mapping was made; plain kernel memory */
        kfree(mem_desc->esas2r_data);
    }

    mem_desc->virt_addr = NULL;
}
0102 
0103 static bool alloc_vda_req(struct esas2r_adapter *a,
0104               struct esas2r_request *rq)
0105 {
0106     struct esas2r_mem_desc *memdesc = kzalloc(
0107         sizeof(struct esas2r_mem_desc), GFP_KERNEL);
0108 
0109     if (memdesc == NULL) {
0110         esas2r_hdebug("could not alloc mem for vda request memdesc\n");
0111         return false;
0112     }
0113 
0114     memdesc->size = sizeof(union atto_vda_req) +
0115             ESAS2R_DATA_BUF_LEN;
0116 
0117     if (!esas2r_initmem_alloc(a, memdesc, 256)) {
0118         esas2r_hdebug("could not alloc mem for vda request\n");
0119         kfree(memdesc);
0120         return false;
0121     }
0122 
0123     a->num_vrqs++;
0124     list_add(&memdesc->next_desc, &a->vrq_mds_head);
0125 
0126     rq->vrq_md = memdesc;
0127     rq->vrq = (union atto_vda_req *)memdesc->virt_addr;
0128     rq->vrq->scsi.handle = a->num_vrqs;
0129 
0130     return true;
0131 }
0132 
0133 static void esas2r_unmap_regions(struct esas2r_adapter *a)
0134 {
0135     if (a->regs)
0136         iounmap((void __iomem *)a->regs);
0137 
0138     a->regs = NULL;
0139 
0140     pci_release_region(a->pcid, 2);
0141 
0142     if (a->data_window)
0143         iounmap((void __iomem *)a->data_window);
0144 
0145     a->data_window = NULL;
0146 
0147     pci_release_region(a->pcid, 0);
0148 }
0149 
0150 static int esas2r_map_regions(struct esas2r_adapter *a)
0151 {
0152     int error;
0153 
0154     a->regs = NULL;
0155     a->data_window = NULL;
0156 
0157     error = pci_request_region(a->pcid, 2, a->name);
0158     if (error != 0) {
0159         esas2r_log(ESAS2R_LOG_CRIT,
0160                "pci_request_region(2) failed, error %d",
0161                error);
0162 
0163         return error;
0164     }
0165 
0166     a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2),
0167                       pci_resource_len(a->pcid, 2));
0168     if (a->regs == NULL) {
0169         esas2r_log(ESAS2R_LOG_CRIT,
0170                "ioremap failed for regs mem region\n");
0171         pci_release_region(a->pcid, 2);
0172         return -EFAULT;
0173     }
0174 
0175     error = pci_request_region(a->pcid, 0, a->name);
0176     if (error != 0) {
0177         esas2r_log(ESAS2R_LOG_CRIT,
0178                "pci_request_region(2) failed, error %d",
0179                error);
0180         esas2r_unmap_regions(a);
0181         return error;
0182     }
0183 
0184     a->data_window = (void __force *)ioremap(pci_resource_start(a->pcid,
0185                                     0),
0186                          pci_resource_len(a->pcid, 0));
0187     if (a->data_window == NULL) {
0188         esas2r_log(ESAS2R_LOG_CRIT,
0189                "ioremap failed for data_window mem region\n");
0190         esas2r_unmap_regions(a);
0191         return -EFAULT;
0192     }
0193 
0194     return 0;
0195 }
0196 
/*
 * Select the interrupt delivery mode for the adapter.  MSI is attempted
 * when requested; on failure, or for any unrecognized mode, we fall back
 * to legacy INTx interrupts.
 *
 * NOTE(review): the default case logs the module-level 'interrupt_mode'
 * variable rather than the intr_mode argument -- presumably these match
 * on the first-init path, but on resume (a->intr_mode is passed) the
 * logged value could differ; verify against callers.
 */
static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode)
{
    int i;

    /* Set up interrupt mode based on the requested value */
    switch (intr_mode) {
    case INTR_MODE_LEGACY:
use_legacy_interrupts:
        a->intr_mode = INTR_MODE_LEGACY;
        break;

    case INTR_MODE_MSI:
        i = pci_enable_msi(a->pcid);
        if (i != 0) {
            esas2r_log(ESAS2R_LOG_WARN,
                   "failed to enable MSI for adapter %d, "
                   "falling back to legacy interrupts "
                   "(err=%d)", a->index,
                   i);
            goto use_legacy_interrupts;
        }
        a->intr_mode = INTR_MODE_MSI;
        /* AF2_MSI_ENABLED tells teardown to pci_disable_msi() */
        set_bit(AF2_MSI_ENABLED, &a->flags2);
        break;


    default:
        esas2r_log(ESAS2R_LOG_WARN,
               "unknown interrupt_mode %d requested, "
               "falling back to legacy interrupt",
               interrupt_mode);
        goto use_legacy_interrupts;
    }
}
0231 
0232 static void esas2r_claim_interrupts(struct esas2r_adapter *a)
0233 {
0234     unsigned long flags = 0;
0235 
0236     if (a->intr_mode == INTR_MODE_LEGACY)
0237         flags |= IRQF_SHARED;
0238 
0239     esas2r_log(ESAS2R_LOG_INFO,
0240            "esas2r_claim_interrupts irq=%d (%p, %s, %lx)",
0241            a->pcid->irq, a, a->name, flags);
0242 
0243     if (request_irq(a->pcid->irq,
0244             (a->intr_mode ==
0245              INTR_MODE_LEGACY) ? esas2r_interrupt :
0246             esas2r_msi_interrupt,
0247             flags,
0248             a->name,
0249             a)) {
0250         esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X",
0251                a->pcid->irq);
0252         return;
0253     }
0254 
0255     set_bit(AF2_IRQ_CLAIMED, &a->flags2);
0256     esas2r_log(ESAS2R_LOG_INFO,
0257            "claimed IRQ %d flags: 0x%lx",
0258            a->pcid->irq, flags);
0259 }
0260 
/*
 * One-time initialization of adapter @index: sets up the adapter struct
 * embedded in @host's hostdata, DMA masks, locks/lists, VDA request
 * pool, PCI BARs, interrupts, uncached DMA area, and finally brings the
 * hardware up.  Returns 1 on success, 0 on failure; on failure the
 * partially-built adapter is torn down via esas2r_kill_adapter().
 */
int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
            int index)
{
    struct esas2r_adapter *a;
    u64 bus_addr = 0;
    int i;
    void *next_uncached;
    struct esas2r_request *first_request, *last_request;
    bool dma64 = false;

    if (index >= MAX_ADAPTERS) {
        esas2r_log(ESAS2R_LOG_CRIT,
               "tried to init invalid adapter index %u!",
               index);
        return 0;
    }

    if (esas2r_adapters[index]) {
        esas2r_log(ESAS2R_LOG_CRIT,
               "tried to init existing adapter index %u!",
               index);
        return 0;
    }

    /* the adapter struct lives in the Scsi_Host's hostdata area */
    a = (struct esas2r_adapter *)host->hostdata;
    memset(a, 0, sizeof(struct esas2r_adapter));
    a->pcid = pcid;
    a->host = host;

    /* prefer 64-bit DMA when the platform requires/supports it */
    if (sizeof(dma_addr_t) > 4 &&
        dma_get_required_mask(&pcid->dev) > DMA_BIT_MASK(32) &&
        !dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(64)))
        dma64 = true;

    if (!dma64 && dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(32))) {
        esas2r_log(ESAS2R_LOG_CRIT, "failed to set DMA mask");
        esas2r_kill_adapter(index);
        return 0;
    }

    esas2r_log_dev(ESAS2R_LOG_INFO, &pcid->dev,
               "%s-bit PCI addressing enabled\n", dma64 ? "64" : "32");

    esas2r_adapters[index] = a;
    sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index);
    esas2r_debug("new adapter %p, name %s", a, a->name);
    spin_lock_init(&a->request_lock);
    spin_lock_init(&a->fw_event_lock);
    mutex_init(&a->fm_api_mutex);
    mutex_init(&a->fs_api_mutex);
    sema_init(&a->nvram_semaphore, 1);

    /* firmware events stay off until init completes */
    esas2r_fw_event_off(a);
    snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d",
         a->index);
    a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name);

    init_waitqueue_head(&a->buffered_ioctl_waiter);
    init_waitqueue_head(&a->nvram_waiter);
    init_waitqueue_head(&a->fm_api_waiter);
    init_waitqueue_head(&a->fs_api_waiter);
    init_waitqueue_head(&a->vda_waiter);

    INIT_LIST_HEAD(&a->general_req.req_list);
    INIT_LIST_HEAD(&a->active_list);
    INIT_LIST_HEAD(&a->defer_list);
    INIT_LIST_HEAD(&a->free_sg_list_head);
    INIT_LIST_HEAD(&a->avail_request);
    INIT_LIST_HEAD(&a->vrq_mds_head);
    INIT_LIST_HEAD(&a->fw_event_list);

    /* request structures were allocated contiguously after the adapter */
    first_request = (struct esas2r_request *)((u8 *)(a + 1));

    /*
     * i starts at 1, so num_requests - 1 requests are prepared here --
     * presumably slot 0 is reserved; confirm against the request-table
     * usage elsewhere in the driver.
     */
    for (last_request = first_request, i = 1; i < num_requests;
         last_request++, i++) {
        INIT_LIST_HEAD(&last_request->req_list);
        list_add_tail(&last_request->comp_list, &a->avail_request);
        if (!alloc_vda_req(a, last_request)) {
            esas2r_log(ESAS2R_LOG_CRIT,
                   "failed to allocate a VDA request!");
            esas2r_kill_adapter(index);
            return 0;
        }
    }

    esas2r_debug("requests: %p to %p (%d, %d)", first_request,
             last_request,
             sizeof(*first_request),
             num_requests);

    if (esas2r_map_regions(a) != 0) {
        esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!");
        esas2r_kill_adapter(index);
        return 0;
    }

    a->index = index;

    /* interrupts will be disabled until we are done with init */
    atomic_inc(&a->dis_ints_cnt);
    atomic_inc(&a->disable_cnt);
    set_bit(AF_CHPRST_PENDING, &a->flags);
    set_bit(AF_DISC_PENDING, &a->flags);
    set_bit(AF_FIRST_INIT, &a->flags);
    set_bit(AF_LEGACY_SGE_MODE, &a->flags);

    a->init_msg = ESAS2R_INIT_MSG_START;
    a->max_vdareq_size = 128;
    a->build_sgl = esas2r_build_sg_list_sge;

    esas2r_setup_interrupts(a, interrupt_mode);

    /* uncached (coherent) area shared between driver and firmware */
    a->uncached_size = esas2r_get_uncached_size(a);
    a->uncached = dma_alloc_coherent(&pcid->dev,
                     (size_t)a->uncached_size,
                     (dma_addr_t *)&bus_addr,
                     GFP_KERNEL);
    if (a->uncached == NULL) {
        esas2r_log(ESAS2R_LOG_CRIT,
               "failed to allocate %d bytes of consistent memory!",
               a->uncached_size);
        esas2r_kill_adapter(index);
        return 0;
    }

    a->uncached_phys = bus_addr;

    esas2r_debug("%d bytes uncached memory allocated @ %p (%x:%x)",
             a->uncached_size,
             a->uncached,
             upper_32_bits(bus_addr),
             lower_32_bits(bus_addr));
    memset(a->uncached, 0, a->uncached_size);
    next_uncached = a->uncached;

    /* carve the uncached area up and finish struct initialization */
    if (!esas2r_init_adapter_struct(a,
                    &next_uncached)) {
        esas2r_log(ESAS2R_LOG_CRIT,
               "failed to initialize adapter structure (2)!");
        esas2r_kill_adapter(index);
        return 0;
    }

    tasklet_init(&a->tasklet,
             esas2r_adapter_tasklet,
             (unsigned long)a);

    /*
     * Disable chip interrupts to prevent spurious interrupts
     * until we claim the IRQ.
     */
    esas2r_disable_chip_interrupts(a);
    esas2r_check_adapter(a);

    /* hardware init failure is logged but not fatal here */
    if (!esas2r_init_adapter_hw(a, true)) {
        esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!");
    } else {
        esas2r_debug("esas2r_init_adapter ok");
    }

    esas2r_claim_interrupts(a);

    if (test_bit(AF2_IRQ_CLAIMED, &a->flags2))
        esas2r_enable_chip_interrupts(a);

    set_bit(AF2_INIT_DONE, &a->flags2);
    if (!test_bit(AF_DEGRADED_MODE, &a->flags))
        esas2r_kickoff_timer(a);
    esas2r_debug("esas2r_init_adapter done for %p (%d)",
             a, a->disable_cnt);

    return 1;
}
0434 
/*
 * Quiesce the adapter and release most of its resources.  Used both for
 * suspend (@power_management != 0, which keeps the timer/tasklet alive)
 * and for full teardown via esas2r_kill_adapter().  Everything freed
 * here is re-created by the init/resume paths.
 */
static void esas2r_adapter_power_down(struct esas2r_adapter *a,
                      int power_management)
{
    struct esas2r_mem_desc *memdesc, *next;

    if ((test_bit(AF2_INIT_DONE, &a->flags2))
        &&  (!test_bit(AF_DEGRADED_MODE, &a->flags))) {
        if (!power_management) {
            del_timer_sync(&a->timer);
            tasklet_kill(&a->tasklet);
        }
        esas2r_power_down(a);

        /*
         * There are versions of firmware that do not handle the sync
         * cache command correctly.  Stall here to ensure that the
         * cache is lazily flushed.
         */
        mdelay(500);
        esas2r_debug("chip halted");
    }

    /* Remove sysfs binary files */
    if (a->sysfs_fw_created) {
        sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fw);
        a->sysfs_fw_created = 0;
    }

    if (a->sysfs_fs_created) {
        sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fs);
        a->sysfs_fs_created = 0;
    }

    if (a->sysfs_vda_created) {
        sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_vda);
        a->sysfs_vda_created = 0;
    }

    if (a->sysfs_hw_created) {
        sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_hw);
        a->sysfs_hw_created = 0;
    }

    if (a->sysfs_live_nvram_created) {
        sysfs_remove_bin_file(&a->host->shost_dev.kobj,
                      &bin_attr_live_nvram);
        a->sysfs_live_nvram_created = 0;
    }

    if (a->sysfs_default_nvram_created) {
        sysfs_remove_bin_file(&a->host->shost_dev.kobj,
                      &bin_attr_default_nvram);
        a->sysfs_default_nvram_created = 0;
    }

    /* Clean up interrupts */
    if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
        esas2r_log_dev(ESAS2R_LOG_INFO,
                   &(a->pcid->dev),
                   "free_irq(%d) called", a->pcid->irq);

        free_irq(a->pcid->irq, a);
        esas2r_debug("IRQ released");
        clear_bit(AF2_IRQ_CLAIMED, &a->flags2);
    }

    if (test_bit(AF2_MSI_ENABLED, &a->flags2)) {
        pci_disable_msi(a->pcid);
        clear_bit(AF2_MSI_ENABLED, &a->flags2);
        esas2r_debug("MSI disabled");
    }

    if (a->inbound_list_md.virt_addr)
        esas2r_initmem_free(a, &a->inbound_list_md);

    if (a->outbound_list_md.virt_addr)
        esas2r_initmem_free(a, &a->outbound_list_md);

    /* free the scatter/gather list pages */
    list_for_each_entry_safe(memdesc, next, &a->free_sg_list_head,
                 next_desc) {
        esas2r_initmem_free(a, memdesc);
    }

    /* Following frees everything allocated via alloc_vda_req */
    list_for_each_entry_safe(memdesc, next, &a->vrq_mds_head, next_desc) {
        esas2r_initmem_free(a, memdesc);
        list_del(&memdesc->next_desc);
        kfree(memdesc);
    }

    kfree(a->first_ae_req);
    a->first_ae_req = NULL;

    kfree(a->sg_list_mds);
    a->sg_list_mds = NULL;

    kfree(a->req_table);
    a->req_table = NULL;

    if (a->regs) {
        esas2r_unmap_regions(a);
        a->regs = NULL;
        a->data_window = NULL;
        esas2r_debug("regions unmapped");
    }
}
0541 
/*
 * Release/free allocated resources for specified adapters.
 *
 * Full teardown of adapter slot @i: powers the hardware down, frees the
 * remaining DMA buffers and ioctl state, destroys the firmware-event
 * workqueue, disables the PCI device, clears the slot in
 * esas2r_adapters[], and finally removes/puts the SCSI host if init had
 * completed.  Safe to call when the slot is already empty.
 */
void esas2r_kill_adapter(int i)
{
    struct esas2r_adapter *a = esas2r_adapters[i];

    if (a) {
        unsigned long flags;
        struct workqueue_struct *wq;
        esas2r_debug("killing adapter %p [%d] ", a, i);
        esas2r_fw_event_off(a);
        esas2r_adapter_power_down(a, 0);
        /* the shared buffered-ioctl area belongs to one adapter only */
        if (esas2r_buffered_ioctl &&
            (a->pcid == esas2r_buffered_ioctl_pcid)) {
            dma_free_coherent(&a->pcid->dev,
                      (size_t)esas2r_buffered_ioctl_size,
                      esas2r_buffered_ioctl,
                      esas2r_buffered_ioctl_addr);
            esas2r_buffered_ioctl = NULL;
        }

        if (a->vda_buffer) {
            dma_free_coherent(&a->pcid->dev,
                      (size_t)VDA_MAX_BUFFER_SIZE,
                      a->vda_buffer,
                      (dma_addr_t)a->ppvda_buffer);
            a->vda_buffer = NULL;
        }
        if (a->fs_api_buffer) {
            dma_free_coherent(&a->pcid->dev,
                      (size_t)a->fs_api_buffer_size,
                      a->fs_api_buffer,
                      (dma_addr_t)a->ppfs_api_buffer);
            a->fs_api_buffer = NULL;
        }

        kfree(a->local_atto_ioctl);
        a->local_atto_ioctl = NULL;

        /* detach the workqueue under the lock, destroy it outside */
        spin_lock_irqsave(&a->fw_event_lock, flags);
        wq = a->fw_event_q;
        a->fw_event_q = NULL;
        spin_unlock_irqrestore(&a->fw_event_lock, flags);
        if (wq)
            destroy_workqueue(wq);

        if (a->uncached) {
            dma_free_coherent(&a->pcid->dev,
                      (size_t)a->uncached_size,
                      a->uncached,
                      (dma_addr_t)a->uncached_phys);
            a->uncached = NULL;
            esas2r_debug("uncached area freed");
        }

        esas2r_log_dev(ESAS2R_LOG_INFO,
                   &(a->pcid->dev),
                   "pci_disable_device() called.  msix_enabled: %d "
                   "msi_enabled: %d irq: %d pin: %d",
                   a->pcid->msix_enabled,
                   a->pcid->msi_enabled,
                   a->pcid->irq,
                   a->pcid->pin);

        esas2r_log_dev(ESAS2R_LOG_INFO,
                   &(a->pcid->dev),
                   "before pci_disable_device() enable_cnt: %d",
                   a->pcid->enable_cnt.counter);

        pci_disable_device(a->pcid);
        esas2r_log_dev(ESAS2R_LOG_INFO,
                   &(a->pcid->dev),
                   "after pci_disable_device() enable_cnt: %d",
                   a->pcid->enable_cnt.counter);

        esas2r_log_dev(ESAS2R_LOG_INFO,
                   &(a->pcid->dev),
                   "pci_set_drv_data(%p, NULL) called",
                   a->pcid);

        pci_set_drvdata(a->pcid, NULL);
        esas2r_adapters[i] = NULL;

        if (test_bit(AF2_INIT_DONE, &a->flags2)) {
            clear_bit(AF2_INIT_DONE, &a->flags2);

            set_bit(AF_DEGRADED_MODE, &a->flags);

            esas2r_log_dev(ESAS2R_LOG_INFO,
                       &(a->host->shost_gendev),
                       "scsi_remove_host() called");

            scsi_remove_host(a->host);

            esas2r_log_dev(ESAS2R_LOG_INFO,
                       &(a->host->shost_gendev),
                       "scsi_host_put() called");

            /* a lives in hostdata; it is invalid after this put */
            scsi_host_put(a->host);
        }
    }
}
0643 
0644 static int __maybe_unused esas2r_suspend(struct device *dev)
0645 {
0646     struct Scsi_Host *host = dev_get_drvdata(dev);
0647     struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
0648 
0649     esas2r_log_dev(ESAS2R_LOG_INFO, dev, "suspending adapter()");
0650     if (!a)
0651         return -ENODEV;
0652 
0653     esas2r_adapter_power_down(a, 1);
0654     esas2r_log_dev(ESAS2R_LOG_INFO, dev, "esas2r_suspend(): 0");
0655     return 0;
0656 }
0657 
/*
 * PM callback: re-map BARs, re-establish the interrupt mode chosen at
 * init, power the chip back up and re-claim the IRQ.  Returns 0 on
 * success or a negative errno.
 */
static int __maybe_unused esas2r_resume(struct device *dev)
{
    struct Scsi_Host *host = dev_get_drvdata(dev);
    struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
    int rez = 0;

    esas2r_log_dev(ESAS2R_LOG_INFO, dev, "resuming adapter()");

    if (!a) {
        rez = -ENODEV;
        goto error_exit;
    }

    if (esas2r_map_regions(a) != 0) {
        esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!");
        rez = -ENOMEM;
        goto error_exit;
    }

    /* Set up interrupt mode (re-use the mode selected at init) */
    esas2r_setup_interrupts(a, a->intr_mode);

    /*
     * Disable chip interrupts to prevent spurious interrupts until we
     * claim the IRQ.
     */
    esas2r_disable_chip_interrupts(a);
    if (!esas2r_power_up(a, true)) {
        esas2r_debug("yikes, esas2r_power_up failed");
        rez = -ENOMEM;
        goto error_exit;
    }

    esas2r_claim_interrupts(a);

    if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
        /*
         * Now that system interrupt(s) are claimed, we can enable
         * chip interrupts.
         */
        esas2r_enable_chip_interrupts(a);
        esas2r_kickoff_timer(a);
    } else {
        esas2r_debug("yikes, unable to claim IRQ");
        esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!");
        rez = -ENOMEM;
        goto error_exit;
    }

error_exit:
    esas2r_log_dev(ESAS2R_LOG_CRIT, dev, "esas2r_resume(): %d",
               rez);
    return rez;
}
0712 
0713 SIMPLE_DEV_PM_OPS(esas2r_pm_ops, esas2r_suspend, esas2r_resume);
0714 
/*
 * Mark the adapter degraded and log the reason.  Always returns false
 * so init-path callers can simply 'return esas2r_set_degraded_mode(...)'.
 */
bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
{
    set_bit(AF_DEGRADED_MODE, &a->flags);
    esas2r_log(ESAS2R_LOG_CRIT,
           "setting adapter to degraded mode: %s\n", error_str);
    return false;
}
0722 
/*
 * Total bytes of coherent DMA memory needed for the adapter's uncached
 * area.  Must agree exactly with the carving performed by
 * esas2r_init_adapter_struct(): NVRAM image, discovery buffer, outbound
 * list copy pointer, S/G list pages, inbound/outbound lists, plus
 * alignment slack.
 */
u32 esas2r_get_uncached_size(struct esas2r_adapter *a)
{
    return sizeof(struct esas2r_sas_nvram)
           + ALIGN(ESAS2R_DISC_BUF_LEN, 8)
           + ALIGN(sizeof(u32), 8) /* outbound list copy pointer */
           + 8
           + (num_sg_lists * (u16)sgl_page_size)
           + ALIGN((num_requests + num_ae_requests + 1 +
            ESAS2R_LIST_EXTRA) *
               sizeof(struct esas2r_inbound_list_source_entry),
               8)
           + ALIGN((num_requests + num_ae_requests + 1 +
            ESAS2R_LIST_EXTRA) *
               sizeof(struct atto_vda_ob_rsp), 8)
           + 256; /* VDA request and buffer align */
}
0739 
0740 static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
0741 {
0742     if (pci_is_pcie(a->pcid)) {
0743         u16 devcontrol;
0744 
0745         pcie_capability_read_word(a->pcid, PCI_EXP_DEVCTL, &devcontrol);
0746 
0747         if ((devcontrol & PCI_EXP_DEVCTL_READRQ) >
0748              PCI_EXP_DEVCTL_READRQ_512B) {
0749             esas2r_log(ESAS2R_LOG_INFO,
0750                    "max read request size > 512B");
0751 
0752             devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
0753             devcontrol |= PCI_EXP_DEVCTL_READRQ_512B;
0754             pcie_capability_write_word(a->pcid, PCI_EXP_DEVCTL,
0755                            devcontrol);
0756         }
0757     }
0758 }
0759 
0760 /*
0761  * Determine the organization of the uncached data area and
0762  * finish initializing the adapter structure
0763  */
0764 bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
0765                 void **uncached_area)
0766 {
0767     u32 i;
0768     u8 *high;
0769     struct esas2r_inbound_list_source_entry *element;
0770     struct esas2r_request *rq;
0771     struct esas2r_mem_desc *sgl;
0772 
0773     spin_lock_init(&a->sg_list_lock);
0774     spin_lock_init(&a->mem_lock);
0775     spin_lock_init(&a->queue_lock);
0776 
0777     a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS];
0778 
0779     if (!alloc_vda_req(a, &a->general_req)) {
0780         esas2r_hdebug(
0781             "failed to allocate a VDA request for the general req!");
0782         return false;
0783     }
0784 
0785     /* allocate requests for asynchronous events */
0786     a->first_ae_req =
0787         kcalloc(num_ae_requests, sizeof(struct esas2r_request),
0788             GFP_KERNEL);
0789 
0790     if (a->first_ae_req == NULL) {
0791         esas2r_log(ESAS2R_LOG_CRIT,
0792                "failed to allocate memory for asynchronous events");
0793         return false;
0794     }
0795 
0796     /* allocate the S/G list memory descriptors */
0797     a->sg_list_mds = kcalloc(num_sg_lists, sizeof(struct esas2r_mem_desc),
0798                  GFP_KERNEL);
0799 
0800     if (a->sg_list_mds == NULL) {
0801         esas2r_log(ESAS2R_LOG_CRIT,
0802                "failed to allocate memory for s/g list descriptors");
0803         return false;
0804     }
0805 
0806     /* allocate the request table */
0807     a->req_table =
0808         kcalloc(num_requests + num_ae_requests + 1,
0809             sizeof(struct esas2r_request *),
0810             GFP_KERNEL);
0811 
0812     if (a->req_table == NULL) {
0813         esas2r_log(ESAS2R_LOG_CRIT,
0814                "failed to allocate memory for the request table");
0815         return false;
0816     }
0817 
0818     /* initialize PCI configuration space */
0819     esas2r_init_pci_cfg_space(a);
0820 
0821     /*
0822      * the thunder_stream boards all have a serial flash part that has a
0823      * different base address on the AHB bus.
0824      */
0825     if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID)
0826         && (a->pcid->subsystem_device & ATTO_SSDID_TBT))
0827         a->flags2 |= AF2_THUNDERBOLT;
0828 
0829     if (test_bit(AF2_THUNDERBOLT, &a->flags2))
0830         a->flags2 |= AF2_SERIAL_FLASH;
0831 
0832     if (a->pcid->subsystem_device == ATTO_TLSH_1068)
0833         a->flags2 |= AF2_THUNDERLINK;
0834 
0835     /* Uncached Area */
0836     high = (u8 *)*uncached_area;
0837 
0838     /* initialize the scatter/gather table pages */
0839 
0840     for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) {
0841         sgl->size = sgl_page_size;
0842 
0843         list_add_tail(&sgl->next_desc, &a->free_sg_list_head);
0844 
0845         if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) {
0846             /* Allow the driver to load if the minimum count met. */
0847             if (i < NUM_SGL_MIN)
0848                 return false;
0849             break;
0850         }
0851     }
0852 
0853     /* compute the size of the lists */
0854     a->list_size = num_requests + ESAS2R_LIST_EXTRA;
0855 
0856     /* allocate the inbound list */
0857     a->inbound_list_md.size = a->list_size *
0858                   sizeof(struct
0859                      esas2r_inbound_list_source_entry);
0860 
0861     if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) {
0862         esas2r_hdebug("failed to allocate IB list");
0863         return false;
0864     }
0865 
0866     /* allocate the outbound list */
0867     a->outbound_list_md.size = a->list_size *
0868                    sizeof(struct atto_vda_ob_rsp);
0869 
0870     if (!esas2r_initmem_alloc(a, &a->outbound_list_md,
0871                   ESAS2R_LIST_ALIGN)) {
0872         esas2r_hdebug("failed to allocate IB list");
0873         return false;
0874     }
0875 
0876     /* allocate the NVRAM structure */
0877     a->nvram = (struct esas2r_sas_nvram *)high;
0878     high += sizeof(struct esas2r_sas_nvram);
0879 
0880     /* allocate the discovery buffer */
0881     a->disc_buffer = high;
0882     high += ESAS2R_DISC_BUF_LEN;
0883     high = PTR_ALIGN(high, 8);
0884 
0885     /* allocate the outbound list copy pointer */
0886     a->outbound_copy = (u32 volatile *)high;
0887     high += sizeof(u32);
0888 
0889     if (!test_bit(AF_NVR_VALID, &a->flags))
0890         esas2r_nvram_set_defaults(a);
0891 
0892     /* update the caller's uncached memory area pointer */
0893     *uncached_area = (void *)high;
0894 
0895     /* initialize the allocated memory */
0896     if (test_bit(AF_FIRST_INIT, &a->flags)) {
0897         esas2r_targ_db_initialize(a);
0898 
0899         /* prime parts of the inbound list */
0900         element =
0901             (struct esas2r_inbound_list_source_entry *)a->
0902             inbound_list_md.
0903             virt_addr;
0904 
0905         for (i = 0; i < a->list_size; i++) {
0906             element->address = 0;
0907             element->reserved = 0;
0908             element->length = cpu_to_le32(HWILSE_INTERFACE_F0
0909                               | (sizeof(union
0910                                 atto_vda_req)
0911                              /
0912                              sizeof(u32)));
0913             element++;
0914         }
0915 
0916         /* init the AE requests */
0917         for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++,
0918              i++) {
0919             INIT_LIST_HEAD(&rq->req_list);
0920             if (!alloc_vda_req(a, rq)) {
0921                 esas2r_hdebug(
0922                     "failed to allocate a VDA request!");
0923                 return false;
0924             }
0925 
0926             esas2r_rq_init_request(rq, a);
0927 
0928             /* override the completion function */
0929             rq->comp_cb = esas2r_ae_complete;
0930         }
0931     }
0932 
0933     return true;
0934 }
0935 
/*
 * This code will verify that the chip is operational.
 *
 * Waits for the firmware to come ready, negotiates the firmware API
 * version (which selects the legacy SGE vs. PRD scatter/gather builder),
 * then programs the inbound/outbound communication list registers and
 * waits for the firmware to acknowledge interface initialization.
 * On any unrecoverable failure the adapter is put in degraded mode and
 * false is returned.
 */
bool esas2r_check_adapter(struct esas2r_adapter *a)
{
	u32 starttime;
	u32 doorbell;
	u64 ppaddr;
	u32 dw;

	/*
	 * if the chip reset detected flag is set, we can bypass a bunch of
	 * stuff.
	 */
	if (test_bit(AF_CHPRST_DETECTED, &a->flags))
		goto skip_chip_reset;

	/*
	 * BEFORE WE DO ANYTHING, disable the chip interrupts!  the boot driver
	 * may have left them enabled or we may be recovering from a fault.
	 */
	esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK);
	esas2r_flush_register_dword(a, MU_INT_MASK_OUT);

	/*
	 * wait for the firmware to become ready by forcing an interrupt and
	 * waiting for a response.
	 */
	starttime = jiffies_to_msecs(jiffies);

	while (true) {
		esas2r_force_interrupt(a);
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell == 0xFFFFFFFF) {
			/*
			 * Give the firmware up to two seconds to enable
			 * register access after a reset.
			 */
			if ((jiffies_to_msecs(jiffies) - starttime) > 2000)
				return esas2r_set_degraded_mode(a,
								"unable to access registers");
		} else if (doorbell & DRBL_FORCE_INT) {
			/* firmware answered the forced interrupt; the low
			 * doorbell bits carry its API version */
			u32 ver = (doorbell & DRBL_FW_VER_MSK);

			/*
			 * This driver supports version 0 and version 1 of
			 * the API
			 */
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);

			if (ver == DRBL_FW_VER_0) {
				set_bit(AF_LEGACY_SGE_MODE, &a->flags);

				a->max_vdareq_size = 128;
				a->build_sgl = esas2r_build_sg_list_sge;
			} else if (ver == DRBL_FW_VER_1) {
				clear_bit(AF_LEGACY_SGE_MODE, &a->flags);

				a->max_vdareq_size = 1024;
				a->build_sgl = esas2r_build_sg_list_prd;
			} else {
				return esas2r_set_degraded_mode(a,
								"unknown firmware version");
			}
			break;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(100));

		/* allow up to three minutes for the firmware to start */
		if ((jiffies_to_msecs(jiffies) - starttime) > 180000) {
			esas2r_hdebug("FW ready TMO");
			esas2r_bugon();

			return esas2r_set_degraded_mode(a,
							"firmware start has timed out");
		}
	}

	/* purge any asynchronous events since we will repost them later */
	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN);
	starttime = jiffies_to_msecs(jiffies);

	while (true) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell & DRBL_MSG_IFC_DOWN) {
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);
			break;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(50));

		/* best effort: a timeout here is logged but not fatal */
		if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
			esas2r_hdebug("timeout waiting for interface down");
			break;
		}
	}
skip_chip_reset:
	/*
	 * first things first, before we go changing any of these registers
	 * disable the communication lists.
	 */
	dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
	dw &= ~MU_ILC_ENABLE;
	esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
	dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
	dw &= ~MU_OLC_ENABLE;
	esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);

	/* configure the communication list addresses */
	ppaddr = a->inbound_list_md.phys_addr;
	esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO,
				    lower_32_bits(ppaddr));
	esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI,
				    upper_32_bits(ppaddr));
	ppaddr = a->outbound_list_md.phys_addr;
	esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO,
				    lower_32_bits(ppaddr));
	esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI,
				    upper_32_bits(ppaddr));
	/* outbound copy pointer lives in the adapter's uncached area */
	ppaddr = a->uncached_phys +
		 ((u8 *)a->outbound_copy - a->uncached);
	esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO,
				    lower_32_bits(ppaddr));
	esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI,
				    upper_32_bits(ppaddr));

	/*
	 * reset the read and write pointers.  all four pointers start out
	 * equal (list_size - 1) with their toggle bits set, matching the
	 * AF_COMM_LIST_TOGGLE flag set below.
	 */
	*a->outbound_copy =
		a->last_write =
			a->last_read = a->list_size - 1;
	set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
	esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE |
				    a->last_write);
	esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE |
				    a->last_write);
	esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE |
				    a->last_write);
	esas2r_write_register_dword(a, MU_OUT_LIST_WRITE,
				    MU_OLW_TOGGLE | a->last_write);

	/* configure the interface select fields */
	dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG);
	dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST);
	esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG,
				    (dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR));
	dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG);
	dw &= ~(MU_OLIC_LIST | MU_OLIC_SOURCE);
	esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG,
				    (dw | MU_OLIC_LIST_F0 |
				     MU_OLIC_SOURCE_DDR));

	/* finish configuring the communication lists */
	dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
	dw &= ~(MU_ILC_ENTRY_MASK | MU_ILC_NUMBER_MASK);
	dw |= MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC
	      | (a->list_size << MU_ILC_NUMBER_SHIFT);
	esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
	dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
	dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK);
	dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT);
	esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);

	/*
	 * notify the firmware that we're done setting up the communication
	 * list registers.  wait here until the firmware is done configuring
	 * its lists.  it will signal that it is done by enabling the lists.
	 */
	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT);
	starttime = jiffies_to_msecs(jiffies);

	while (true) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell & DRBL_MSG_IFC_INIT) {
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);
			break;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(100));

		/* this timeout IS fatal - go degraded */
		if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
			esas2r_hdebug(
				"timeout waiting for communication list init");
			esas2r_bugon();
			return esas2r_set_degraded_mode(a,
							"timeout waiting for communication list init");
		}
	}

	/*
	 * flag whether the firmware supports the power down doorbell.  we
	 * determine this by reading the inbound doorbell enable mask.
	 */
	doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB);
	if (doorbell & DRBL_POWER_DOWN)
		set_bit(AF2_VDA_POWER_DOWN, &a->flags2);
	else
		clear_bit(AF2_VDA_POWER_DOWN, &a->flags2);

	/*
	 * enable assertion of outbound queue and doorbell interrupts in the
	 * main interrupt cause register.
	 */
	esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK);
	esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK);
	return true;
}
1143 
/*
 * Process the initialization message just completed and format the next one.
 *
 * Drives a small state machine through a->init_msg:
 *   START/REINIT -> INIT -> GET_INIT -> done.
 * Returns true when a new request has been formatted in *rq and should
 * be issued by the caller; returns false when the sequence is finished
 * (rq->req_stat is then forced to RS_SUCCESS).
 */
static bool esas2r_format_init_msg(struct esas2r_adapter *a,
				   struct esas2r_request *rq)
{
	u32 msg = a->init_msg;
	struct atto_vda_cfg_init *ci;

	/* consume the current state; each case sets the next one */
	a->init_msg = 0;

	switch (msg) {
	case ESAS2R_INIT_MSG_START:
	case ESAS2R_INIT_MSG_REINIT:
	{
		/* first message: VDA_CFG_INIT with our SGL page size and
		 * the current wall-clock time */
		esas2r_hdebug("CFG init");
		esas2r_build_cfg_req(a,
				     rq,
				     VDA_CFG_INIT,
				     0,
				     NULL);
		ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
		ci->sgl_page_size = cpu_to_le32(sgl_page_size);
		/* firmware interface overflows in y2106 */
		ci->epoch_time = cpu_to_le32(ktime_get_real_seconds());
		rq->flags |= RF_FAILURE_OK;
		a->init_msg = ESAS2R_INIT_MSG_INIT;
		break;
	}

	case ESAS2R_INIT_MSG_INIT:
		if (rq->req_stat == RS_SUCCESS) {
			u32 major;
			u32 minor;
			u16 fw_release;

			/* pack the firmware release major/minor into the
			 * upper two bytes of fw_version */
			a->fw_version = le16_to_cpu(
				rq->func_rsp.cfg_rsp.vda_version);
			a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
			fw_release = le16_to_cpu(
				rq->func_rsp.cfg_rsp.fw_release);
			major = LOBYTE(fw_release);
			minor = HIBYTE(fw_release);
			a->fw_version += (major << 16) + (minor << 24);
		} else {
			esas2r_hdebug("FAILED");
		}

		/*
		 * the 2.71 and earlier releases of R6xx firmware did not error
		 * unsupported config requests correctly.  only issue
		 * VDA_CFG_GET_INIT2 on Thunderbolt or on firmware newer than
		 * that cutoff (0x00524702 is the byte-swapped 2.71 version
		 * word -- NOTE(review): verify the be32 comparison against
		 * the fw_version packing above).
		 */

		if ((test_bit(AF2_THUNDERBOLT, &a->flags2))
		    || (be32_to_cpu(a->fw_version) > 0x00524702)) {
			esas2r_hdebug("CFG get init");
			esas2r_build_cfg_req(a,
					     rq,
					     VDA_CFG_GET_INIT2,
					     sizeof(struct atto_vda_cfg_init),
					     NULL);

			/* point the request's single PRD element at the
			 * area just past the VDA request itself */
			rq->vrq->cfg.sg_list_offset = offsetof(
				struct atto_vda_cfg_req,
				data.sge);
			rq->vrq->cfg.data.prde.ctl_len =
				cpu_to_le32(sizeof(struct atto_vda_cfg_init));
			rq->vrq->cfg.data.prde.address = cpu_to_le64(
				rq->vrq_md->phys_addr +
				sizeof(union atto_vda_req));
			rq->flags |= RF_FAILURE_OK;
			a->init_msg = ESAS2R_INIT_MSG_GET_INIT;
			break;
		}
		/* old firmware: skip GET_INIT2 and finish the sequence */
		fallthrough;

	case ESAS2R_INIT_MSG_GET_INIT:
		if (msg == ESAS2R_INIT_MSG_GET_INIT) {
			/* harvest the GET_INIT2 response from the buffer
			 * set up above */
			ci = (struct atto_vda_cfg_init *)rq->data_buf;
			if (rq->req_stat == RS_SUCCESS) {
				a->num_targets_backend =
					le32_to_cpu(ci->num_targets_backend);
				a->ioctl_tunnel =
					le32_to_cpu(ci->ioctl_tunnel);
			} else {
				esas2r_hdebug("FAILED");
			}
		}
		fallthrough;

	default:
		/* sequence complete; report overall success */
		rq->req_stat = RS_SUCCESS;
		return false;
	}
	return true;
}
1238 
1239 /*
1240  * Perform initialization messages via the request queue.  Messages are
1241  * performed with interrupts disabled.
1242  */
1243 bool esas2r_init_msgs(struct esas2r_adapter *a)
1244 {
1245     bool success = true;
1246     struct esas2r_request *rq = &a->general_req;
1247 
1248     esas2r_rq_init_request(rq, a);
1249     rq->comp_cb = esas2r_dummy_complete;
1250 
1251     if (a->init_msg == 0)
1252         a->init_msg = ESAS2R_INIT_MSG_REINIT;
1253 
1254     while (a->init_msg) {
1255         if (esas2r_format_init_msg(a, rq)) {
1256             unsigned long flags;
1257             while (true) {
1258                 spin_lock_irqsave(&a->queue_lock, flags);
1259                 esas2r_start_vda_request(a, rq);
1260                 spin_unlock_irqrestore(&a->queue_lock, flags);
1261                 esas2r_wait_request(a, rq);
1262                 if (rq->req_stat != RS_PENDING)
1263                     break;
1264             }
1265         }
1266 
1267         if (rq->req_stat == RS_SUCCESS
1268             || ((rq->flags & RF_FAILURE_OK)
1269             && rq->req_stat != RS_TIMEOUT))
1270             continue;
1271 
1272         esas2r_log(ESAS2R_LOG_CRIT, "init message %x failed (%x, %x)",
1273                a->init_msg, rq->req_stat, rq->flags);
1274         a->init_msg = ESAS2R_INIT_MSG_START;
1275         success = false;
1276         break;
1277     }
1278 
1279     esas2r_rq_destroy_request(rq, a);
1280     return success;
1281 }
1282 
/*
 * Initialize the adapter chip.
 *
 * Reads NVRAM parameters if not yet valid, runs the VDA initialization
 * message sequence, reposts the async event requests, and caches the
 * flash/image/firmware revision strings.  When init_poll is set,
 * discovery is polled to completion here (simulating timer ticks);
 * otherwise it proceeds interrupt-driven.  Returns false if the adapter
 * ended up in degraded mode.
 */
bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
{
	bool rslt = false;
	struct esas2r_request *rq;
	u32 i;

	if (test_bit(AF_DEGRADED_MODE, &a->flags))
		goto exit;

	if (!test_bit(AF_NVR_VALID, &a->flags)) {
		/* non-fatal: defaults were applied during memory init */
		if (!esas2r_nvram_read_direct(a))
			esas2r_log(ESAS2R_LOG_WARN,
				   "invalid/missing NVRAM parameters");
	}

	if (!esas2r_init_msgs(a)) {
		esas2r_set_degraded_mode(a, "init messages failed");
		goto exit;
	}

	/* The firmware is ready. */
	clear_bit(AF_DEGRADED_MODE, &a->flags);
	clear_bit(AF_CHPRST_PENDING, &a->flags);

	/* Post all the async event requests */
	for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++)
		esas2r_start_ae_request(a, rq);

	/* cache revision strings only once (first byte is 0 until read) */
	if (!a->flash_rev[0])
		esas2r_read_flash_rev(a);

	if (!a->image_type[0])
		esas2r_read_image_type(a);

	/* format "<major>.<minor>" from the high word of fw_version */
	if (a->fw_version == 0)
		a->fw_rev[0] = 0;
	else
		sprintf(a->fw_rev, "%1d.%02d",
			(int)LOBYTE(HIWORD(a->fw_version)),
			(int)HIBYTE(HIWORD(a->fw_version)));

	esas2r_hdebug("firmware revision: %s", a->fw_rev);

	/*
	 * chip reset during first init: just re-enable interrupts and
	 * return; the original device polling continues elsewhere.
	 */
	if (test_bit(AF_CHPRST_DETECTED, &a->flags)
	    && (test_bit(AF_FIRST_INIT, &a->flags))) {
		esas2r_enable_chip_interrupts(a);
		return true;
	}

	/* initialize discovery */
	esas2r_disc_initialize(a);

	/*
	 * wait for the device wait time to expire here if requested.  this is
	 * usually requested during initial driver load and possibly when
	 * resuming from a low power state.  deferred device waiting will use
	 * interrupts.  chip reset recovery always defers device waiting to
	 * avoid being in a TASKLET too long.
	 */
	if (init_poll) {
		u32 currtime = a->disc_start_time;
		u32 nexttick = 100;
		u32 deltatime;

		/*
		 * Block Tasklets from getting scheduled and indicate this is
		 * polled discovery.
		 */
		set_bit(AF_TASKLET_SCHEDULED, &a->flags);
		set_bit(AF_DISC_POLLED, &a->flags);

		/*
		 * Temporarily bring the disable count to zero to enable
		 * deferred processing.  Note that the count is already zero
		 * after the first initialization.
		 */
		if (test_bit(AF_FIRST_INIT, &a->flags))
			atomic_dec(&a->disable_cnt);

		while (test_bit(AF_DISC_PENDING, &a->flags)) {
			schedule_timeout_interruptible(msecs_to_jiffies(100));

			/*
			 * Determine the need for a timer tick based on the
			 * delta time between this and the last iteration of
			 * this loop.  We don't use the absolute time because
			 * then we would have to worry about when nexttick
			 * wraps and currtime hasn't yet.
			 */
			deltatime = jiffies_to_msecs(jiffies) - currtime;
			currtime += deltatime;

			/*
			 * Process any waiting discovery as long as the chip is
			 * up.  If a chip reset happens during initial polling,
			 * we have to make sure the timer tick processes the
			 * doorbell indicating the firmware is ready.
			 */
			if (!test_bit(AF_CHPRST_PENDING, &a->flags))
				esas2r_disc_check_for_work(a);

			/* Simulate a timer tick. */
			if (nexttick <= deltatime) {

				/* Time for a timer tick */
				nexttick += 100;
				esas2r_timer_tick(a);
			}

			if (nexttick > deltatime)
				nexttick -= deltatime;

			/* Do any deferred processing */
			if (esas2r_is_tasklet_pending(a))
				esas2r_do_tasklet_tasks(a);

		}

		/* restore the disable count bumped above */
		if (test_bit(AF_FIRST_INIT, &a->flags))
			atomic_inc(&a->disable_cnt);

		clear_bit(AF_DISC_POLLED, &a->flags);
		clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
	}


	esas2r_targ_db_report_changes(a);

	/*
	 * For cases where (a) the initialization messages processing may
	 * handle an interrupt for a port event and a discovery is waiting, but
	 * we are not waiting for devices, or (b) the device wait time has been
	 * exhausted but there is still discovery pending, start any leftover
	 * discovery in interrupt driven mode.
	 */
	esas2r_disc_start_waiting(a);

	/* Enable chip interrupts */
	a->int_mask = ESAS2R_INT_STS_MASK;
	esas2r_enable_chip_interrupts(a);
	esas2r_enable_heartbeat(a);
	rslt = true;

exit:
	/*
	 * Regardless of whether initialization was successful, certain things
	 * need to get done before we exit.
	 */

	if (test_bit(AF_CHPRST_DETECTED, &a->flags) &&
	    test_bit(AF_FIRST_INIT, &a->flags)) {
		/*
		 * Reinitialization was performed during the first
		 * initialization.  Only clear the chip reset flag so the
		 * original device polling is not cancelled.
		 */
		if (!rslt)
			clear_bit(AF_CHPRST_PENDING, &a->flags);
	} else {
		/* First initialization or a subsequent re-init is complete. */
		if (!rslt) {
			clear_bit(AF_CHPRST_PENDING, &a->flags);
			clear_bit(AF_DISC_PENDING, &a->flags);
		}


		/* Enable deferred processing after the first initialization. */
		if (test_bit(AF_FIRST_INIT, &a->flags)) {
			clear_bit(AF_FIRST_INIT, &a->flags);

			if (atomic_dec_return(&a->disable_cnt) == 0)
				esas2r_do_deferred_processes(a);
		}
	}

	return rslt;
}
1461 
/*
 * Request a full adapter reset from the OS level: mark the reset as
 * OS-initiated, perform the local reset, then schedule the tasklet so
 * the deferred recovery work runs.
 */
void esas2r_reset_adapter(struct esas2r_adapter *a)
{
	set_bit(AF_OS_RESET, &a->flags);
	esas2r_local_reset_adapter(a);
	esas2r_schedule_tasklet(a);
}
1468 
/*
 * Hard-reset the adapter chip.  If a VDA core dump is available and has
 * not already been saved, it is copied out of onchip SRAM first, since
 * the reset would destroy it.  The reset register used depends on the
 * chip revision (Frey B2 uses a different one).
 */
void esas2r_reset_chip(struct esas2r_adapter *a)
{
	if (!esas2r_is_adapter_present(a))
		return;

	/*
	 * Before we reset the chip, save off the VDA core dump.  The VDA core
	 * dump is located in the upper 512KB of the onchip SRAM.  Make sure
	 * to not overwrite a previous crash that was saved.
	 */
	if (test_bit(AF2_COREDUMP_AVAIL, &a->flags2) &&
	    !test_bit(AF2_COREDUMP_SAVED, &a->flags2)) {
		esas2r_read_mem_block(a,
				      a->fw_coredump_buff,
				      MW_DATA_ADDR_SRAM + 0x80000,
				      ESAS2R_FWCOREDUMP_SZ);

		set_bit(AF2_COREDUMP_SAVED, &a->flags2);
	}

	/* any dump still in SRAM is about to be wiped by the reset */
	clear_bit(AF2_COREDUMP_AVAIL, &a->flags2);

	/* Reset the chip */
	if (a->pcid->revision == MVR_FREY_B2)
		esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2,
					    MU_CTL_IN_FULL_RST2);
	else
		esas2r_write_register_dword(a, MU_CTL_STATUS_IN,
					    MU_CTL_IN_FULL_RST);


	/* Stall a little while to let the reset condition clear */
	mdelay(10);
}
1503 
1504 static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a)
1505 {
1506     u32 starttime;
1507     u32 doorbell;
1508 
1509     esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_POWER_DOWN);
1510     starttime = jiffies_to_msecs(jiffies);
1511 
1512     while (true) {
1513         doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
1514         if (doorbell & DRBL_POWER_DOWN) {
1515             esas2r_write_register_dword(a, MU_DOORBELL_OUT,
1516                             doorbell);
1517             break;
1518         }
1519 
1520         schedule_timeout_interruptible(msecs_to_jiffies(100));
1521 
1522         if ((jiffies_to_msecs(jiffies) - starttime) > 30000) {
1523             esas2r_hdebug("Timeout waiting for power down");
1524             break;
1525         }
1526     }
1527 }
1528 
/*
 * Power down the adapter: quiesce the firmware message interface (and,
 * when the firmware supports it, notify it of the power-down), then
 * suspend I/O processing and remove all target devices from the target
 * database.
 */
void esas2r_power_down(struct esas2r_adapter *a)
{
	set_bit(AF_POWER_MGT, &a->flags);
	set_bit(AF_POWER_DOWN, &a->flags);

	if (!test_bit(AF_DEGRADED_MODE, &a->flags)) {
		u32 starttime;
		u32 doorbell;

		/*
		 * We are currently running OK and will be reinitializing later.
		 * increment the disable count to coordinate with
		 * esas2r_init_adapter.  We don't have to do this in degraded
		 * mode since we never enabled interrupts in the first place.
		 */
		esas2r_disable_chip_interrupts(a);
		esas2r_disable_heartbeat(a);

		/* wait for any VDA activity to clear before continuing */
		esas2r_write_register_dword(a, MU_DOORBELL_IN,
					    DRBL_MSG_IFC_DOWN);
		starttime = jiffies_to_msecs(jiffies);

		/* poll up to 3 seconds for the interface-down ack */
		while (true) {
			doorbell =
				esas2r_read_register_dword(a, MU_DOORBELL_OUT);
			if (doorbell & DRBL_MSG_IFC_DOWN) {
				esas2r_write_register_dword(a, MU_DOORBELL_OUT,
							    doorbell);
				break;
			}

			schedule_timeout_interruptible(msecs_to_jiffies(100));

			if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
				esas2r_hdebug(
					"timeout waiting for interface down");
				break;
			}
		}

		/*
		 * For versions of firmware that support it tell them the driver
		 * is powering down.
		 */
		if (test_bit(AF2_VDA_POWER_DOWN, &a->flags2))
			esas2r_power_down_notify_firmware(a);
	}

	/* Suspend I/O processing. */
	set_bit(AF_OS_RESET, &a->flags);
	set_bit(AF_DISC_PENDING, &a->flags);
	set_bit(AF_CHPRST_PENDING, &a->flags);

	esas2r_process_adapter_reset(a);

	/* Remove devices now that I/O is cleaned up. */
	a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a);
	esas2r_targ_db_remove_all(a, false);
}
1593 
1594 /*
1595  * Perform power management processing including managing device states, adapter
1596  * states, interrupts, and I/O.
1597  */
1598 bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)
1599 {
1600     bool ret;
1601 
1602     clear_bit(AF_POWER_DOWN, &a->flags);
1603     esas2r_init_pci_cfg_space(a);
1604     set_bit(AF_FIRST_INIT, &a->flags);
1605     atomic_inc(&a->disable_cnt);
1606 
1607     /* reinitialize the adapter */
1608     ret = esas2r_check_adapter(a);
1609     if (!esas2r_init_adapter_hw(a, init_poll))
1610         ret = false;
1611 
1612     /* send the reset asynchronous event */
1613     esas2r_send_reset_ae(a, true);
1614 
1615     /* clear this flag after initialization. */
1616     clear_bit(AF_POWER_MGT, &a->flags);
1617     return ret;
1618 }
1619 
1620 bool esas2r_is_adapter_present(struct esas2r_adapter *a)
1621 {
1622     if (test_bit(AF_NOT_PRESENT, &a->flags))
1623         return false;
1624 
1625     if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) {
1626         set_bit(AF_NOT_PRESENT, &a->flags);
1627 
1628         return false;
1629     }
1630     return true;
1631 }
1632 
1633 const char *esas2r_get_model_name(struct esas2r_adapter *a)
1634 {
1635     switch (a->pcid->subsystem_device) {
1636     case ATTO_ESAS_R680:
1637         return "ATTO ExpressSAS R680";
1638 
1639     case ATTO_ESAS_R608:
1640         return "ATTO ExpressSAS R608";
1641 
1642     case ATTO_ESAS_R60F:
1643         return "ATTO ExpressSAS R60F";
1644 
1645     case ATTO_ESAS_R6F0:
1646         return "ATTO ExpressSAS R6F0";
1647 
1648     case ATTO_ESAS_R644:
1649         return "ATTO ExpressSAS R644";
1650 
1651     case ATTO_ESAS_R648:
1652         return "ATTO ExpressSAS R648";
1653 
1654     case ATTO_TSSC_3808:
1655         return "ATTO ThunderStream SC 3808D";
1656 
1657     case ATTO_TSSC_3808E:
1658         return "ATTO ThunderStream SC 3808E";
1659 
1660     case ATTO_TLSH_1068:
1661         return "ATTO ThunderLink SH 1068";
1662     }
1663 
1664     return "ATTO SAS Controller";
1665 }
1666 
1667 const char *esas2r_get_model_name_short(struct esas2r_adapter *a)
1668 {
1669     switch (a->pcid->subsystem_device) {
1670     case ATTO_ESAS_R680:
1671         return "R680";
1672 
1673     case ATTO_ESAS_R608:
1674         return "R608";
1675 
1676     case ATTO_ESAS_R60F:
1677         return "R60F";
1678 
1679     case ATTO_ESAS_R6F0:
1680         return "R6F0";
1681 
1682     case ATTO_ESAS_R644:
1683         return "R644";
1684 
1685     case ATTO_ESAS_R648:
1686         return "R648";
1687 
1688     case ATTO_TSSC_3808:
1689         return "SC 3808D";
1690 
1691     case ATTO_TSSC_3808E:
1692         return "SC 3808E";
1693 
1694     case ATTO_TLSH_1068:
1695         return "SH 1068";
1696     }
1697 
1698     return "unknown";
1699 }