Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  *  libata-core.c - helper library for ATA
0004  *
0005  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
0006  *  Copyright 2003-2004 Jeff Garzik
0007  *
0008  *  libata documentation is available via 'make {ps|pdf}docs',
0009  *  as Documentation/driver-api/libata.rst
0010  *
0011  *  Hardware documentation available from http://www.t13.org/ and
0012  *  http://www.sata-io.org/
0013  *
0014  *  Standards documents from:
0015  *  http://www.t13.org (ATA standards, PCI DMA IDE spec)
0016  *  http://www.t10.org (SCSI MMC - for ATAPI MMC)
0017  *  http://www.sata-io.org (SATA)
0018  *  http://www.compactflash.org (CF)
0019  *  http://www.qic.org (QIC157 - Tape and DSC)
0020  *  http://www.ce-ata.org (CE-ATA: not supported)
0021  *
0022  * libata is essentially a library of internal helper functions for
0023  * low-level ATA host controller drivers.  As such, the API/ABI is
0024  * likely to change as new drivers are added and updated.
0025  * Do not depend on ABI/API stability.
0026  */
0027 
0028 #include <linux/kernel.h>
0029 #include <linux/module.h>
0030 #include <linux/pci.h>
0031 #include <linux/init.h>
0032 #include <linux/list.h>
0033 #include <linux/mm.h>
0034 #include <linux/spinlock.h>
0035 #include <linux/blkdev.h>
0036 #include <linux/delay.h>
0037 #include <linux/timer.h>
0038 #include <linux/time.h>
0039 #include <linux/interrupt.h>
0040 #include <linux/completion.h>
0041 #include <linux/suspend.h>
0042 #include <linux/workqueue.h>
0043 #include <linux/scatterlist.h>
0044 #include <linux/io.h>
0045 #include <linux/log2.h>
0046 #include <linux/slab.h>
0047 #include <linux/glob.h>
0048 #include <scsi/scsi.h>
0049 #include <scsi/scsi_cmnd.h>
0050 #include <scsi/scsi_host.h>
0051 #include <linux/libata.h>
0052 #include <asm/byteorder.h>
0053 #include <asm/unaligned.h>
0054 #include <linux/cdrom.h>
0055 #include <linux/ratelimit.h>
0056 #include <linux/leds.h>
0057 #include <linux/pm_runtime.h>
0058 #include <linux/platform_device.h>
0059 #include <asm/setup.h>
0060 
0061 #define CREATE_TRACE_POINTS
0062 #include <trace/events/libata.h>
0063 
0064 #include "libata.h"
0065 #include "libata-transport.h"
0066 
0067 const struct ata_port_operations ata_base_port_ops = {
0068     .prereset       = ata_std_prereset,
0069     .postreset      = ata_std_postreset,
0070     .error_handler      = ata_std_error_handler,
0071     .sched_eh       = ata_std_sched_eh,
0072     .end_eh         = ata_std_end_eh,
0073 };
0074 
0075 const struct ata_port_operations sata_port_ops = {
0076     .inherits       = &ata_base_port_ops,
0077 
0078     .qc_defer       = ata_std_qc_defer,
0079     .hardreset      = sata_std_hardreset,
0080 };
0081 EXPORT_SYMBOL_GPL(sata_port_ops);
0082 
0083 static unsigned int ata_dev_init_params(struct ata_device *dev,
0084                     u16 heads, u16 sectors);
0085 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
0086 static void ata_dev_xfermask(struct ata_device *dev);
0087 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
0088 
0089 atomic_t ata_print_id = ATOMIC_INIT(0);
0090 
0091 #ifdef CONFIG_ATA_FORCE
0092 struct ata_force_param {
0093     const char  *name;
0094     u8      cbl;
0095     u8      spd_limit;
0096     unsigned int    xfer_mask;
0097     unsigned int    horkage_on;
0098     unsigned int    horkage_off;
0099     u16     lflags_on;
0100     u16     lflags_off;
0101 };
0102 
0103 struct ata_force_ent {
0104     int         port;
0105     int         device;
0106     struct ata_force_param  param;
0107 };
0108 
0109 static struct ata_force_ent *ata_force_tbl;
0110 static int ata_force_tbl_size;
0111 
0112 static char ata_force_param_buf[COMMAND_LINE_SIZE] __initdata;
0113 /* param_buf is thrown away after initialization, disallow read */
0114 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
0115 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
0116 #endif
0117 
0118 static int atapi_enabled = 1;
0119 module_param(atapi_enabled, int, 0444);
0120 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
0121 
0122 static int atapi_dmadir = 0;
0123 module_param(atapi_dmadir, int, 0444);
0124 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
0125 
0126 int atapi_passthru16 = 1;
0127 module_param(atapi_passthru16, int, 0444);
0128 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
0129 
0130 int libata_fua = 0;
0131 module_param_named(fua, libata_fua, int, 0444);
0132 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
0133 
0134 static int ata_ignore_hpa;
0135 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
0136 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
0137 
0138 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
0139 module_param_named(dma, libata_dma_mask, int, 0444);
0140 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
0141 
0142 static int ata_probe_timeout;
0143 module_param(ata_probe_timeout, int, 0444);
0144 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
0145 
0146 int libata_noacpi = 0;
0147 module_param_named(noacpi, libata_noacpi, int, 0444);
0148 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
0149 
0150 int libata_allow_tpm = 0;
0151 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
0152 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
0153 
0154 static int atapi_an;
0155 module_param(atapi_an, int, 0444);
0156 MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
0157 
0158 MODULE_AUTHOR("Jeff Garzik");
0159 MODULE_DESCRIPTION("Library module for ATA devices");
0160 MODULE_LICENSE("GPL");
0161 MODULE_VERSION(DRV_VERSION);
0162 
0163 static inline bool ata_dev_print_info(struct ata_device *dev)
0164 {
0165     struct ata_eh_context *ehc = &dev->link->eh_context;
0166 
0167     return ehc->i.flags & ATA_EHI_PRINTINFO;
0168 }
0169 
0170 static bool ata_sstatus_online(u32 sstatus)
0171 {
0172     return (sstatus & 0xf) == 0x3;
0173 }
0174 
0175 /**
0176  *  ata_link_next - link iteration helper
0177  *  @link: the previous link, NULL to start
0178  *  @ap: ATA port containing links to iterate
0179  *  @mode: iteration mode, one of ATA_LITER_*
0180  *
0181  *  LOCKING:
0182  *  Host lock or EH context.
0183  *
0184  *  RETURNS:
0185  *  Pointer to the next link.
0186  */
0187 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
0188                    enum ata_link_iter_mode mode)
0189 {
0190     BUG_ON(mode != ATA_LITER_EDGE &&
0191            mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
0192 
0193     /* NULL link indicates start of iteration */
0194     if (!link)
0195         switch (mode) {
0196         case ATA_LITER_EDGE:
0197         case ATA_LITER_PMP_FIRST:
0198             if (sata_pmp_attached(ap))
0199                 return ap->pmp_link;
0200             fallthrough;
0201         case ATA_LITER_HOST_FIRST:
0202             return &ap->link;
0203         }
0204 
0205     /* we just iterated over the host link, what's next? */
0206     if (link == &ap->link)
0207         switch (mode) {
0208         case ATA_LITER_HOST_FIRST:
0209             if (sata_pmp_attached(ap))
0210                 return ap->pmp_link;
0211             fallthrough;
0212         case ATA_LITER_PMP_FIRST:
0213             if (unlikely(ap->slave_link))
0214                 return ap->slave_link;
0215             fallthrough;
0216         case ATA_LITER_EDGE:
0217             return NULL;
0218         }
0219 
0220     /* slave_link excludes PMP */
0221     if (unlikely(link == ap->slave_link))
0222         return NULL;
0223 
0224     /* we were over a PMP link */
0225     if (++link < ap->pmp_link + ap->nr_pmp_links)
0226         return link;
0227 
0228     if (mode == ATA_LITER_PMP_FIRST)
0229         return &ap->link;
0230 
0231     return NULL;
0232 }
0233 EXPORT_SYMBOL_GPL(ata_link_next);
0234 
0235 /**
0236  *  ata_dev_next - device iteration helper
0237  *  @dev: the previous device, NULL to start
0238  *  @link: ATA link containing devices to iterate
0239  *  @mode: iteration mode, one of ATA_DITER_*
0240  *
0241  *  LOCKING:
0242  *  Host lock or EH context.
0243  *
0244  *  RETURNS:
0245  *  Pointer to the next device.
0246  */
0247 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
0248                 enum ata_dev_iter_mode mode)
0249 {
0250     BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
0251            mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
0252 
0253     /* NULL dev indicates start of iteration */
0254     if (!dev)
0255         switch (mode) {
0256         case ATA_DITER_ENABLED:
0257         case ATA_DITER_ALL:
0258             dev = link->device;
0259             goto check;
0260         case ATA_DITER_ENABLED_REVERSE:
0261         case ATA_DITER_ALL_REVERSE:
0262             dev = link->device + ata_link_max_devices(link) - 1;
0263             goto check;
0264         }
0265 
0266  next:
0267     /* move to the next one */
0268     switch (mode) {
0269     case ATA_DITER_ENABLED:
0270     case ATA_DITER_ALL:
0271         if (++dev < link->device + ata_link_max_devices(link))
0272             goto check;
0273         return NULL;
0274     case ATA_DITER_ENABLED_REVERSE:
0275     case ATA_DITER_ALL_REVERSE:
0276         if (--dev >= link->device)
0277             goto check;
0278         return NULL;
0279     }
0280 
0281  check:
0282     if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
0283         !ata_dev_enabled(dev))
0284         goto next;
0285     return dev;
0286 }
0287 EXPORT_SYMBOL_GPL(ata_dev_next);
0288 
0289 /**
0290  *  ata_dev_phys_link - find physical link for a device
0291  *  @dev: ATA device to look up physical link for
0292  *
0293  *  Look up physical link which @dev is attached to.  Note that
0294  *  this is different from @dev->link only when @dev is on slave
0295  *  link.  For all other cases, it's the same as @dev->link.
0296  *
0297  *  LOCKING:
0298  *  Don't care.
0299  *
0300  *  RETURNS:
0301  *  Pointer to the found physical link.
0302  */
0303 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
0304 {
0305     struct ata_port *ap = dev->link->ap;
0306 
0307     if (!ap->slave_link)
0308         return dev->link;
0309     if (!dev->devno)
0310         return &ap->link;
0311     return ap->slave_link;
0312 }
0313 
0314 #ifdef CONFIG_ATA_FORCE
0315 /**
0316  *  ata_force_cbl - force cable type according to libata.force
0317  *  @ap: ATA port of interest
0318  *
0319  *  Force cable type according to libata.force and whine about it.
0320  *  The last entry which has matching port number is used, so it
0321  *  can be specified as part of device force parameters.  For
0322  *  example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
0323  *  same effect.
0324  *
0325  *  LOCKING:
0326  *  EH context.
0327  */
0328 void ata_force_cbl(struct ata_port *ap)
0329 {
0330     int i;
0331 
0332     for (i = ata_force_tbl_size - 1; i >= 0; i--) {
0333         const struct ata_force_ent *fe = &ata_force_tbl[i];
0334 
0335         if (fe->port != -1 && fe->port != ap->print_id)
0336             continue;
0337 
0338         if (fe->param.cbl == ATA_CBL_NONE)
0339             continue;
0340 
0341         ap->cbl = fe->param.cbl;
0342         ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
0343         return;
0344     }
0345 }
0346 
0347 /**
0348  *  ata_force_link_limits - force link limits according to libata.force
0349  *  @link: ATA link of interest
0350  *
0351  *  Force link flags and SATA spd limit according to libata.force
0352  *  and whine about it.  When only the port part is specified
0353  *  (e.g. 1:), the limit applies to all links connected to both
0354  *  the host link and all fan-out ports connected via PMP.  If the
0355  *  device part is specified as 0 (e.g. 1.00:), it specifies the
0356  *  first fan-out link not the host link.  Device number 15 always
0357  *  points to the host link whether PMP is attached or not.  If the
0358  *  controller has slave link, device number 16 points to it.
0359  *
0360  *  LOCKING:
0361  *  EH context.
0362  */
0363 static void ata_force_link_limits(struct ata_link *link)
0364 {
0365     bool did_spd = false;
0366     int linkno = link->pmp;
0367     int i;
0368 
0369     if (ata_is_host_link(link))
0370         linkno += 15;
0371 
0372     for (i = ata_force_tbl_size - 1; i >= 0; i--) {
0373         const struct ata_force_ent *fe = &ata_force_tbl[i];
0374 
0375         if (fe->port != -1 && fe->port != link->ap->print_id)
0376             continue;
0377 
0378         if (fe->device != -1 && fe->device != linkno)
0379             continue;
0380 
0381         /* only honor the first spd limit */
0382         if (!did_spd && fe->param.spd_limit) {
0383             link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
0384             ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
0385                     fe->param.name);
0386             did_spd = true;
0387         }
0388 
0389         /* let lflags stack */
0390         if (fe->param.lflags_on) {
0391             link->flags |= fe->param.lflags_on;
0392             ata_link_notice(link,
0393                     "FORCE: link flag 0x%x forced -> 0x%x\n",
0394                     fe->param.lflags_on, link->flags);
0395         }
0396         if (fe->param.lflags_off) {
0397             link->flags &= ~fe->param.lflags_off;
0398             ata_link_notice(link,
0399                 "FORCE: link flag 0x%x cleared -> 0x%x\n",
0400                 fe->param.lflags_off, link->flags);
0401         }
0402     }
0403 }
0404 
0405 /**
0406  *  ata_force_xfermask - force xfermask according to libata.force
0407  *  @dev: ATA device of interest
0408  *
0409  *  Force xfer_mask according to libata.force and whine about it.
0410  *  For consistency with link selection, device number 15 selects
0411  *  the first device connected to the host link.
0412  *
0413  *  LOCKING:
0414  *  EH context.
0415  */
0416 static void ata_force_xfermask(struct ata_device *dev)
0417 {
0418     int devno = dev->link->pmp + dev->devno;
0419     int alt_devno = devno;
0420     int i;
0421 
0422     /* allow n.15/16 for devices attached to host port */
0423     if (ata_is_host_link(dev->link))
0424         alt_devno += 15;
0425 
0426     for (i = ata_force_tbl_size - 1; i >= 0; i--) {
0427         const struct ata_force_ent *fe = &ata_force_tbl[i];
0428         unsigned int pio_mask, mwdma_mask, udma_mask;
0429 
0430         if (fe->port != -1 && fe->port != dev->link->ap->print_id)
0431             continue;
0432 
0433         if (fe->device != -1 && fe->device != devno &&
0434             fe->device != alt_devno)
0435             continue;
0436 
0437         if (!fe->param.xfer_mask)
0438             continue;
0439 
0440         ata_unpack_xfermask(fe->param.xfer_mask,
0441                     &pio_mask, &mwdma_mask, &udma_mask);
0442         if (udma_mask)
0443             dev->udma_mask = udma_mask;
0444         else if (mwdma_mask) {
0445             dev->udma_mask = 0;
0446             dev->mwdma_mask = mwdma_mask;
0447         } else {
0448             dev->udma_mask = 0;
0449             dev->mwdma_mask = 0;
0450             dev->pio_mask = pio_mask;
0451         }
0452 
0453         ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
0454                    fe->param.name);
0455         return;
0456     }
0457 }
0458 
0459 /**
0460  *  ata_force_horkage - force horkage according to libata.force
0461  *  @dev: ATA device of interest
0462  *
0463  *  Force horkage according to libata.force and whine about it.
0464  *  For consistency with link selection, device number 15 selects
0465  *  the first device connected to the host link.
0466  *
0467  *  LOCKING:
0468  *  EH context.
0469  */
0470 static void ata_force_horkage(struct ata_device *dev)
0471 {
0472     int devno = dev->link->pmp + dev->devno;
0473     int alt_devno = devno;
0474     int i;
0475 
0476     /* allow n.15/16 for devices attached to host port */
0477     if (ata_is_host_link(dev->link))
0478         alt_devno += 15;
0479 
0480     for (i = 0; i < ata_force_tbl_size; i++) {
0481         const struct ata_force_ent *fe = &ata_force_tbl[i];
0482 
0483         if (fe->port != -1 && fe->port != dev->link->ap->print_id)
0484             continue;
0485 
0486         if (fe->device != -1 && fe->device != devno &&
0487             fe->device != alt_devno)
0488             continue;
0489 
0490         if (!(~dev->horkage & fe->param.horkage_on) &&
0491             !(dev->horkage & fe->param.horkage_off))
0492             continue;
0493 
0494         dev->horkage |= fe->param.horkage_on;
0495         dev->horkage &= ~fe->param.horkage_off;
0496 
0497         ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
0498                    fe->param.name);
0499     }
0500 }
0501 #else
0502 static inline void ata_force_link_limits(struct ata_link *link) { }
0503 static inline void ata_force_xfermask(struct ata_device *dev) { }
0504 static inline void ata_force_horkage(struct ata_device *dev) { }
0505 #endif
0506 
0507 /**
0508  *  atapi_cmd_type - Determine ATAPI command type from SCSI opcode
0509  *  @opcode: SCSI opcode
0510  *
0511  *  Determine ATAPI command type from @opcode.
0512  *
0513  *  LOCKING:
0514  *  None.
0515  *
0516  *  RETURNS:
0517  *  ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
0518  */
0519 int atapi_cmd_type(u8 opcode)
0520 {
0521     switch (opcode) {
0522     case GPCMD_READ_10:
0523     case GPCMD_READ_12:
0524         return ATAPI_READ;
0525 
0526     case GPCMD_WRITE_10:
0527     case GPCMD_WRITE_12:
0528     case GPCMD_WRITE_AND_VERIFY_10:
0529         return ATAPI_WRITE;
0530 
0531     case GPCMD_READ_CD:
0532     case GPCMD_READ_CD_MSF:
0533         return ATAPI_READ_CD;
0534 
0535     case ATA_16:
0536     case ATA_12:
0537         if (atapi_passthru16)
0538             return ATAPI_PASS_THRU;
0539         fallthrough;
0540     default:
0541         return ATAPI_MISC;
0542     }
0543 }
0544 EXPORT_SYMBOL_GPL(atapi_cmd_type);
0545 
0546 static const u8 ata_rw_cmds[] = {
0547     /* pio multi */
0548     ATA_CMD_READ_MULTI,
0549     ATA_CMD_WRITE_MULTI,
0550     ATA_CMD_READ_MULTI_EXT,
0551     ATA_CMD_WRITE_MULTI_EXT,
0552     0,
0553     0,
0554     0,
0555     ATA_CMD_WRITE_MULTI_FUA_EXT,
0556     /* pio */
0557     ATA_CMD_PIO_READ,
0558     ATA_CMD_PIO_WRITE,
0559     ATA_CMD_PIO_READ_EXT,
0560     ATA_CMD_PIO_WRITE_EXT,
0561     0,
0562     0,
0563     0,
0564     0,
0565     /* dma */
0566     ATA_CMD_READ,
0567     ATA_CMD_WRITE,
0568     ATA_CMD_READ_EXT,
0569     ATA_CMD_WRITE_EXT,
0570     0,
0571     0,
0572     0,
0573     ATA_CMD_WRITE_FUA_EXT
0574 };
0575 
0576 /**
0577  *  ata_rwcmd_protocol - set taskfile r/w commands and protocol
0578  *  @tf: command to examine and configure
0579  *  @dev: device tf belongs to
0580  *
0581  *  Examine the device configuration and tf->flags to calculate
0582  *  the proper read/write commands and protocol to use.
0583  *
0584  *  LOCKING:
0585  *  caller.
0586  */
0587 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
0588 {
0589     u8 cmd;
0590 
0591     int index, fua, lba48, write;
0592 
0593     fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
0594     lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
0595     write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
0596 
0597     if (dev->flags & ATA_DFLAG_PIO) {
0598         tf->protocol = ATA_PROT_PIO;
0599         index = dev->multi_count ? 0 : 8;
0600     } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
0601         /* Unable to use DMA due to host limitation */
0602         tf->protocol = ATA_PROT_PIO;
0603         index = dev->multi_count ? 0 : 8;
0604     } else {
0605         tf->protocol = ATA_PROT_DMA;
0606         index = 16;
0607     }
0608 
0609     cmd = ata_rw_cmds[index + fua + lba48 + write];
0610     if (cmd) {
0611         tf->command = cmd;
0612         return 0;
0613     }
0614     return -1;
0615 }
0616 
0617 /**
0618  *  ata_tf_read_block - Read block address from ATA taskfile
0619  *  @tf: ATA taskfile of interest
0620  *  @dev: ATA device @tf belongs to
0621  *
0622  *  LOCKING:
0623  *  None.
0624  *
0625  *  Read block address from @tf.  This function can handle all
0626  *  three address formats - LBA, LBA48 and CHS.  tf->protocol and
0627  *  flags select the address format to use.
0628  *
0629  *  RETURNS:
0630  *  Block address read from @tf.
0631  */
0632 u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
0633 {
0634     u64 block = 0;
0635 
0636     if (tf->flags & ATA_TFLAG_LBA) {
0637         if (tf->flags & ATA_TFLAG_LBA48) {
0638             block |= (u64)tf->hob_lbah << 40;
0639             block |= (u64)tf->hob_lbam << 32;
0640             block |= (u64)tf->hob_lbal << 24;
0641         } else
0642             block |= (tf->device & 0xf) << 24;
0643 
0644         block |= tf->lbah << 16;
0645         block |= tf->lbam << 8;
0646         block |= tf->lbal;
0647     } else {
0648         u32 cyl, head, sect;
0649 
0650         cyl = tf->lbam | (tf->lbah << 8);
0651         head = tf->device & 0xf;
0652         sect = tf->lbal;
0653 
0654         if (!sect) {
0655             ata_dev_warn(dev,
0656                      "device reported invalid CHS sector 0\n");
0657             return U64_MAX;
0658         }
0659 
0660         block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
0661     }
0662 
0663     return block;
0664 }
0665 
0666 /**
0667  *  ata_build_rw_tf - Build ATA taskfile for given read/write request
0668  *  @tf: Target ATA taskfile
0669  *  @dev: ATA device @tf belongs to
0670  *  @block: Block address
0671  *  @n_block: Number of blocks
0672  *  @tf_flags: RW/FUA etc...
0673  *  @tag: tag
0674  *  @class: IO priority class
0675  *
0676  *  LOCKING:
0677  *  None.
0678  *
0679  *  Build ATA taskfile @tf for read/write request described by
0680  *  @block, @n_block, @tf_flags and @tag on @dev.
0681  *
0682  *  RETURNS:
0683  *
0684  *  0 on success, -ERANGE if the request is too large for @dev,
0685  *  -EINVAL if the request is invalid.
0686  */
0687 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
0688             u64 block, u32 n_block, unsigned int tf_flags,
0689             unsigned int tag, int class)
0690 {
0691     tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
0692     tf->flags |= tf_flags;
0693 
0694     if (ata_ncq_enabled(dev) && !ata_tag_internal(tag)) {
0695         /* yay, NCQ */
0696         if (!lba_48_ok(block, n_block))
0697             return -ERANGE;
0698 
0699         tf->protocol = ATA_PROT_NCQ;
0700         tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
0701 
0702         if (tf->flags & ATA_TFLAG_WRITE)
0703             tf->command = ATA_CMD_FPDMA_WRITE;
0704         else
0705             tf->command = ATA_CMD_FPDMA_READ;
0706 
0707         tf->nsect = tag << 3;
0708         tf->hob_feature = (n_block >> 8) & 0xff;
0709         tf->feature = n_block & 0xff;
0710 
0711         tf->hob_lbah = (block >> 40) & 0xff;
0712         tf->hob_lbam = (block >> 32) & 0xff;
0713         tf->hob_lbal = (block >> 24) & 0xff;
0714         tf->lbah = (block >> 16) & 0xff;
0715         tf->lbam = (block >> 8) & 0xff;
0716         tf->lbal = block & 0xff;
0717 
0718         tf->device = ATA_LBA;
0719         if (tf->flags & ATA_TFLAG_FUA)
0720             tf->device |= 1 << 7;
0721 
0722         if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE &&
0723             class == IOPRIO_CLASS_RT)
0724             tf->hob_nsect |= ATA_PRIO_HIGH << ATA_SHIFT_PRIO;
0725     } else if (dev->flags & ATA_DFLAG_LBA) {
0726         tf->flags |= ATA_TFLAG_LBA;
0727 
0728         if (lba_28_ok(block, n_block)) {
0729             /* use LBA28 */
0730             tf->device |= (block >> 24) & 0xf;
0731         } else if (lba_48_ok(block, n_block)) {
0732             if (!(dev->flags & ATA_DFLAG_LBA48))
0733                 return -ERANGE;
0734 
0735             /* use LBA48 */
0736             tf->flags |= ATA_TFLAG_LBA48;
0737 
0738             tf->hob_nsect = (n_block >> 8) & 0xff;
0739 
0740             tf->hob_lbah = (block >> 40) & 0xff;
0741             tf->hob_lbam = (block >> 32) & 0xff;
0742             tf->hob_lbal = (block >> 24) & 0xff;
0743         } else
0744             /* request too large even for LBA48 */
0745             return -ERANGE;
0746 
0747         if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
0748             return -EINVAL;
0749 
0750         tf->nsect = n_block & 0xff;
0751 
0752         tf->lbah = (block >> 16) & 0xff;
0753         tf->lbam = (block >> 8) & 0xff;
0754         tf->lbal = block & 0xff;
0755 
0756         tf->device |= ATA_LBA;
0757     } else {
0758         /* CHS */
0759         u32 sect, head, cyl, track;
0760 
0761         /* The request -may- be too large for CHS addressing. */
0762         if (!lba_28_ok(block, n_block))
0763             return -ERANGE;
0764 
0765         if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
0766             return -EINVAL;
0767 
0768         /* Convert LBA to CHS */
0769         track = (u32)block / dev->sectors;
0770         cyl   = track / dev->heads;
0771         head  = track % dev->heads;
0772         sect  = (u32)block % dev->sectors + 1;
0773 
0774         /* Check whether the converted CHS can fit.
0775            Cylinder: 0-65535
0776            Head: 0-15
0777            Sector: 1-255*/
0778         if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
0779             return -ERANGE;
0780 
0781         tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
0782         tf->lbal = sect;
0783         tf->lbam = cyl;
0784         tf->lbah = cyl >> 8;
0785         tf->device |= head;
0786     }
0787 
0788     return 0;
0789 }
0790 
0791 /**
0792  *  ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
0793  *  @pio_mask: pio_mask
0794  *  @mwdma_mask: mwdma_mask
0795  *  @udma_mask: udma_mask
0796  *
0797  *  Pack @pio_mask, @mwdma_mask and @udma_mask into a single
0798  *  unsigned int xfer_mask.
0799  *
0800  *  LOCKING:
0801  *  None.
0802  *
0803  *  RETURNS:
0804  *  Packed xfer_mask.
0805  */
0806 unsigned int ata_pack_xfermask(unsigned int pio_mask,
0807                    unsigned int mwdma_mask,
0808                    unsigned int udma_mask)
0809 {
0810     return  ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
0811         ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
0812         ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
0813 }
0814 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
0815 
0816 /**
0817  *  ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
0818  *  @xfer_mask: xfer_mask to unpack
0819  *  @pio_mask: resulting pio_mask
0820  *  @mwdma_mask: resulting mwdma_mask
0821  *  @udma_mask: resulting udma_mask
0822  *
0823  *  Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
0824  *  Any NULL destination masks will be ignored.
0825  */
0826 void ata_unpack_xfermask(unsigned int xfer_mask, unsigned int *pio_mask,
0827              unsigned int *mwdma_mask, unsigned int *udma_mask)
0828 {
0829     if (pio_mask)
0830         *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
0831     if (mwdma_mask)
0832         *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
0833     if (udma_mask)
0834         *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
0835 }
0836 
0837 static const struct ata_xfer_ent {
0838     int shift, bits;
0839     u8 base;
0840 } ata_xfer_tbl[] = {
0841     { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
0842     { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
0843     { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
0844     { -1, },
0845 };
0846 
0847 /**
0848  *  ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
0849  *  @xfer_mask: xfer_mask of interest
0850  *
0851  *  Return matching XFER_* value for @xfer_mask.  Only the highest
0852  *  bit of @xfer_mask is considered.
0853  *
0854  *  LOCKING:
0855  *  None.
0856  *
0857  *  RETURNS:
0858  *  Matching XFER_* value, 0xff if no match found.
0859  */
0860 u8 ata_xfer_mask2mode(unsigned int xfer_mask)
0861 {
0862     int highbit = fls(xfer_mask) - 1;
0863     const struct ata_xfer_ent *ent;
0864 
0865     for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
0866         if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
0867             return ent->base + highbit - ent->shift;
0868     return 0xff;
0869 }
0870 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
0871 
0872 /**
0873  *  ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
0874  *  @xfer_mode: XFER_* of interest
0875  *
0876  *  Return matching xfer_mask for @xfer_mode.
0877  *
0878  *  LOCKING:
0879  *  None.
0880  *
0881  *  RETURNS:
0882  *  Matching xfer_mask, 0 if no match found.
0883  */
0884 unsigned int ata_xfer_mode2mask(u8 xfer_mode)
0885 {
0886     const struct ata_xfer_ent *ent;
0887 
0888     for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
0889         if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
0890             return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
0891                 & ~((1 << ent->shift) - 1);
0892     return 0;
0893 }
0894 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
0895 
0896 /**
0897  *  ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
0898  *  @xfer_mode: XFER_* of interest
0899  *
0900  *  Return matching xfer_shift for @xfer_mode.
0901  *
0902  *  LOCKING:
0903  *  None.
0904  *
0905  *  RETURNS:
0906  *  Matching xfer_shift, -1 if no match found.
0907  */
0908 int ata_xfer_mode2shift(u8 xfer_mode)
0909 {
0910     const struct ata_xfer_ent *ent;
0911 
0912     for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
0913         if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
0914             return ent->shift;
0915     return -1;
0916 }
0917 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
0918 
0919 /**
0920  *  ata_mode_string - convert xfer_mask to string
0921  *  @xfer_mask: mask of bits supported; only highest bit counts.
0922  *
0923  *  Determine string which represents the highest speed
0924  *  (highest bit in @modemask).
0925  *
0926  *  LOCKING:
0927  *  None.
0928  *
0929  *  RETURNS:
0930  *  Constant C string representing highest speed listed in
0931  *  @mode_mask, or the constant C string "<n/a>".
0932  */
0933 const char *ata_mode_string(unsigned int xfer_mask)
0934 {
0935     static const char * const xfer_mode_str[] = {
0936         "PIO0",
0937         "PIO1",
0938         "PIO2",
0939         "PIO3",
0940         "PIO4",
0941         "PIO5",
0942         "PIO6",
0943         "MWDMA0",
0944         "MWDMA1",
0945         "MWDMA2",
0946         "MWDMA3",
0947         "MWDMA4",
0948         "UDMA/16",
0949         "UDMA/25",
0950         "UDMA/33",
0951         "UDMA/44",
0952         "UDMA/66",
0953         "UDMA/100",
0954         "UDMA/133",
0955         "UDMA7",
0956     };
0957     int highbit;
0958 
0959     highbit = fls(xfer_mask) - 1;
0960     if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
0961         return xfer_mode_str[highbit];
0962     return "<n/a>";
0963 }
0964 EXPORT_SYMBOL_GPL(ata_mode_string);
0965 
0966 const char *sata_spd_string(unsigned int spd)
0967 {
0968     static const char * const spd_str[] = {
0969         "1.5 Gbps",
0970         "3.0 Gbps",
0971         "6.0 Gbps",
0972     };
0973 
0974     if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
0975         return "<unknown>";
0976     return spd_str[spd - 1];
0977 }
0978 
0979 /**
0980  *  ata_dev_classify - determine device type based on ATA-spec signature
0981  *  @tf: ATA taskfile register set for device to be identified
0982  *
0983  *  Determine from taskfile register contents whether a device is
0984  *  ATA or ATAPI, as per "Signature and persistence" section
0985  *  of ATA/PI spec (volume 1, sect 5.14).
0986  *
0987  *  LOCKING:
0988  *  None.
0989  *
0990  *  RETURNS:
0991  *  Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
0992  *  %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN the event of failure.
0993  */
0994 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
0995 {
0996     /* Apple's open source Darwin code hints that some devices only
0997      * put a proper signature into the LBA mid/high registers,
0998      * So, we only check those.  It's sufficient for uniqueness.
0999      *
1000      * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1001      * signatures for ATA and ATAPI devices attached on SerialATA,
1002      * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
1003      * spec has never mentioned about using different signatures
1004      * for ATA/ATAPI devices.  Then, Serial ATA II: Port
1005      * Multiplier specification began to use 0x69/0x96 to identify
1006      * port multpliers and 0x3c/0xc3 to identify SEMB device.
1007      * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1008      * 0x69/0x96 shortly and described them as reserved for
1009      * SerialATA.
1010      *
1011      * We follow the current spec and consider that 0x69/0x96
1012      * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1013      * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1014      * SEMB signature.  This is worked around in
1015      * ata_dev_read_id().
1016      */
1017     if (tf->lbam == 0 && tf->lbah == 0)
1018         return ATA_DEV_ATA;
1019 
1020     if (tf->lbam == 0x14 && tf->lbah == 0xeb)
1021         return ATA_DEV_ATAPI;
1022 
1023     if (tf->lbam == 0x69 && tf->lbah == 0x96)
1024         return ATA_DEV_PMP;
1025 
1026     if (tf->lbam == 0x3c && tf->lbah == 0xc3)
1027         return ATA_DEV_SEMB;
1028 
1029     if (tf->lbam == 0xcd && tf->lbah == 0xab)
1030         return ATA_DEV_ZAC;
1031 
1032     return ATA_DEV_UNKNOWN;
1033 }
1034 EXPORT_SYMBOL_GPL(ata_dev_classify);
1035 
1036 /**
1037  *  ata_id_string - Convert IDENTIFY DEVICE page into string
1038  *  @id: IDENTIFY DEVICE results we will examine
1039  *  @s: string into which data is output
1040  *  @ofs: offset into identify device page
1041  *  @len: length of string to return. must be an even number.
1042  *
1043  *  The strings in the IDENTIFY DEVICE page are broken up into
1044  *  16-bit chunks.  Run through the string, and output each
1045  *  8-bit chunk linearly, regardless of platform.
1046  *
1047  *  LOCKING:
1048  *  caller.
1049  */
1050 
1051 void ata_id_string(const u16 *id, unsigned char *s,
1052            unsigned int ofs, unsigned int len)
1053 {
1054     unsigned int c;
1055 
1056     BUG_ON(len & 1);
1057 
1058     while (len > 0) {
1059         c = id[ofs] >> 8;
1060         *s = c;
1061         s++;
1062 
1063         c = id[ofs] & 0xff;
1064         *s = c;
1065         s++;
1066 
1067         ofs++;
1068         len -= 2;
1069     }
1070 }
1071 EXPORT_SYMBOL_GPL(ata_id_string);
1072 
1073 /**
1074  *  ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1075  *  @id: IDENTIFY DEVICE results we will examine
1076  *  @s: string into which data is output
1077  *  @ofs: offset into identify device page
1078  *  @len: length of string to return. must be an odd number.
1079  *
1080  *  This function is identical to ata_id_string except that it
1081  *  trims trailing spaces and terminates the resulting string with
1082  *  null.  @len must be actual maximum length (even number) + 1.
1083  *
1084  *  LOCKING:
1085  *  caller.
1086  */
1087 void ata_id_c_string(const u16 *id, unsigned char *s,
1088              unsigned int ofs, unsigned int len)
1089 {
1090     unsigned char *p;
1091 
1092     ata_id_string(id, s, ofs, len - 1);
1093 
1094     p = s + strnlen(s, len - 1);
1095     while (p > s && p[-1] == ' ')
1096         p--;
1097     *p = '\0';
1098 }
1099 EXPORT_SYMBOL_GPL(ata_id_c_string);
1100 
1101 static u64 ata_id_n_sectors(const u16 *id)
1102 {
1103     if (ata_id_has_lba(id)) {
1104         if (ata_id_has_lba48(id))
1105             return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1106 
1107         return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1108     }
1109 
1110     if (ata_id_current_chs_valid(id))
1111         return (u32)id[ATA_ID_CUR_CYLS] * (u32)id[ATA_ID_CUR_HEADS] *
1112                (u32)id[ATA_ID_CUR_SECTORS];
1113 
1114     return (u32)id[ATA_ID_CYLS] * (u32)id[ATA_ID_HEADS] *
1115            (u32)id[ATA_ID_SECTORS];
1116 }
1117 
1118 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1119 {
1120     u64 sectors = 0;
1121 
1122     sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1123     sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1124     sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1125     sectors |= (tf->lbah & 0xff) << 16;
1126     sectors |= (tf->lbam & 0xff) << 8;
1127     sectors |= (tf->lbal & 0xff);
1128 
1129     return sectors;
1130 }
1131 
1132 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1133 {
1134     u64 sectors = 0;
1135 
1136     sectors |= (tf->device & 0x0f) << 24;
1137     sectors |= (tf->lbah & 0xff) << 16;
1138     sectors |= (tf->lbam & 0xff) << 8;
1139     sectors |= (tf->lbal & 0xff);
1140 
1141     return sectors;
1142 }
1143 
1144 /**
1145  *  ata_read_native_max_address - Read native max address
1146  *  @dev: target device
1147  *  @max_sectors: out parameter for the result native max address
1148  *
1149  *  Perform an LBA48 or LBA28 native size query upon the device in
1150  *  question.
1151  *
1152  *  RETURNS:
1153  *  0 on success, -EACCES if command is aborted by the drive.
1154  *  -EIO on other errors.
1155  */
1156 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1157 {
1158     unsigned int err_mask;
1159     struct ata_taskfile tf;
1160     int lba48 = ata_id_has_lba48(dev->id);
1161 
1162     ata_tf_init(dev, &tf);
1163 
1164     /* always clear all address registers */
1165     tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1166 
1167     if (lba48) {
1168         tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1169         tf.flags |= ATA_TFLAG_LBA48;
1170     } else
1171         tf.command = ATA_CMD_READ_NATIVE_MAX;
1172 
1173     tf.protocol = ATA_PROT_NODATA;
1174     tf.device |= ATA_LBA;
1175 
1176     err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1177     if (err_mask) {
1178         ata_dev_warn(dev,
1179                  "failed to read native max address (err_mask=0x%x)\n",
1180                  err_mask);
1181         if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
1182             return -EACCES;
1183         return -EIO;
1184     }
1185 
1186     if (lba48)
1187         *max_sectors = ata_tf_to_lba48(&tf) + 1;
1188     else
1189         *max_sectors = ata_tf_to_lba(&tf) + 1;
1190     if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1191         (*max_sectors)--;
1192     return 0;
1193 }
1194 
1195 /**
1196  *  ata_set_max_sectors - Set max sectors
1197  *  @dev: target device
1198  *  @new_sectors: new max sectors value to set for the device
1199  *
1200  *  Set max sectors of @dev to @new_sectors.
1201  *
1202  *  RETURNS:
1203  *  0 on success, -EACCES if command is aborted or denied (due to
1204  *  previous non-volatile SET_MAX) by the drive.  -EIO on other
1205  *  errors.
1206  */
1207 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1208 {
1209     unsigned int err_mask;
1210     struct ata_taskfile tf;
1211     int lba48 = ata_id_has_lba48(dev->id);
1212 
1213     new_sectors--;
1214 
1215     ata_tf_init(dev, &tf);
1216 
1217     tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1218 
1219     if (lba48) {
1220         tf.command = ATA_CMD_SET_MAX_EXT;
1221         tf.flags |= ATA_TFLAG_LBA48;
1222 
1223         tf.hob_lbal = (new_sectors >> 24) & 0xff;
1224         tf.hob_lbam = (new_sectors >> 32) & 0xff;
1225         tf.hob_lbah = (new_sectors >> 40) & 0xff;
1226     } else {
1227         tf.command = ATA_CMD_SET_MAX;
1228 
1229         tf.device |= (new_sectors >> 24) & 0xf;
1230     }
1231 
1232     tf.protocol = ATA_PROT_NODATA;
1233     tf.device |= ATA_LBA;
1234 
1235     tf.lbal = (new_sectors >> 0) & 0xff;
1236     tf.lbam = (new_sectors >> 8) & 0xff;
1237     tf.lbah = (new_sectors >> 16) & 0xff;
1238 
1239     err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1240     if (err_mask) {
1241         ata_dev_warn(dev,
1242                  "failed to set max address (err_mask=0x%x)\n",
1243                  err_mask);
1244         if (err_mask == AC_ERR_DEV &&
1245             (tf.error & (ATA_ABORTED | ATA_IDNF)))
1246             return -EACCES;
1247         return -EIO;
1248     }
1249 
1250     return 0;
1251 }
1252 
1253 /**
1254  *  ata_hpa_resize      -   Resize a device with an HPA set
1255  *  @dev: Device to resize
1256  *
1257  *  Read the size of an LBA28 or LBA48 disk with HPA features and resize
1258  *  it if required to the full size of the media. The caller must check
1259  *  the drive has the HPA feature set enabled.
1260  *
1261  *  RETURNS:
1262  *  0 on success, -errno on failure.
1263  */
1264 static int ata_hpa_resize(struct ata_device *dev)
1265 {
1266     bool print_info = ata_dev_print_info(dev);
1267     bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1268     u64 sectors = ata_id_n_sectors(dev->id);
1269     u64 native_sectors;
1270     int rc;
1271 
1272     /* do we need to do it? */
1273     if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
1274         !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1275         (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1276         return 0;
1277 
1278     /* read native max address */
1279     rc = ata_read_native_max_address(dev, &native_sectors);
1280     if (rc) {
1281         /* If device aborted the command or HPA isn't going to
1282          * be unlocked, skip HPA resizing.
1283          */
1284         if (rc == -EACCES || !unlock_hpa) {
1285             ata_dev_warn(dev,
1286                      "HPA support seems broken, skipping HPA handling\n");
1287             dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1288 
1289             /* we can continue if device aborted the command */
1290             if (rc == -EACCES)
1291                 rc = 0;
1292         }
1293 
1294         return rc;
1295     }
1296     dev->n_native_sectors = native_sectors;
1297 
1298     /* nothing to do? */
1299     if (native_sectors <= sectors || !unlock_hpa) {
1300         if (!print_info || native_sectors == sectors)
1301             return 0;
1302 
1303         if (native_sectors > sectors)
1304             ata_dev_info(dev,
1305                 "HPA detected: current %llu, native %llu\n",
1306                 (unsigned long long)sectors,
1307                 (unsigned long long)native_sectors);
1308         else if (native_sectors < sectors)
1309             ata_dev_warn(dev,
1310                 "native sectors (%llu) is smaller than sectors (%llu)\n",
1311                 (unsigned long long)native_sectors,
1312                 (unsigned long long)sectors);
1313         return 0;
1314     }
1315 
1316     /* let's unlock HPA */
1317     rc = ata_set_max_sectors(dev, native_sectors);
1318     if (rc == -EACCES) {
1319         /* if device aborted the command, skip HPA resizing */
1320         ata_dev_warn(dev,
1321                  "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1322                  (unsigned long long)sectors,
1323                  (unsigned long long)native_sectors);
1324         dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1325         return 0;
1326     } else if (rc)
1327         return rc;
1328 
1329     /* re-read IDENTIFY data */
1330     rc = ata_dev_reread_id(dev, 0);
1331     if (rc) {
1332         ata_dev_err(dev,
1333                 "failed to re-read IDENTIFY data after HPA resizing\n");
1334         return rc;
1335     }
1336 
1337     if (print_info) {
1338         u64 new_sectors = ata_id_n_sectors(dev->id);
1339         ata_dev_info(dev,
1340             "HPA unlocked: %llu -> %llu, native %llu\n",
1341             (unsigned long long)sectors,
1342             (unsigned long long)new_sectors,
1343             (unsigned long long)native_sectors);
1344     }
1345 
1346     return 0;
1347 }
1348 
1349 /**
1350  *  ata_dump_id - IDENTIFY DEVICE info debugging output
1351  *  @dev: device from which the information is fetched
1352  *  @id: IDENTIFY DEVICE page to dump
1353  *
1354  *  Dump selected 16-bit words from the given IDENTIFY DEVICE
1355  *  page.
1356  *
1357  *  LOCKING:
1358  *  caller.
1359  */
1360 
1361 static inline void ata_dump_id(struct ata_device *dev, const u16 *id)
1362 {
1363     ata_dev_dbg(dev,
1364         "49==0x%04x  53==0x%04x  63==0x%04x  64==0x%04x  75==0x%04x\n"
1365         "80==0x%04x  81==0x%04x  82==0x%04x  83==0x%04x  84==0x%04x\n"
1366         "88==0x%04x  93==0x%04x\n",
1367         id[49], id[53], id[63], id[64], id[75], id[80],
1368         id[81], id[82], id[83], id[84], id[88], id[93]);
1369 }
1370 
1371 /**
1372  *  ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1373  *  @id: IDENTIFY data to compute xfer mask from
1374  *
1375  *  Compute the xfermask for this device. This is not as trivial
1376  *  as it seems if we must consider early devices correctly.
1377  *
1378  *  FIXME: pre IDE drive timing (do we care ?).
1379  *
1380  *  LOCKING:
1381  *  None.
1382  *
1383  *  RETURNS:
1384  *  Computed xfermask
1385  */
1386 unsigned int ata_id_xfermask(const u16 *id)
1387 {
1388     unsigned int pio_mask, mwdma_mask, udma_mask;
1389 
1390     /* Usual case. Word 53 indicates word 64 is valid */
1391     if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1392         pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1393         pio_mask <<= 3;
1394         pio_mask |= 0x7;
1395     } else {
1396         /* If word 64 isn't valid then Word 51 high byte holds
1397          * the PIO timing number for the maximum. Turn it into
1398          * a mask.
1399          */
1400         u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1401         if (mode < 5)   /* Valid PIO range */
1402             pio_mask = (2 << mode) - 1;
1403         else
1404             pio_mask = 1;
1405 
1406         /* But wait.. there's more. Design your standards by
1407          * committee and you too can get a free iordy field to
1408          * process. However it is the speeds not the modes that
1409          * are supported... Note drivers using the timing API
1410          * will get this right anyway
1411          */
1412     }
1413 
1414     mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1415 
1416     if (ata_id_is_cfa(id)) {
1417         /*
1418          *  Process compact flash extended modes
1419          */
1420         int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1421         int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1422 
1423         if (pio)
1424             pio_mask |= (1 << 5);
1425         if (pio > 1)
1426             pio_mask |= (1 << 6);
1427         if (dma)
1428             mwdma_mask |= (1 << 3);
1429         if (dma > 1)
1430             mwdma_mask |= (1 << 4);
1431     }
1432 
1433     udma_mask = 0;
1434     if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1435         udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1436 
1437     return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1438 }
1439 EXPORT_SYMBOL_GPL(ata_id_xfermask);
1440 
1441 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1442 {
1443     struct completion *waiting = qc->private_data;
1444 
1445     complete(waiting);
1446 }
1447 
1448 /**
1449  *  ata_exec_internal_sg - execute libata internal command
1450  *  @dev: Device to which the command is sent
1451  *  @tf: Taskfile registers for the command and the result
1452  *  @cdb: CDB for packet command
1453  *  @dma_dir: Data transfer direction of the command
1454  *  @sgl: sg list for the data buffer of the command
1455  *  @n_elem: Number of sg entries
1456  *  @timeout: Timeout in msecs (0 for default)
1457  *
1458  *  Executes libata internal command with timeout.  @tf contains
1459  *  command on entry and result on return.  Timeout and error
1460  *  conditions are reported via return value.  No recovery action
1461  *  is taken after a command times out.  It's caller's duty to
1462  *  clean up after timeout.
1463  *
1464  *  LOCKING:
1465  *  None.  Should be called with kernel context, might sleep.
1466  *
1467  *  RETURNS:
1468  *  Zero on success, AC_ERR_* mask on failure
1469  */
1470 static unsigned ata_exec_internal_sg(struct ata_device *dev,
1471                      struct ata_taskfile *tf, const u8 *cdb,
1472                      int dma_dir, struct scatterlist *sgl,
1473                      unsigned int n_elem, unsigned int timeout)
1474 {
1475     struct ata_link *link = dev->link;
1476     struct ata_port *ap = link->ap;
1477     u8 command = tf->command;
1478     int auto_timeout = 0;
1479     struct ata_queued_cmd *qc;
1480     unsigned int preempted_tag;
1481     u32 preempted_sactive;
1482     u64 preempted_qc_active;
1483     int preempted_nr_active_links;
1484     DECLARE_COMPLETION_ONSTACK(wait);
1485     unsigned long flags;
1486     unsigned int err_mask;
1487     int rc;
1488 
1489     spin_lock_irqsave(ap->lock, flags);
1490 
1491     /* no internal command while frozen */
1492     if (ap->pflags & ATA_PFLAG_FROZEN) {
1493         spin_unlock_irqrestore(ap->lock, flags);
1494         return AC_ERR_SYSTEM;
1495     }
1496 
1497     /* initialize internal qc */
1498     qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
1499 
1500     qc->tag = ATA_TAG_INTERNAL;
1501     qc->hw_tag = 0;
1502     qc->scsicmd = NULL;
1503     qc->ap = ap;
1504     qc->dev = dev;
1505     ata_qc_reinit(qc);
1506 
1507     preempted_tag = link->active_tag;
1508     preempted_sactive = link->sactive;
1509     preempted_qc_active = ap->qc_active;
1510     preempted_nr_active_links = ap->nr_active_links;
1511     link->active_tag = ATA_TAG_POISON;
1512     link->sactive = 0;
1513     ap->qc_active = 0;
1514     ap->nr_active_links = 0;
1515 
1516     /* prepare & issue qc */
1517     qc->tf = *tf;
1518     if (cdb)
1519         memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1520 
1521     /* some SATA bridges need us to indicate data xfer direction */
1522     if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1523         dma_dir == DMA_FROM_DEVICE)
1524         qc->tf.feature |= ATAPI_DMADIR;
1525 
1526     qc->flags |= ATA_QCFLAG_RESULT_TF;
1527     qc->dma_dir = dma_dir;
1528     if (dma_dir != DMA_NONE) {
1529         unsigned int i, buflen = 0;
1530         struct scatterlist *sg;
1531 
1532         for_each_sg(sgl, sg, n_elem, i)
1533             buflen += sg->length;
1534 
1535         ata_sg_init(qc, sgl, n_elem);
1536         qc->nbytes = buflen;
1537     }
1538 
1539     qc->private_data = &wait;
1540     qc->complete_fn = ata_qc_complete_internal;
1541 
1542     ata_qc_issue(qc);
1543 
1544     spin_unlock_irqrestore(ap->lock, flags);
1545 
1546     if (!timeout) {
1547         if (ata_probe_timeout)
1548             timeout = ata_probe_timeout * 1000;
1549         else {
1550             timeout = ata_internal_cmd_timeout(dev, command);
1551             auto_timeout = 1;
1552         }
1553     }
1554 
1555     if (ap->ops->error_handler)
1556         ata_eh_release(ap);
1557 
1558     rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1559 
1560     if (ap->ops->error_handler)
1561         ata_eh_acquire(ap);
1562 
1563     ata_sff_flush_pio_task(ap);
1564 
1565     if (!rc) {
1566         spin_lock_irqsave(ap->lock, flags);
1567 
1568         /* We're racing with irq here.  If we lose, the
1569          * following test prevents us from completing the qc
1570          * twice.  If we win, the port is frozen and will be
1571          * cleaned up by ->post_internal_cmd().
1572          */
1573         if (qc->flags & ATA_QCFLAG_ACTIVE) {
1574             qc->err_mask |= AC_ERR_TIMEOUT;
1575 
1576             if (ap->ops->error_handler)
1577                 ata_port_freeze(ap);
1578             else
1579                 ata_qc_complete(qc);
1580 
1581             ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1582                      command);
1583         }
1584 
1585         spin_unlock_irqrestore(ap->lock, flags);
1586     }
1587 
1588     /* do post_internal_cmd */
1589     if (ap->ops->post_internal_cmd)
1590         ap->ops->post_internal_cmd(qc);
1591 
1592     /* perform minimal error analysis */
1593     if (qc->flags & ATA_QCFLAG_FAILED) {
1594         if (qc->result_tf.status & (ATA_ERR | ATA_DF))
1595             qc->err_mask |= AC_ERR_DEV;
1596 
1597         if (!qc->err_mask)
1598             qc->err_mask |= AC_ERR_OTHER;
1599 
1600         if (qc->err_mask & ~AC_ERR_OTHER)
1601             qc->err_mask &= ~AC_ERR_OTHER;
1602     } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
1603         qc->result_tf.status |= ATA_SENSE;
1604     }
1605 
1606     /* finish up */
1607     spin_lock_irqsave(ap->lock, flags);
1608 
1609     *tf = qc->result_tf;
1610     err_mask = qc->err_mask;
1611 
1612     ata_qc_free(qc);
1613     link->active_tag = preempted_tag;
1614     link->sactive = preempted_sactive;
1615     ap->qc_active = preempted_qc_active;
1616     ap->nr_active_links = preempted_nr_active_links;
1617 
1618     spin_unlock_irqrestore(ap->lock, flags);
1619 
1620     if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1621         ata_internal_cmd_timed_out(dev, command);
1622 
1623     return err_mask;
1624 }
1625 
1626 /**
1627  *  ata_exec_internal - execute libata internal command
1628  *  @dev: Device to which the command is sent
1629  *  @tf: Taskfile registers for the command and the result
1630  *  @cdb: CDB for packet command
1631  *  @dma_dir: Data transfer direction of the command
1632  *  @buf: Data buffer of the command
1633  *  @buflen: Length of data buffer
1634  *  @timeout: Timeout in msecs (0 for default)
1635  *
1636  *  Wrapper around ata_exec_internal_sg() which takes simple
1637  *  buffer instead of sg list.
1638  *
1639  *  LOCKING:
1640  *  None.  Should be called with kernel context, might sleep.
1641  *
1642  *  RETURNS:
1643  *  Zero on success, AC_ERR_* mask on failure
1644  */
1645 unsigned ata_exec_internal(struct ata_device *dev,
1646                struct ata_taskfile *tf, const u8 *cdb,
1647                int dma_dir, void *buf, unsigned int buflen,
1648                unsigned int timeout)
1649 {
1650     struct scatterlist *psg = NULL, sg;
1651     unsigned int n_elem = 0;
1652 
1653     if (dma_dir != DMA_NONE) {
1654         WARN_ON(!buf);
1655         sg_init_one(&sg, buf, buflen);
1656         psg = &sg;
1657         n_elem++;
1658     }
1659 
1660     return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1661                     timeout);
1662 }
1663 
1664 /**
1665  *  ata_pio_need_iordy  -   check if iordy needed
1666  *  @adev: ATA device
1667  *
1668  *  Check if the current speed of the device requires IORDY. Used
1669  *  by various controllers for chip configuration.
1670  */
1671 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1672 {
1673     /* Don't set IORDY if we're preparing for reset.  IORDY may
1674      * lead to controller lock up on certain controllers if the
1675      * port is not occupied.  See bko#11703 for details.
1676      */
1677     if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1678         return 0;
1679     /* Controller doesn't support IORDY.  Probably a pointless
1680      * check as the caller should know this.
1681      */
1682     if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1683         return 0;
1684     /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
1685     if (ata_id_is_cfa(adev->id)
1686         && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1687         return 0;
1688     /* PIO3 and higher it is mandatory */
1689     if (adev->pio_mode > XFER_PIO_2)
1690         return 1;
1691     /* We turn it on when possible */
1692     if (ata_id_has_iordy(adev->id))
1693         return 1;
1694     return 0;
1695 }
1696 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
1697 
1698 /**
1699  *  ata_pio_mask_no_iordy   -   Return the non IORDY mask
1700  *  @adev: ATA device
1701  *
1702  *  Compute the highest mode possible if we are not using iordy. Return
1703  *  -1 if no iordy mode is available.
1704  */
1705 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1706 {
1707     /* If we have no drive specific rule, then PIO 2 is non IORDY */
1708     if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1709         u16 pio = adev->id[ATA_ID_EIDE_PIO];
1710         /* Is the speed faster than the drive allows non IORDY ? */
1711         if (pio) {
1712             /* This is cycle times not frequency - watch the logic! */
1713             if (pio > 240)  /* PIO2 is 240nS per cycle */
1714                 return 3 << ATA_SHIFT_PIO;
1715             return 7 << ATA_SHIFT_PIO;
1716         }
1717     }
1718     return 3 << ATA_SHIFT_PIO;
1719 }
1720 
1721 /**
1722  *  ata_do_dev_read_id      -   default ID read method
1723  *  @dev: device
1724  *  @tf: proposed taskfile
1725  *  @id: data buffer
1726  *
1727  *  Issue the identify taskfile and hand back the buffer containing
1728  *  identify data. For some RAID controllers and for pre ATA devices
1729  *  this function is wrapped or replaced by the driver
1730  */
1731 unsigned int ata_do_dev_read_id(struct ata_device *dev,
1732                 struct ata_taskfile *tf, __le16 *id)
1733 {
1734     return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1735                      id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1736 }
1737 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
1738 
1739 /**
1740  *  ata_dev_read_id - Read ID data from the specified device
1741  *  @dev: target device
1742  *  @p_class: pointer to class of the target device (may be changed)
1743  *  @flags: ATA_READID_* flags
1744  *  @id: buffer to read IDENTIFY data into
1745  *
1746  *  Read ID data from the specified device.  ATA_CMD_ID_ATA is
1747  *  performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1748  *  devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1749  *  for pre-ATA4 drives.
1750  *
1751  *  FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1752  *  now we abort if we hit that case.
1753  *
1754  *  LOCKING:
1755  *  Kernel thread context (may sleep)
1756  *
1757  *  RETURNS:
1758  *  0 on success, -errno otherwise.
1759  */
1760 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1761             unsigned int flags, u16 *id)
1762 {
1763     struct ata_port *ap = dev->link->ap;
1764     unsigned int class = *p_class;
1765     struct ata_taskfile tf;
1766     unsigned int err_mask = 0;
1767     const char *reason;
1768     bool is_semb = class == ATA_DEV_SEMB;
1769     int may_fallback = 1, tried_spinup = 0;
1770     int rc;
1771 
1772 retry:
1773     ata_tf_init(dev, &tf);
1774 
1775     switch (class) {
1776     case ATA_DEV_SEMB:
1777         class = ATA_DEV_ATA;    /* some hard drives report SEMB sig */
1778         fallthrough;
1779     case ATA_DEV_ATA:
1780     case ATA_DEV_ZAC:
1781         tf.command = ATA_CMD_ID_ATA;
1782         break;
1783     case ATA_DEV_ATAPI:
1784         tf.command = ATA_CMD_ID_ATAPI;
1785         break;
1786     default:
1787         rc = -ENODEV;
1788         reason = "unsupported class";
1789         goto err_out;
1790     }
1791 
1792     tf.protocol = ATA_PROT_PIO;
1793 
1794     /* Some devices choke if TF registers contain garbage.  Make
1795      * sure those are properly initialized.
1796      */
1797     tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1798 
1799     /* Device presence detection is unreliable on some
1800      * controllers.  Always poll IDENTIFY if available.
1801      */
1802     tf.flags |= ATA_TFLAG_POLLING;
1803 
1804     if (ap->ops->read_id)
1805         err_mask = ap->ops->read_id(dev, &tf, (__le16 *)id);
1806     else
1807         err_mask = ata_do_dev_read_id(dev, &tf, (__le16 *)id);
1808 
1809     if (err_mask) {
1810         if (err_mask & AC_ERR_NODEV_HINT) {
1811             ata_dev_dbg(dev, "NODEV after polling detection\n");
1812             return -ENOENT;
1813         }
1814 
1815         if (is_semb) {
1816             ata_dev_info(dev,
1817              "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1818             /* SEMB is not supported yet */
1819             *p_class = ATA_DEV_SEMB_UNSUP;
1820             return 0;
1821         }
1822 
1823         if ((err_mask == AC_ERR_DEV) && (tf.error & ATA_ABORTED)) {
1824             /* Device or controller might have reported
1825              * the wrong device class.  Give a shot at the
1826              * other IDENTIFY if the current one is
1827              * aborted by the device.
1828              */
1829             if (may_fallback) {
1830                 may_fallback = 0;
1831 
1832                 if (class == ATA_DEV_ATA)
1833                     class = ATA_DEV_ATAPI;
1834                 else
1835                     class = ATA_DEV_ATA;
1836                 goto retry;
1837             }
1838 
1839             /* Control reaches here iff the device aborted
1840              * both flavors of IDENTIFYs which happens
1841              * sometimes with phantom devices.
1842              */
1843             ata_dev_dbg(dev,
1844                     "both IDENTIFYs aborted, assuming NODEV\n");
1845             return -ENOENT;
1846         }
1847 
1848         rc = -EIO;
1849         reason = "I/O error";
1850         goto err_out;
1851     }
1852 
1853     if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1854         ata_dev_info(dev, "dumping IDENTIFY data, "
1855                 "class=%d may_fallback=%d tried_spinup=%d\n",
1856                 class, may_fallback, tried_spinup);
1857         print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
1858                    16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1859     }
1860 
1861     /* Falling back doesn't make sense if ID data was read
1862      * successfully at least once.
1863      */
1864     may_fallback = 0;
1865 
1866     swap_buf_le16(id, ATA_ID_WORDS);
1867 
1868     /* sanity check */
1869     rc = -EINVAL;
1870     reason = "device reports invalid type";
1871 
1872     if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
1873         if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1874             goto err_out;
1875         if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1876                             ata_id_is_ata(id)) {
1877             ata_dev_dbg(dev,
1878                 "host indicates ignore ATA devices, ignored\n");
1879             return -ENOENT;
1880         }
1881     } else {
1882         if (ata_id_is_ata(id))
1883             goto err_out;
1884     }
1885 
1886     if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1887         tried_spinup = 1;
1888         /*
1889          * Drive powered-up in standby mode, and requires a specific
1890          * SET_FEATURES spin-up subcommand before it will accept
1891          * anything other than the original IDENTIFY command.
1892          */
1893         err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1894         if (err_mask && id[2] != 0x738c) {
1895             rc = -EIO;
1896             reason = "SPINUP failed";
1897             goto err_out;
1898         }
1899         /*
1900          * If the drive initially returned incomplete IDENTIFY info,
1901          * we now must reissue the IDENTIFY command.
1902          */
1903         if (id[2] == 0x37c8)
1904             goto retry;
1905     }
1906 
1907     if ((flags & ATA_READID_POSTRESET) &&
1908         (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
1909         /*
1910          * The exact sequence expected by certain pre-ATA4 drives is:
1911          * SRST RESET
1912          * IDENTIFY (optional in early ATA)
1913          * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
1914          * anything else..
1915          * Some drives were very specific about that exact sequence.
1916          *
1917          * Note that ATA4 says lba is mandatory so the second check
1918          * should never trigger.
1919          */
1920         if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1921             err_mask = ata_dev_init_params(dev, id[3], id[6]);
1922             if (err_mask) {
1923                 rc = -EIO;
1924                 reason = "INIT_DEV_PARAMS failed";
1925                 goto err_out;
1926             }
1927 
1928             /* current CHS translation info (id[53-58]) might be
1929              * changed. reread the identify device info.
1930              */
1931             flags &= ~ATA_READID_POSTRESET;
1932             goto retry;
1933         }
1934     }
1935 
1936     *p_class = class;
1937 
1938     return 0;
1939 
1940  err_out:
1941     ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
1942              reason, err_mask);
1943     return rc;
1944 }
1945 
1946 /**
1947  *  ata_read_log_page - read a specific log page
1948  *  @dev: target device
1949  *  @log: log to read
1950  *  @page: page to read
1951  *  @buf: buffer to store read page
1952  *  @sectors: number of sectors to read
1953  *
1954  *  Read log page using READ_LOG_EXT command.
1955  *
1956  *  LOCKING:
1957  *  Kernel thread context (may sleep).
1958  *
1959  *  RETURNS:
1960  *  0 on success, AC_ERR_* mask otherwise.
1961  */
1962 unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
1963                    u8 page, void *buf, unsigned int sectors)
1964 {
1965     unsigned long ap_flags = dev->link->ap->flags;
1966     struct ata_taskfile tf;
1967     unsigned int err_mask;
1968     bool dma = false;
1969 
1970     ata_dev_dbg(dev, "read log page - log 0x%x, page 0x%x\n", log, page);
1971 
1972     /*
1973      * Return error without actually issuing the command on controllers
1974      * which e.g. lockup on a read log page.
1975      */
1976     if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
1977         return AC_ERR_DEV;
1978 
1979 retry:
1980     ata_tf_init(dev, &tf);
1981     if (ata_dma_enabled(dev) && ata_id_has_read_log_dma_ext(dev->id) &&
1982         !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
1983         tf.command = ATA_CMD_READ_LOG_DMA_EXT;
1984         tf.protocol = ATA_PROT_DMA;
1985         dma = true;
1986     } else {
1987         tf.command = ATA_CMD_READ_LOG_EXT;
1988         tf.protocol = ATA_PROT_PIO;
1989         dma = false;
1990     }
1991     tf.lbal = log;
1992     tf.lbam = page;
1993     tf.nsect = sectors;
1994     tf.hob_nsect = sectors >> 8;
1995     tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
1996 
1997     err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1998                      buf, sectors * ATA_SECT_SIZE, 0);
1999 
2000     if (err_mask) {
2001         if (dma) {
2002             dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
2003             goto retry;
2004         }
2005         ata_dev_err(dev,
2006                 "Read log 0x%02x page 0x%02x failed, Emask 0x%x\n",
2007                 (unsigned int)log, (unsigned int)page, err_mask);
2008     }
2009 
2010     return err_mask;
2011 }
2012 
2013 static int ata_log_supported(struct ata_device *dev, u8 log)
2014 {
2015     struct ata_port *ap = dev->link->ap;
2016 
2017     if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
2018         return 0;
2019 
2020     if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
2021         return 0;
2022     return get_unaligned_le16(&ap->sector_buf[log * 2]);
2023 }
2024 
2025 static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
2026 {
2027     struct ata_port *ap = dev->link->ap;
2028     unsigned int err, i;
2029 
2030     if (dev->horkage & ATA_HORKAGE_NO_ID_DEV_LOG)
2031         return false;
2032 
2033     if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
2034         /*
2035          * IDENTIFY DEVICE data log is defined as mandatory starting
2036          * with ACS-3 (ATA version 10). Warn about the missing log
2037          * for drives which implement this ATA level or above.
2038          */
2039         if (ata_id_major_version(dev->id) >= 10)
2040             ata_dev_warn(dev,
2041                 "ATA Identify Device Log not supported\n");
2042         dev->horkage |= ATA_HORKAGE_NO_ID_DEV_LOG;
2043         return false;
2044     }
2045 
2046     /*
2047      * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
2048      * supported.
2049      */
2050     err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
2051                 1);
2052     if (err)
2053         return false;
2054 
2055     for (i = 0; i < ap->sector_buf[8]; i++) {
2056         if (ap->sector_buf[9 + i] == page)
2057             return true;
2058     }
2059 
2060     return false;
2061 }
2062 
2063 static int ata_do_link_spd_horkage(struct ata_device *dev)
2064 {
2065     struct ata_link *plink = ata_dev_phys_link(dev);
2066     u32 target, target_limit;
2067 
2068     if (!sata_scr_valid(plink))
2069         return 0;
2070 
2071     if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2072         target = 1;
2073     else
2074         return 0;
2075 
2076     target_limit = (1 << target) - 1;
2077 
2078     /* if already on stricter limit, no need to push further */
2079     if (plink->sata_spd_limit <= target_limit)
2080         return 0;
2081 
2082     plink->sata_spd_limit = target_limit;
2083 
2084     /* Request another EH round by returning -EAGAIN if link is
2085      * going faster than the target speed.  Forward progress is
2086      * guaranteed by setting sata_spd_limit to target_limit above.
2087      */
2088     if (plink->sata_spd > target) {
2089         ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2090                  sata_spd_string(target));
2091         return -EAGAIN;
2092     }
2093     return 0;
2094 }
2095 
2096 static inline u8 ata_dev_knobble(struct ata_device *dev)
2097 {
2098     struct ata_port *ap = dev->link->ap;
2099 
2100     if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2101         return 0;
2102 
2103     return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2104 }
2105 
2106 static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
2107 {
2108     struct ata_port *ap = dev->link->ap;
2109     unsigned int err_mask;
2110 
2111     if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
2112         ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
2113         return;
2114     }
2115     err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2116                      0, ap->sector_buf, 1);
2117     if (!err_mask) {
2118         u8 *cmds = dev->ncq_send_recv_cmds;
2119 
2120         dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2121         memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2122 
2123         if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2124             ata_dev_dbg(dev, "disabling queued TRIM support\n");
2125             cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2126                 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2127         }
2128     }
2129 }
2130 
2131 static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2132 {
2133     struct ata_port *ap = dev->link->ap;
2134     unsigned int err_mask;
2135 
2136     if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
2137         ata_dev_warn(dev,
2138                  "NCQ Send/Recv Log not supported\n");
2139         return;
2140     }
2141     err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2142                      0, ap->sector_buf, 1);
2143     if (!err_mask) {
2144         u8 *cmds = dev->ncq_non_data_cmds;
2145 
2146         memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2147     }
2148 }
2149 
2150 static void ata_dev_config_ncq_prio(struct ata_device *dev)
2151 {
2152     struct ata_port *ap = dev->link->ap;
2153     unsigned int err_mask;
2154 
2155     if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
2156         return;
2157 
2158     err_mask = ata_read_log_page(dev,
2159                      ATA_LOG_IDENTIFY_DEVICE,
2160                      ATA_LOG_SATA_SETTINGS,
2161                      ap->sector_buf,
2162                      1);
2163     if (err_mask)
2164         goto not_supported;
2165 
2166     if (!(ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)))
2167         goto not_supported;
2168 
2169     dev->flags |= ATA_DFLAG_NCQ_PRIO;
2170 
2171     return;
2172 
2173 not_supported:
2174     dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
2175     dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2176 }
2177 
2178 static bool ata_dev_check_adapter(struct ata_device *dev,
2179                   unsigned short vendor_id)
2180 {
2181     struct pci_dev *pcidev = NULL;
2182     struct device *parent_dev = NULL;
2183 
2184     for (parent_dev = dev->tdev.parent; parent_dev != NULL;
2185          parent_dev = parent_dev->parent) {
2186         if (dev_is_pci(parent_dev)) {
2187             pcidev = to_pci_dev(parent_dev);
2188             if (pcidev->vendor == vendor_id)
2189                 return true;
2190             break;
2191         }
2192     }
2193 
2194     return false;
2195 }
2196 
2197 static int ata_dev_config_ncq(struct ata_device *dev,
2198                    char *desc, size_t desc_sz)
2199 {
2200     struct ata_port *ap = dev->link->ap;
2201     int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2202     unsigned int err_mask;
2203     char *aa_desc = "";
2204 
2205     if (!ata_id_has_ncq(dev->id)) {
2206         desc[0] = '\0';
2207         return 0;
2208     }
2209     if (!IS_ENABLED(CONFIG_SATA_HOST))
2210         return 0;
2211     if (dev->horkage & ATA_HORKAGE_NONCQ) {
2212         snprintf(desc, desc_sz, "NCQ (not used)");
2213         return 0;
2214     }
2215 
2216     if (dev->horkage & ATA_HORKAGE_NO_NCQ_ON_ATI &&
2217         ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) {
2218         snprintf(desc, desc_sz, "NCQ (not used)");
2219         return 0;
2220     }
2221 
2222     if (ap->flags & ATA_FLAG_NCQ) {
2223         hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
2224         dev->flags |= ATA_DFLAG_NCQ;
2225     }
2226 
2227     if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2228         (ap->flags & ATA_FLAG_FPDMA_AA) &&
2229         ata_id_has_fpdma_aa(dev->id)) {
2230         err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2231             SATA_FPDMA_AA);
2232         if (err_mask) {
2233             ata_dev_err(dev,
2234                     "failed to enable AA (error_mask=0x%x)\n",
2235                     err_mask);
2236             if (err_mask != AC_ERR_DEV) {
2237                 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2238                 return -EIO;
2239             }
2240         } else
2241             aa_desc = ", AA";
2242     }
2243 
2244     if (hdepth >= ddepth)
2245         snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2246     else
2247         snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2248             ddepth, aa_desc);
2249 
2250     if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
2251         if (ata_id_has_ncq_send_and_recv(dev->id))
2252             ata_dev_config_ncq_send_recv(dev);
2253         if (ata_id_has_ncq_non_data(dev->id))
2254             ata_dev_config_ncq_non_data(dev);
2255         if (ata_id_has_ncq_prio(dev->id))
2256             ata_dev_config_ncq_prio(dev);
2257     }
2258 
2259     return 0;
2260 }
2261 
2262 static void ata_dev_config_sense_reporting(struct ata_device *dev)
2263 {
2264     unsigned int err_mask;
2265 
2266     if (!ata_id_has_sense_reporting(dev->id))
2267         return;
2268 
2269     if (ata_id_sense_reporting_enabled(dev->id))
2270         return;
2271 
2272     err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2273     if (err_mask) {
2274         ata_dev_dbg(dev,
2275                 "failed to enable Sense Data Reporting, Emask 0x%x\n",
2276                 err_mask);
2277     }
2278 }
2279 
2280 static void ata_dev_config_zac(struct ata_device *dev)
2281 {
2282     struct ata_port *ap = dev->link->ap;
2283     unsigned int err_mask;
2284     u8 *identify_buf = ap->sector_buf;
2285 
2286     dev->zac_zones_optimal_open = U32_MAX;
2287     dev->zac_zones_optimal_nonseq = U32_MAX;
2288     dev->zac_zones_max_open = U32_MAX;
2289 
2290     /*
2291      * Always set the 'ZAC' flag for Host-managed devices.
2292      */
2293     if (dev->class == ATA_DEV_ZAC)
2294         dev->flags |= ATA_DFLAG_ZAC;
2295     else if (ata_id_zoned_cap(dev->id) == 0x01)
2296         /*
2297          * Check for host-aware devices.
2298          */
2299         dev->flags |= ATA_DFLAG_ZAC;
2300 
2301     if (!(dev->flags & ATA_DFLAG_ZAC))
2302         return;
2303 
2304     if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
2305         ata_dev_warn(dev,
2306                  "ATA Zoned Information Log not supported\n");
2307         return;
2308     }
2309 
2310     /*
2311      * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
2312      */
2313     err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
2314                      ATA_LOG_ZONED_INFORMATION,
2315                      identify_buf, 1);
2316     if (!err_mask) {
2317         u64 zoned_cap, opt_open, opt_nonseq, max_open;
2318 
2319         zoned_cap = get_unaligned_le64(&identify_buf[8]);
2320         if ((zoned_cap >> 63))
2321             dev->zac_zoned_cap = (zoned_cap & 1);
2322         opt_open = get_unaligned_le64(&identify_buf[24]);
2323         if ((opt_open >> 63))
2324             dev->zac_zones_optimal_open = (u32)opt_open;
2325         opt_nonseq = get_unaligned_le64(&identify_buf[32]);
2326         if ((opt_nonseq >> 63))
2327             dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
2328         max_open = get_unaligned_le64(&identify_buf[40]);
2329         if ((max_open >> 63))
2330             dev->zac_zones_max_open = (u32)max_open;
2331     }
2332 }
2333 
2334 static void ata_dev_config_trusted(struct ata_device *dev)
2335 {
2336     struct ata_port *ap = dev->link->ap;
2337     u64 trusted_cap;
2338     unsigned int err;
2339 
2340     if (!ata_id_has_trusted(dev->id))
2341         return;
2342 
2343     if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
2344         ata_dev_warn(dev,
2345                  "Security Log not supported\n");
2346         return;
2347     }
2348 
2349     err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
2350             ap->sector_buf, 1);
2351     if (err)
2352         return;
2353 
2354     trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
2355     if (!(trusted_cap & (1ULL << 63))) {
2356         ata_dev_dbg(dev,
2357                 "Trusted Computing capability qword not valid!\n");
2358         return;
2359     }
2360 
2361     if (trusted_cap & (1 << 0))
2362         dev->flags |= ATA_DFLAG_TRUSTED;
2363 }
2364 
2365 static int ata_dev_config_lba(struct ata_device *dev)
2366 {
2367     const u16 *id = dev->id;
2368     const char *lba_desc;
2369     char ncq_desc[24];
2370     int ret;
2371 
2372     dev->flags |= ATA_DFLAG_LBA;
2373 
2374     if (ata_id_has_lba48(id)) {
2375         lba_desc = "LBA48";
2376         dev->flags |= ATA_DFLAG_LBA48;
2377         if (dev->n_sectors >= (1UL << 28) &&
2378             ata_id_has_flush_ext(id))
2379             dev->flags |= ATA_DFLAG_FLUSH_EXT;
2380     } else {
2381         lba_desc = "LBA";
2382     }
2383 
2384     /* config NCQ */
2385     ret = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2386 
2387     /* print device info to dmesg */
2388     if (ata_dev_print_info(dev))
2389         ata_dev_info(dev,
2390                  "%llu sectors, multi %u: %s %s\n",
2391                  (unsigned long long)dev->n_sectors,
2392                  dev->multi_count, lba_desc, ncq_desc);
2393 
2394     return ret;
2395 }
2396 
2397 static void ata_dev_config_chs(struct ata_device *dev)
2398 {
2399     const u16 *id = dev->id;
2400 
2401     if (ata_id_current_chs_valid(id)) {
2402         /* Current CHS translation is valid. */
2403         dev->cylinders = id[54];
2404         dev->heads     = id[55];
2405         dev->sectors   = id[56];
2406     } else {
2407         /* Default translation */
2408         dev->cylinders  = id[1];
2409         dev->heads  = id[3];
2410         dev->sectors    = id[6];
2411     }
2412 
2413     /* print device info to dmesg */
2414     if (ata_dev_print_info(dev))
2415         ata_dev_info(dev,
2416                  "%llu sectors, multi %u, CHS %u/%u/%u\n",
2417                  (unsigned long long)dev->n_sectors,
2418                  dev->multi_count, dev->cylinders,
2419                  dev->heads, dev->sectors);
2420 }
2421 
2422 static void ata_dev_config_devslp(struct ata_device *dev)
2423 {
2424     u8 *sata_setting = dev->link->ap->sector_buf;
2425     unsigned int err_mask;
2426     int i, j;
2427 
2428     /*
2429      * Check device sleep capability. Get DevSlp timing variables
2430      * from SATA Settings page of Identify Device Data Log.
2431      */
2432     if (!ata_id_has_devslp(dev->id) ||
2433         !ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
2434         return;
2435 
2436     err_mask = ata_read_log_page(dev,
2437                      ATA_LOG_IDENTIFY_DEVICE,
2438                      ATA_LOG_SATA_SETTINGS,
2439                      sata_setting, 1);
2440     if (err_mask)
2441         return;
2442 
2443     dev->flags |= ATA_DFLAG_DEVSLP;
2444     for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2445         j = ATA_LOG_DEVSLP_OFFSET + i;
2446         dev->devslp_timing[i] = sata_setting[j];
2447     }
2448 }
2449 
2450 static void ata_dev_config_cpr(struct ata_device *dev)
2451 {
2452     unsigned int err_mask;
2453     size_t buf_len;
2454     int i, nr_cpr = 0;
2455     struct ata_cpr_log *cpr_log = NULL;
2456     u8 *desc, *buf = NULL;
2457 
2458     if (ata_id_major_version(dev->id) < 11)
2459         goto out;
2460 
2461     buf_len = ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES);
2462     if (buf_len == 0)
2463         goto out;
2464 
2465     /*
2466      * Read the concurrent positioning ranges log (0x47). We can have at
2467      * most 255 32B range descriptors plus a 64B header. This log varies in
2468      * size, so use the size reported in the GPL directory. Reading beyond
2469      * the supported length will result in an error.
2470      */
2471     buf_len <<= 9;
2472     buf = kzalloc(buf_len, GFP_KERNEL);
2473     if (!buf)
2474         goto out;
2475 
2476     err_mask = ata_read_log_page(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES,
2477                      0, buf, buf_len >> 9);
2478     if (err_mask)
2479         goto out;
2480 
2481     nr_cpr = buf[0];
2482     if (!nr_cpr)
2483         goto out;
2484 
2485     cpr_log = kzalloc(struct_size(cpr_log, cpr, nr_cpr), GFP_KERNEL);
2486     if (!cpr_log)
2487         goto out;
2488 
2489     cpr_log->nr_cpr = nr_cpr;
2490     desc = &buf[64];
2491     for (i = 0; i < nr_cpr; i++, desc += 32) {
2492         cpr_log->cpr[i].num = desc[0];
2493         cpr_log->cpr[i].num_storage_elements = desc[1];
2494         cpr_log->cpr[i].start_lba = get_unaligned_le64(&desc[8]);
2495         cpr_log->cpr[i].num_lbas = get_unaligned_le64(&desc[16]);
2496     }
2497 
2498 out:
2499     swap(dev->cpr_log, cpr_log);
2500     kfree(cpr_log);
2501     kfree(buf);
2502 }
2503 
2504 static void ata_dev_print_features(struct ata_device *dev)
2505 {
2506     if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
2507         return;
2508 
2509     ata_dev_info(dev,
2510              "Features:%s%s%s%s%s%s\n",
2511              dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
2512              dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
2513              dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
2514              dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
2515              dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
2516              dev->cpr_log ? " CPR" : "");
2517 }
2518 
2519 /**
2520  *  ata_dev_configure - Configure the specified ATA/ATAPI device
2521  *  @dev: Target device to configure
2522  *
2523  *  Configure @dev according to @dev->id.  Generic and low-level
2524  *  driver specific fixups are also applied.
2525  *
2526  *  LOCKING:
2527  *  Kernel thread context (may sleep)
2528  *
2529  *  RETURNS:
2530  *  0 on success, -errno otherwise
2531  */
2532 int ata_dev_configure(struct ata_device *dev)
2533 {
2534     struct ata_port *ap = dev->link->ap;
2535     bool print_info = ata_dev_print_info(dev);
2536     const u16 *id = dev->id;
2537     unsigned int xfer_mask;
2538     unsigned int err_mask;
2539     char revbuf[7];     /* XYZ-99\0 */
2540     char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2541     char modelbuf[ATA_ID_PROD_LEN+1];
2542     int rc;
2543 
2544     if (!ata_dev_enabled(dev)) {
2545         ata_dev_dbg(dev, "no device\n");
2546         return 0;
2547     }
2548 
2549     /* set horkage */
2550     dev->horkage |= ata_dev_blacklisted(dev);
2551     ata_force_horkage(dev);
2552 
2553     if (dev->horkage & ATA_HORKAGE_DISABLE) {
2554         ata_dev_info(dev, "unsupported device, disabling\n");
2555         ata_dev_disable(dev);
2556         return 0;
2557     }
2558 
2559     if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2560         dev->class == ATA_DEV_ATAPI) {
2561         ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2562                  atapi_enabled ? "not supported with this driver"
2563                  : "disabled");
2564         ata_dev_disable(dev);
2565         return 0;
2566     }
2567 
2568     rc = ata_do_link_spd_horkage(dev);
2569     if (rc)
2570         return rc;
2571 
2572     /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2573     if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2574         (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2575         dev->horkage |= ATA_HORKAGE_NOLPM;
2576 
2577     if (ap->flags & ATA_FLAG_NO_LPM)
2578         dev->horkage |= ATA_HORKAGE_NOLPM;
2579 
2580     if (dev->horkage & ATA_HORKAGE_NOLPM) {
2581         ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2582         dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2583     }
2584 
2585     /* let ACPI work its magic */
2586     rc = ata_acpi_on_devcfg(dev);
2587     if (rc)
2588         return rc;
2589 
2590     /* massage HPA, do it early as it might change IDENTIFY data */
2591     rc = ata_hpa_resize(dev);
2592     if (rc)
2593         return rc;
2594 
2595     /* print device capabilities */
2596     ata_dev_dbg(dev,
2597             "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2598             "85:%04x 86:%04x 87:%04x 88:%04x\n",
2599             __func__,
2600             id[49], id[82], id[83], id[84],
2601             id[85], id[86], id[87], id[88]);
2602 
2603     /* initialize to-be-configured parameters */
2604     dev->flags &= ~ATA_DFLAG_CFG_MASK;
2605     dev->max_sectors = 0;
2606     dev->cdb_len = 0;
2607     dev->n_sectors = 0;
2608     dev->cylinders = 0;
2609     dev->heads = 0;
2610     dev->sectors = 0;
2611     dev->multi_count = 0;
2612 
2613     /*
2614      * common ATA, ATAPI feature tests
2615      */
2616 
2617     /* find max transfer mode; for printk only */
2618     xfer_mask = ata_id_xfermask(id);
2619 
2620     ata_dump_id(dev, id);
2621 
2622     /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2623     ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2624             sizeof(fwrevbuf));
2625 
2626     ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2627             sizeof(modelbuf));
2628 
2629     /* ATA-specific feature tests */
2630     if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
2631         if (ata_id_is_cfa(id)) {
2632             /* CPRM may make this media unusable */
2633             if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2634                 ata_dev_warn(dev,
2635     "supports DRM functions and may not be fully accessible\n");
2636             snprintf(revbuf, 7, "CFA");
2637         } else {
2638             snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2639             /* Warn the user if the device has TPM extensions */
2640             if (ata_id_has_tpm(id))
2641                 ata_dev_warn(dev,
2642     "supports DRM functions and may not be fully accessible\n");
2643         }
2644 
2645         dev->n_sectors = ata_id_n_sectors(id);
2646 
2647         /* get current R/W Multiple count setting */
2648         if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2649             unsigned int max = dev->id[47] & 0xff;
2650             unsigned int cnt = dev->id[59] & 0xff;
2651             /* only recognize/allow powers of two here */
2652             if (is_power_of_2(max) && is_power_of_2(cnt))
2653                 if (cnt <= max)
2654                     dev->multi_count = cnt;
2655         }
2656 
2657         /* print device info to dmesg */
2658         if (print_info)
2659             ata_dev_info(dev, "%s: %s, %s, max %s\n",
2660                      revbuf, modelbuf, fwrevbuf,
2661                      ata_mode_string(xfer_mask));
2662 
2663         if (ata_id_has_lba(id)) {
2664             rc = ata_dev_config_lba(dev);
2665             if (rc)
2666                 return rc;
2667         } else {
2668             ata_dev_config_chs(dev);
2669         }
2670 
2671         ata_dev_config_devslp(dev);
2672         ata_dev_config_sense_reporting(dev);
2673         ata_dev_config_zac(dev);
2674         ata_dev_config_trusted(dev);
2675         ata_dev_config_cpr(dev);
2676         dev->cdb_len = 32;
2677 
2678         if (print_info)
2679             ata_dev_print_features(dev);
2680     }
2681 
2682     /* ATAPI-specific feature tests */
2683     else if (dev->class == ATA_DEV_ATAPI) {
2684         const char *cdb_intr_string = "";
2685         const char *atapi_an_string = "";
2686         const char *dma_dir_string = "";
2687         u32 sntf;
2688 
2689         rc = atapi_cdb_len(id);
2690         if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2691             ata_dev_warn(dev, "unsupported CDB len %d\n", rc);
2692             rc = -EINVAL;
2693             goto err_out_nosup;
2694         }
2695         dev->cdb_len = (unsigned int) rc;
2696 
2697         /* Enable ATAPI AN if both the host and device have
2698          * the support.  If PMP is attached, SNTF is required
2699          * to enable ATAPI AN to discern between PHY status
2700          * changed notifications and ATAPI ANs.
2701          */
2702         if (atapi_an &&
2703             (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2704             (!sata_pmp_attached(ap) ||
2705              sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2706             /* issue SET feature command to turn this on */
2707             err_mask = ata_dev_set_feature(dev,
2708                     SETFEATURES_SATA_ENABLE, SATA_AN);
2709             if (err_mask)
2710                 ata_dev_err(dev,
2711                         "failed to enable ATAPI AN (err_mask=0x%x)\n",
2712                         err_mask);
2713             else {
2714                 dev->flags |= ATA_DFLAG_AN;
2715                 atapi_an_string = ", ATAPI AN";
2716             }
2717         }
2718 
2719         if (ata_id_cdb_intr(dev->id)) {
2720             dev->flags |= ATA_DFLAG_CDB_INTR;
2721             cdb_intr_string = ", CDB intr";
2722         }
2723 
2724         if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
2725             dev->flags |= ATA_DFLAG_DMADIR;
2726             dma_dir_string = ", DMADIR";
2727         }
2728 
2729         if (ata_id_has_da(dev->id)) {
2730             dev->flags |= ATA_DFLAG_DA;
2731             zpodd_init(dev);
2732         }
2733 
2734         /* print device info to dmesg */
2735         if (print_info)
2736             ata_dev_info(dev,
2737                      "ATAPI: %s, %s, max %s%s%s%s\n",
2738                      modelbuf, fwrevbuf,
2739                      ata_mode_string(xfer_mask),
2740                      cdb_intr_string, atapi_an_string,
2741                      dma_dir_string);
2742     }
2743 
2744     /* determine max_sectors */
2745     dev->max_sectors = ATA_MAX_SECTORS;
2746     if (dev->flags & ATA_DFLAG_LBA48)
2747         dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2748 
2749     /* Limit PATA drive on SATA cable bridge transfers to udma5,
2750        200 sectors */
2751     if (ata_dev_knobble(dev)) {
2752         if (print_info)
2753             ata_dev_info(dev, "applying bridge limits\n");
2754         dev->udma_mask &= ATA_UDMA5;
2755         dev->max_sectors = ATA_MAX_SECTORS;
2756     }
2757 
2758     if ((dev->class == ATA_DEV_ATAPI) &&
2759         (atapi_command_packet_set(id) == TYPE_TAPE)) {
2760         dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2761         dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2762     }
2763 
2764     if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2765         dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2766                      dev->max_sectors);
2767 
2768     if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
2769         dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
2770                      dev->max_sectors);
2771 
2772     if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2773         dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2774 
2775     if (ap->ops->dev_config)
2776         ap->ops->dev_config(dev);
2777 
2778     if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2779         /* Let the user know. We don't want to disallow opens for
2780            rescue purposes, or in case the vendor is just a blithering
2781            idiot. Do this after the dev_config call as some controllers
2782            with buggy firmware may want to avoid reporting false device
2783            bugs */
2784 
2785         if (print_info) {
2786             ata_dev_warn(dev,
2787 "Drive reports diagnostics failure. This may indicate a drive\n");
2788             ata_dev_warn(dev,
2789 "fault or invalid emulation. Contact drive vendor for information.\n");
2790         }
2791     }
2792 
2793     if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2794         ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2795         ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
2796     }
2797 
2798     return 0;
2799 
2800 err_out_nosup:
2801     return rc;
2802 }
2803 
2804 /**
2805  *  ata_cable_40wire    -   return 40 wire cable type
2806  *  @ap: port
2807  *
2808  *  Helper method for drivers which want to hardwire 40 wire cable
2809  *  detection.
2810  */
2811 
2812 int ata_cable_40wire(struct ata_port *ap)
2813 {
2814     return ATA_CBL_PATA40;
2815 }
2816 EXPORT_SYMBOL_GPL(ata_cable_40wire);
2817 
2818 /**
2819  *  ata_cable_80wire    -   return 80 wire cable type
2820  *  @ap: port
2821  *
2822  *  Helper method for drivers which want to hardwire 80 wire cable
2823  *  detection.
2824  */
2825 
2826 int ata_cable_80wire(struct ata_port *ap)
2827 {
2828     return ATA_CBL_PATA80;
2829 }
2830 EXPORT_SYMBOL_GPL(ata_cable_80wire);
2831 
2832 /**
2833  *  ata_cable_unknown   -   return unknown PATA cable.
2834  *  @ap: port
2835  *
2836  *  Helper method for drivers which have no PATA cable detection.
2837  */
2838 
2839 int ata_cable_unknown(struct ata_port *ap)
2840 {
2841     return ATA_CBL_PATA_UNK;
2842 }
2843 EXPORT_SYMBOL_GPL(ata_cable_unknown);
2844 
2845 /**
2846  *  ata_cable_ignore    -   return ignored PATA cable.
2847  *  @ap: port
2848  *
2849  *  Helper method for drivers which don't use cable type to limit
2850  *  transfer mode.
2851  */
2852 int ata_cable_ignore(struct ata_port *ap)
2853 {
2854     return ATA_CBL_PATA_IGN;
2855 }
2856 EXPORT_SYMBOL_GPL(ata_cable_ignore);
2857 
2858 /**
2859  *  ata_cable_sata  -   return SATA cable type
2860  *  @ap: port
2861  *
2862  *  Helper method for drivers which have SATA cables
2863  */
2864 
2865 int ata_cable_sata(struct ata_port *ap)
2866 {
2867     return ATA_CBL_SATA;
2868 }
2869 EXPORT_SYMBOL_GPL(ata_cable_sata);
2870 
2871 /**
2872  *  ata_bus_probe - Reset and probe ATA bus
2873  *  @ap: Bus to probe
2874  *
2875  *  Master ATA bus probing function.  Initiates a hardware-dependent
2876  *  bus reset, then attempts to identify any devices found on
2877  *  the bus.
2878  *
2879  *  LOCKING:
2880  *  PCI/etc. bus probe sem.
2881  *
2882  *  RETURNS:
2883  *  Zero on success, negative errno otherwise.
2884  */
2885 
2886 int ata_bus_probe(struct ata_port *ap)
2887 {
2888     unsigned int classes[ATA_MAX_DEVICES];
2889     int tries[ATA_MAX_DEVICES];
2890     int rc;
2891     struct ata_device *dev;
2892 
2893     ata_for_each_dev(dev, &ap->link, ALL)
2894         tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2895 
2896  retry:
2897     ata_for_each_dev(dev, &ap->link, ALL) {
2898         /* If we issue an SRST then an ATA drive (not ATAPI)
2899          * may change configuration and be in PIO0 timing. If
2900          * we do a hard reset (or are coming from power on)
2901          * this is true for ATA or ATAPI. Until we've set a
2902          * suitable controller mode we should not touch the
2903          * bus as we may be talking too fast.
2904          */
2905         dev->pio_mode = XFER_PIO_0;
2906         dev->dma_mode = 0xff;
2907 
2908         /* If the controller has a pio mode setup function
2909          * then use it to set the chipset to rights. Don't
2910          * touch the DMA setup as that will be dealt with when
2911          * configuring devices.
2912          */
2913         if (ap->ops->set_piomode)
2914             ap->ops->set_piomode(ap, dev);
2915     }
2916 
2917     /* reset and determine device classes */
2918     ap->ops->phy_reset(ap);
2919 
2920     ata_for_each_dev(dev, &ap->link, ALL) {
2921         if (dev->class != ATA_DEV_UNKNOWN)
2922             classes[dev->devno] = dev->class;
2923         else
2924             classes[dev->devno] = ATA_DEV_NONE;
2925 
2926         dev->class = ATA_DEV_UNKNOWN;
2927     }
2928 
2929     /* read IDENTIFY page and configure devices. We have to do the identify
2930        specific sequence bass-ackwards so that PDIAG- is released by
2931        the slave device */
2932 
2933     ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2934         if (tries[dev->devno])
2935             dev->class = classes[dev->devno];
2936 
2937         if (!ata_dev_enabled(dev))
2938             continue;
2939 
2940         rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2941                      dev->id);
2942         if (rc)
2943             goto fail;
2944     }
2945 
2946     /* Now ask for the cable type as PDIAG- should have been released */
2947     if (ap->ops->cable_detect)
2948         ap->cbl = ap->ops->cable_detect(ap);
2949 
2950     /* We may have SATA bridge glue hiding here irrespective of
2951      * the reported cable types and sensed types.  When SATA
2952      * drives indicate we have a bridge, we don't know which end
2953      * of the link the bridge is which is a problem.
2954      */
2955     ata_for_each_dev(dev, &ap->link, ENABLED)
2956         if (ata_id_is_sata(dev->id))
2957             ap->cbl = ATA_CBL_SATA;
2958 
2959     /* After the identify sequence we can now set up the devices. We do
2960        this in the normal order so that the user doesn't get confused */
2961 
2962     ata_for_each_dev(dev, &ap->link, ENABLED) {
2963         ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2964         rc = ata_dev_configure(dev);
2965         ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2966         if (rc)
2967             goto fail;
2968     }
2969 
2970     /* configure transfer mode */
2971     rc = ata_set_mode(&ap->link, &dev);
2972     if (rc)
2973         goto fail;
2974 
2975     ata_for_each_dev(dev, &ap->link, ENABLED)
2976         return 0;
2977 
2978     return -ENODEV;
2979 
2980  fail:
2981     tries[dev->devno]--;
2982 
2983     switch (rc) {
2984     case -EINVAL:
2985         /* eeek, something went very wrong, give up */
2986         tries[dev->devno] = 0;
2987         break;
2988 
2989     case -ENODEV:
2990         /* give it just one more chance */
2991         tries[dev->devno] = min(tries[dev->devno], 1);
2992         fallthrough;
2993     case -EIO:
2994         if (tries[dev->devno] == 1) {
2995             /* This is the last chance, better to slow
2996              * down than lose it.
2997              */
2998             sata_down_spd_limit(&ap->link, 0);
2999             ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3000         }
3001     }
3002 
3003     if (!tries[dev->devno])
3004         ata_dev_disable(dev);
3005 
3006     goto retry;
3007 }
3008 
3009 /**
3010  *  sata_print_link_status - Print SATA link status
3011  *  @link: SATA link to printk link status about
3012  *
3013  *  This function prints link speed and status of a SATA link.
3014  *
3015  *  LOCKING:
3016  *  None.
3017  */
3018 static void sata_print_link_status(struct ata_link *link)
3019 {
3020     u32 sstatus, scontrol, tmp;
3021 
3022     if (sata_scr_read(link, SCR_STATUS, &sstatus))
3023         return;
3024     sata_scr_read(link, SCR_CONTROL, &scontrol);
3025 
3026     if (ata_phys_link_online(link)) {
3027         tmp = (sstatus >> 4) & 0xf;
3028         ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
3029                   sata_spd_string(tmp), sstatus, scontrol);
3030     } else {
3031         ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
3032                   sstatus, scontrol);
3033     }
3034 }
3035 
3036 /**
3037  *  ata_dev_pair        -   return other device on cable
3038  *  @adev: device
3039  *
3040  *  Obtain the other device on the same cable, or if none is
3041  *  present NULL is returned
3042  */
3043 
3044 struct ata_device *ata_dev_pair(struct ata_device *adev)
3045 {
3046     struct ata_link *link = adev->link;
3047     struct ata_device *pair = &link->device[1 - adev->devno];
3048     if (!ata_dev_enabled(pair))
3049         return NULL;
3050     return pair;
3051 }
3052 EXPORT_SYMBOL_GPL(ata_dev_pair);
3053 
3054 /**
3055  *  sata_down_spd_limit - adjust SATA spd limit downward
3056  *  @link: Link to adjust SATA spd limit for
3057  *  @spd_limit: Additional limit
3058  *
3059  *  Adjust SATA spd limit of @link downward.  Note that this
3060  *  function only adjusts the limit.  The change must be applied
3061  *  using sata_set_spd().
3062  *
3063  *  If @spd_limit is non-zero, the speed is limited to equal to or
3064  *  lower than @spd_limit if such speed is supported.  If
3065  *  @spd_limit is slower than any supported speed, only the lowest
3066  *  supported speed is allowed.
3067  *
3068  *  LOCKING:
3069  *  Inherited from caller.
3070  *
3071  *  RETURNS:
3072  *  0 on success, negative errno on failure
3073  */
3074 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
3075 {
3076     u32 sstatus, spd, mask;
3077     int rc, bit;
3078 
3079     if (!sata_scr_valid(link))
3080         return -EOPNOTSUPP;
3081 
3082     /* If SCR can be read, use it to determine the current SPD.
3083      * If not, use cached value in link->sata_spd.
3084      */
3085     rc = sata_scr_read(link, SCR_STATUS, &sstatus);
3086     if (rc == 0 && ata_sstatus_online(sstatus))
3087         spd = (sstatus >> 4) & 0xf;
3088     else
3089         spd = link->sata_spd;
3090 
3091     mask = link->sata_spd_limit;
3092     if (mask <= 1)
3093         return -EINVAL;
3094 
3095     /* unconditionally mask off the highest bit */
3096     bit = fls(mask) - 1;
3097     mask &= ~(1 << bit);
3098 
3099     /*
3100      * Mask off all speeds higher than or equal to the current one.  At
3101      * this point, if current SPD is not available and we previously
3102      * recorded the link speed from SStatus, the driver has already
3103      * masked off the highest bit so mask should already be 1 or 0.
3104      * Otherwise, we should not force 1.5Gbps on a link where we have
3105      * not previously recorded speed from SStatus.  Just return in this
3106      * case.
3107      */
3108     if (spd > 1)
3109         mask &= (1 << (spd - 1)) - 1;
3110     else
3111         return -EINVAL;
3112 
3113     /* were we already at the bottom? */
3114     if (!mask)
3115         return -EINVAL;
3116 
3117     if (spd_limit) {
3118         if (mask & ((1 << spd_limit) - 1))
3119             mask &= (1 << spd_limit) - 1;
3120         else {
3121             bit = ffs(mask) - 1;
3122             mask = 1 << bit;
3123         }
3124     }
3125 
3126     link->sata_spd_limit = mask;
3127 
3128     ata_link_warn(link, "limiting SATA link speed to %s\n",
3129               sata_spd_string(fls(mask)));
3130 
3131     return 0;
3132 }
3133 
3134 #ifdef CONFIG_ATA_ACPI
3135 /**
3136  *  ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3137  *  @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3138  *  @cycle: cycle duration in ns
3139  *
3140  *  Return matching xfer mode for @cycle.  The returned mode is of
3141  *  the transfer type specified by @xfer_shift.  If @cycle is too
3142  *  slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3143  *  than the fastest known mode, the fasted mode is returned.
3144  *
3145  *  LOCKING:
3146  *  None.
3147  *
3148  *  RETURNS:
3149  *  Matching xfer_mode, 0xff if no match found.
3150  */
3151 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3152 {
3153     u8 base_mode = 0xff, last_mode = 0xff;
3154     const struct ata_xfer_ent *ent;
3155     const struct ata_timing *t;
3156 
3157     for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3158         if (ent->shift == xfer_shift)
3159             base_mode = ent->base;
3160 
3161     for (t = ata_timing_find_mode(base_mode);
3162          t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3163         unsigned short this_cycle;
3164 
3165         switch (xfer_shift) {
3166         case ATA_SHIFT_PIO:
3167         case ATA_SHIFT_MWDMA:
3168             this_cycle = t->cycle;
3169             break;
3170         case ATA_SHIFT_UDMA:
3171             this_cycle = t->udma;
3172             break;
3173         default:
3174             return 0xff;
3175         }
3176 
3177         if (cycle > this_cycle)
3178             break;
3179 
3180         last_mode = t->mode;
3181     }
3182 
3183     return last_mode;
3184 }
3185 #endif
3186 
3187 /**
3188  *  ata_down_xfermask_limit - adjust dev xfer masks downward
3189  *  @dev: Device to adjust xfer masks
3190  *  @sel: ATA_DNXFER_* selector
3191  *
3192  *  Adjust xfer masks of @dev downward.  Note that this function
3193  *  does not apply the change.  Invoking ata_set_mode() afterwards
3194  *  will apply the limit.
3195  *
3196  *  LOCKING:
3197  *  Inherited from caller.
3198  *
3199  *  RETURNS:
3200  *  0 on success, negative errno on failure
3201  */
3202 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3203 {
3204     char buf[32];
3205     unsigned int orig_mask, xfer_mask;
3206     unsigned int pio_mask, mwdma_mask, udma_mask;
3207     int quiet, highbit;
3208 
3209     quiet = !!(sel & ATA_DNXFER_QUIET);
3210     sel &= ~ATA_DNXFER_QUIET;
3211 
3212     xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3213                           dev->mwdma_mask,
3214                           dev->udma_mask);
3215     ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3216 
3217     switch (sel) {
3218     case ATA_DNXFER_PIO:
3219         highbit = fls(pio_mask) - 1;
3220         pio_mask &= ~(1 << highbit);
3221         break;
3222 
3223     case ATA_DNXFER_DMA:
3224         if (udma_mask) {
3225             highbit = fls(udma_mask) - 1;
3226             udma_mask &= ~(1 << highbit);
3227             if (!udma_mask)
3228                 return -ENOENT;
3229         } else if (mwdma_mask) {
3230             highbit = fls(mwdma_mask) - 1;
3231             mwdma_mask &= ~(1 << highbit);
3232             if (!mwdma_mask)
3233                 return -ENOENT;
3234         }
3235         break;
3236 
3237     case ATA_DNXFER_40C:
3238         udma_mask &= ATA_UDMA_MASK_40C;
3239         break;
3240 
3241     case ATA_DNXFER_FORCE_PIO0:
3242         pio_mask &= 1;
3243         fallthrough;
3244     case ATA_DNXFER_FORCE_PIO:
3245         mwdma_mask = 0;
3246         udma_mask = 0;
3247         break;
3248 
3249     default:
3250         BUG();
3251     }
3252 
3253     xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3254 
3255     if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3256         return -ENOENT;
3257 
3258     if (!quiet) {
3259         if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3260             snprintf(buf, sizeof(buf), "%s:%s",
3261                  ata_mode_string(xfer_mask),
3262                  ata_mode_string(xfer_mask & ATA_MASK_PIO));
3263         else
3264             snprintf(buf, sizeof(buf), "%s",
3265                  ata_mode_string(xfer_mask));
3266 
3267         ata_dev_warn(dev, "limiting speed to %s\n", buf);
3268     }
3269 
3270     ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3271                 &dev->udma_mask);
3272 
3273     return 0;
3274 }
3275 
3276 static int ata_dev_set_mode(struct ata_device *dev)
3277 {
3278     struct ata_port *ap = dev->link->ap;
3279     struct ata_eh_context *ehc = &dev->link->eh_context;
3280     const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3281     const char *dev_err_whine = "";
3282     int ign_dev_err = 0;
3283     unsigned int err_mask = 0;
3284     int rc;
3285 
3286     dev->flags &= ~ATA_DFLAG_PIO;
3287     if (dev->xfer_shift == ATA_SHIFT_PIO)
3288         dev->flags |= ATA_DFLAG_PIO;
3289 
3290     if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3291         dev_err_whine = " (SET_XFERMODE skipped)";
3292     else {
3293         if (nosetxfer)
3294             ata_dev_warn(dev,
3295                      "NOSETXFER but PATA detected - can't "
3296                      "skip SETXFER, might malfunction\n");
3297         err_mask = ata_dev_set_xfermode(dev);
3298     }
3299 
3300     if (err_mask & ~AC_ERR_DEV)
3301         goto fail;
3302 
3303     /* revalidate */
3304     ehc->i.flags |= ATA_EHI_POST_SETMODE;
3305     rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3306     ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3307     if (rc)
3308         return rc;
3309 
3310     if (dev->xfer_shift == ATA_SHIFT_PIO) {
3311         /* Old CFA may refuse this command, which is just fine */
3312         if (ata_id_is_cfa(dev->id))
3313             ign_dev_err = 1;
3314         /* Catch several broken garbage emulations plus some pre
3315            ATA devices */
3316         if (ata_id_major_version(dev->id) == 0 &&
3317                     dev->pio_mode <= XFER_PIO_2)
3318             ign_dev_err = 1;
3319         /* Some very old devices and some bad newer ones fail
3320            any kind of SET_XFERMODE request but support PIO0-2
3321            timings and no IORDY */
3322         if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3323             ign_dev_err = 1;
3324     }
3325     /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3326        Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3327     if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3328         dev->dma_mode == XFER_MW_DMA_0 &&
3329         (dev->id[63] >> 8) & 1)
3330         ign_dev_err = 1;
3331 
3332     /* if the device is actually configured correctly, ignore dev err */
3333     if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3334         ign_dev_err = 1;
3335 
3336     if (err_mask & AC_ERR_DEV) {
3337         if (!ign_dev_err)
3338             goto fail;
3339         else
3340             dev_err_whine = " (device error ignored)";
3341     }
3342 
3343     ata_dev_dbg(dev, "xfer_shift=%u, xfer_mode=0x%x\n",
3344             dev->xfer_shift, (int)dev->xfer_mode);
3345 
3346     if (!(ehc->i.flags & ATA_EHI_QUIET) ||
3347         ehc->i.flags & ATA_EHI_DID_HARDRESET)
3348         ata_dev_info(dev, "configured for %s%s\n",
3349                  ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3350                  dev_err_whine);
3351 
3352     return 0;
3353 
3354  fail:
3355     ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3356     return -EIO;
3357 }
3358 
3359 /**
3360  *  ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3361  *  @link: link on which timings will be programmed
3362  *  @r_failed_dev: out parameter for failed device
3363  *
3364  *  Standard implementation of the function used to tune and set
3365  *  ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3366  *  ata_dev_set_mode() fails, pointer to the failing device is
3367  *  returned in @r_failed_dev.
3368  *
3369  *  LOCKING:
3370  *  PCI/etc. bus probe sem.
3371  *
3372  *  RETURNS:
3373  *  0 on success, negative errno otherwise
3374  */
3375 
3376 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3377 {
3378     struct ata_port *ap = link->ap;
3379     struct ata_device *dev;
3380     int rc = 0, used_dma = 0, found = 0;
3381 
3382     /* step 1: calculate xfer_mask */
3383     ata_for_each_dev(dev, link, ENABLED) {
3384         unsigned int pio_mask, dma_mask;
3385         unsigned int mode_mask;
3386 
3387         mode_mask = ATA_DMA_MASK_ATA;
3388         if (dev->class == ATA_DEV_ATAPI)
3389             mode_mask = ATA_DMA_MASK_ATAPI;
3390         else if (ata_id_is_cfa(dev->id))
3391             mode_mask = ATA_DMA_MASK_CFA;
3392 
3393         ata_dev_xfermask(dev);
3394         ata_force_xfermask(dev);
3395 
3396         pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3397 
3398         if (libata_dma_mask & mode_mask)
3399             dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3400                              dev->udma_mask);
3401         else
3402             dma_mask = 0;
3403 
3404         dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3405         dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3406 
3407         found = 1;
3408         if (ata_dma_enabled(dev))
3409             used_dma = 1;
3410     }
3411     if (!found)
3412         goto out;
3413 
3414     /* step 2: always set host PIO timings */
3415     ata_for_each_dev(dev, link, ENABLED) {
3416         if (dev->pio_mode == 0xff) {
3417             ata_dev_warn(dev, "no PIO support\n");
3418             rc = -EINVAL;
3419             goto out;
3420         }
3421 
3422         dev->xfer_mode = dev->pio_mode;
3423         dev->xfer_shift = ATA_SHIFT_PIO;
3424         if (ap->ops->set_piomode)
3425             ap->ops->set_piomode(ap, dev);
3426     }
3427 
3428     /* step 3: set host DMA timings */
3429     ata_for_each_dev(dev, link, ENABLED) {
3430         if (!ata_dma_enabled(dev))
3431             continue;
3432 
3433         dev->xfer_mode = dev->dma_mode;
3434         dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3435         if (ap->ops->set_dmamode)
3436             ap->ops->set_dmamode(ap, dev);
3437     }
3438 
3439     /* step 4: update devices' xfer mode */
3440     ata_for_each_dev(dev, link, ENABLED) {
3441         rc = ata_dev_set_mode(dev);
3442         if (rc)
3443             goto out;
3444     }
3445 
3446     /* Record simplex status. If we selected DMA then the other
3447      * host channels are not permitted to do so.
3448      */
3449     if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3450         ap->host->simplex_claimed = ap;
3451 
3452  out:
3453     if (rc)
3454         *r_failed_dev = dev;
3455     return rc;
3456 }
3457 EXPORT_SYMBOL_GPL(ata_do_set_mode);
3458 
3459 /**
3460  *  ata_wait_ready - wait for link to become ready
3461  *  @link: link to be waited on
3462  *  @deadline: deadline jiffies for the operation
3463  *  @check_ready: callback to check link readiness
3464  *
3465  *  Wait for @link to become ready.  @check_ready should return
3466  *  positive number if @link is ready, 0 if it isn't, -ENODEV if
3467  *  link doesn't seem to be occupied, other errno for other error
3468  *  conditions.
3469  *
3470  *  Transient -ENODEV conditions are allowed for
3471  *  ATA_TMOUT_FF_WAIT.
3472  *
3473  *  LOCKING:
3474  *  EH context.
3475  *
3476  *  RETURNS:
3477  *  0 if @link is ready before @deadline; otherwise, -errno.
3478  */
3479 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3480            int (*check_ready)(struct ata_link *link))
3481 {
3482     unsigned long start = jiffies;
3483     unsigned long nodev_deadline;
3484     int warned = 0;
3485 
3486     /* choose which 0xff timeout to use, read comment in libata.h */
3487     if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3488         nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3489     else
3490         nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3491 
3492     /* Slave readiness can't be tested separately from master.  On
3493      * M/S emulation configuration, this function should be called
3494      * only on the master and it will handle both master and slave.
3495      */
3496     WARN_ON(link == link->ap->slave_link);
3497 
3498     if (time_after(nodev_deadline, deadline))
3499         nodev_deadline = deadline;
3500 
3501     while (1) {
3502         unsigned long now = jiffies;
3503         int ready, tmp;
3504 
3505         ready = tmp = check_ready(link);
3506         if (ready > 0)
3507             return 0;
3508 
3509         /*
3510          * -ENODEV could be transient.  Ignore -ENODEV if link
3511          * is online.  Also, some SATA devices take a long
3512          * time to clear 0xff after reset.  Wait for
3513          * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3514          * offline.
3515          *
3516          * Note that some PATA controllers (pata_ali) explode
3517          * if status register is read more than once when
3518          * there's no device attached.
3519          */
3520         if (ready == -ENODEV) {
3521             if (ata_link_online(link))
3522                 ready = 0;
3523             else if ((link->ap->flags & ATA_FLAG_SATA) &&
3524                  !ata_link_offline(link) &&
3525                  time_before(now, nodev_deadline))
3526                 ready = 0;
3527         }
3528 
3529         if (ready)
3530             return ready;
3531         if (time_after(now, deadline))
3532             return -EBUSY;
3533 
3534         if (!warned && time_after(now, start + 5 * HZ) &&
3535             (deadline - now > 3 * HZ)) {
3536             ata_link_warn(link,
3537                 "link is slow to respond, please be patient "
3538                 "(ready=%d)\n", tmp);
3539             warned = 1;
3540         }
3541 
3542         ata_msleep(link->ap, 50);
3543     }
3544 }
3545 
3546 /**
3547  *  ata_wait_after_reset - wait for link to become ready after reset
3548  *  @link: link to be waited on
3549  *  @deadline: deadline jiffies for the operation
3550  *  @check_ready: callback to check link readiness
3551  *
3552  *  Wait for @link to become ready after reset.
3553  *
3554  *  LOCKING:
3555  *  EH context.
3556  *
3557  *  RETURNS:
3558  *  0 if @link is ready before @deadline; otherwise, -errno.
3559  */
3560 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3561                 int (*check_ready)(struct ata_link *link))
3562 {
3563     ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3564 
3565     return ata_wait_ready(link, deadline, check_ready);
3566 }
3567 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
3568 
3569 /**
3570  *  ata_std_prereset - prepare for reset
3571  *  @link: ATA link to be reset
3572  *  @deadline: deadline jiffies for the operation
3573  *
3574  *  @link is about to be reset.  Initialize it.  Failure from
3575  *  prereset makes libata abort whole reset sequence and give up
3576  *  that port, so prereset should be best-effort.  It does its
3577  *  best to prepare for reset sequence but if things go wrong, it
3578  *  should just whine, not fail.
3579  *
3580  *  LOCKING:
3581  *  Kernel thread context (may sleep)
3582  *
3583  *  RETURNS:
3584  *  Always 0.
3585  */
3586 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3587 {
3588     struct ata_port *ap = link->ap;
3589     struct ata_eh_context *ehc = &link->eh_context;
3590     const unsigned long *timing = sata_ehc_deb_timing(ehc);
3591     int rc;
3592 
3593     /* if we're about to do hardreset, nothing more to do */
3594     if (ehc->i.action & ATA_EH_HARDRESET)
3595         return 0;
3596 
3597     /* if SATA, resume link */
3598     if (ap->flags & ATA_FLAG_SATA) {
3599         rc = sata_link_resume(link, timing, deadline);
3600         /* whine about phy resume failure but proceed */
3601         if (rc && rc != -EOPNOTSUPP)
3602             ata_link_warn(link,
3603                       "failed to resume link for reset (errno=%d)\n",
3604                       rc);
3605     }
3606 
3607     /* no point in trying softreset on offline link */
3608     if (ata_phys_link_offline(link))
3609         ehc->i.action &= ~ATA_EH_SOFTRESET;
3610 
3611     return 0;
3612 }
3613 EXPORT_SYMBOL_GPL(ata_std_prereset);
3614 
3615 /**
3616  *  sata_std_hardreset - COMRESET w/o waiting or classification
3617  *  @link: link to reset
3618  *  @class: resulting class of attached device
3619  *  @deadline: deadline jiffies for the operation
3620  *
3621  *  Standard SATA COMRESET w/o waiting or classification.
3622  *
3623  *  LOCKING:
3624  *  Kernel thread context (may sleep)
3625  *
3626  *  RETURNS:
3627  *  0 if link offline, -EAGAIN if link online, -errno on errors.
3628  */
3629 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3630                unsigned long deadline)
3631 {
3632     const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3633     bool online;
3634     int rc;
3635 
3636     /* do hardreset */
3637     rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3638     return online ? -EAGAIN : rc;
3639 }
3640 EXPORT_SYMBOL_GPL(sata_std_hardreset);
3641 
3642 /**
3643  *  ata_std_postreset - standard postreset callback
3644  *  @link: the target ata_link
3645  *  @classes: classes of attached devices
3646  *
3647  *  This function is invoked after a successful reset.  Note that
3648  *  the device might have been reset more than once using
3649  *  different reset methods before postreset is invoked.
3650  *
3651  *  LOCKING:
3652  *  Kernel thread context (may sleep)
3653  */
3654 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3655 {
3656     u32 serror;
3657 
3658     /* reset complete, clear SError */
3659     if (!sata_scr_read(link, SCR_ERROR, &serror))
3660         sata_scr_write(link, SCR_ERROR, serror);
3661 
3662     /* print link status */
3663     sata_print_link_status(link);
3664 }
3665 EXPORT_SYMBOL_GPL(ata_std_postreset);
3666 
3667 /**
3668  *  ata_dev_same_device - Determine whether new ID matches configured device
3669  *  @dev: device to compare against
3670  *  @new_class: class of the new device
3671  *  @new_id: IDENTIFY page of the new device
3672  *
3673  *  Compare @new_class and @new_id against @dev and determine
3674  *  whether @dev is the device indicated by @new_class and
3675  *  @new_id.
3676  *
3677  *  LOCKING:
3678  *  None.
3679  *
3680  *  RETURNS:
3681  *  1 if @dev matches @new_class and @new_id, 0 otherwise.
3682  */
3683 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3684                    const u16 *new_id)
3685 {
3686     const u16 *old_id = dev->id;
3687     unsigned char model[2][ATA_ID_PROD_LEN + 1];
3688     unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3689 
3690     if (dev->class != new_class) {
3691         ata_dev_info(dev, "class mismatch %d != %d\n",
3692                  dev->class, new_class);
3693         return 0;
3694     }
3695 
3696     ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3697     ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3698     ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3699     ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3700 
3701     if (strcmp(model[0], model[1])) {
3702         ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3703                  model[0], model[1]);
3704         return 0;
3705     }
3706 
3707     if (strcmp(serial[0], serial[1])) {
3708         ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3709                  serial[0], serial[1]);
3710         return 0;
3711     }
3712 
3713     return 1;
3714 }
3715 
3716 /**
3717  *  ata_dev_reread_id - Re-read IDENTIFY data
3718  *  @dev: target ATA device
3719  *  @readid_flags: read ID flags
3720  *
3721  *  Re-read IDENTIFY page and make sure @dev is still attached to
3722  *  the port.
3723  *
3724  *  LOCKING:
3725  *  Kernel thread context (may sleep)
3726  *
3727  *  RETURNS:
3728  *  0 on success, negative errno otherwise
3729  */
3730 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3731 {
3732     unsigned int class = dev->class;
3733     u16 *id = (void *)dev->link->ap->sector_buf;
3734     int rc;
3735 
3736     /* read ID data */
3737     rc = ata_dev_read_id(dev, &class, readid_flags, id);
3738     if (rc)
3739         return rc;
3740 
3741     /* is the device still there? */
3742     if (!ata_dev_same_device(dev, class, id))
3743         return -ENODEV;
3744 
3745     memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3746     return 0;
3747 }
3748 
3749 /**
3750  *  ata_dev_revalidate - Revalidate ATA device
3751  *  @dev: device to revalidate
3752  *  @new_class: new class code
3753  *  @readid_flags: read ID flags
3754  *
3755  *  Re-read IDENTIFY page, make sure @dev is still attached to the
3756  *  port and reconfigure it according to the new IDENTIFY page.
3757  *
3758  *  LOCKING:
3759  *  Kernel thread context (may sleep)
3760  *
3761  *  RETURNS:
3762  *  0 on success, negative errno otherwise
3763  */
3764 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3765                unsigned int readid_flags)
3766 {
3767     u64 n_sectors = dev->n_sectors;
3768     u64 n_native_sectors = dev->n_native_sectors;
3769     int rc;
3770 
3771     if (!ata_dev_enabled(dev))
3772         return -ENODEV;
3773 
3774     /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3775     if (ata_class_enabled(new_class) &&
3776         new_class != ATA_DEV_ATA &&
3777         new_class != ATA_DEV_ATAPI &&
3778         new_class != ATA_DEV_ZAC &&
3779         new_class != ATA_DEV_SEMB) {
3780         ata_dev_info(dev, "class mismatch %u != %u\n",
3781                  dev->class, new_class);
3782         rc = -ENODEV;
3783         goto fail;
3784     }
3785 
3786     /* re-read ID */
3787     rc = ata_dev_reread_id(dev, readid_flags);
3788     if (rc)
3789         goto fail;
3790 
3791     /* configure device according to the new ID */
3792     rc = ata_dev_configure(dev);
3793     if (rc)
3794         goto fail;
3795 
3796     /* verify n_sectors hasn't changed */
3797     if (dev->class != ATA_DEV_ATA || !n_sectors ||
3798         dev->n_sectors == n_sectors)
3799         return 0;
3800 
3801     /* n_sectors has changed */
3802     ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
3803              (unsigned long long)n_sectors,
3804              (unsigned long long)dev->n_sectors);
3805 
3806     /*
3807      * Something could have caused HPA to be unlocked
3808      * involuntarily.  If n_native_sectors hasn't changed and the
3809      * new size matches it, keep the device.
3810      */
3811     if (dev->n_native_sectors == n_native_sectors &&
3812         dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
3813         ata_dev_warn(dev,
3814                  "new n_sectors matches native, probably "
3815                  "late HPA unlock, n_sectors updated\n");
3816         /* use the larger n_sectors */
3817         return 0;
3818     }
3819 
3820     /*
3821      * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
3822      * unlocking HPA in those cases.
3823      *
3824      * https://bugzilla.kernel.org/show_bug.cgi?id=15396
3825      */
3826     if (dev->n_native_sectors == n_native_sectors &&
3827         dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
3828         !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
3829         ata_dev_warn(dev,
3830                  "old n_sectors matches native, probably "
3831                  "late HPA lock, will try to unlock HPA\n");
3832         /* try unlocking HPA */
3833         dev->flags |= ATA_DFLAG_UNLOCK_HPA;
3834         rc = -EIO;
3835     } else
3836         rc = -ENODEV;
3837 
3838     /* restore original n_[native_]sectors and fail */
3839     dev->n_native_sectors = n_native_sectors;
3840     dev->n_sectors = n_sectors;
3841  fail:
3842     ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
3843     return rc;
3844 }
3845 
3846 struct ata_blacklist_entry {
3847     const char *model_num;
3848     const char *model_rev;
3849     unsigned long horkage;
3850 };
3851 
3852 static const struct ata_blacklist_entry ata_device_blacklist [] = {
3853     /* Devices with DMA related problems under Linux */
3854     { "WDC AC11000H",   NULL,       ATA_HORKAGE_NODMA },
3855     { "WDC AC22100H",   NULL,       ATA_HORKAGE_NODMA },
3856     { "WDC AC32500H",   NULL,       ATA_HORKAGE_NODMA },
3857     { "WDC AC33100H",   NULL,       ATA_HORKAGE_NODMA },
3858     { "WDC AC31600H",   NULL,       ATA_HORKAGE_NODMA },
3859     { "WDC AC32100H",   "24.09P07", ATA_HORKAGE_NODMA },
3860     { "WDC AC23200L",   "21.10N21", ATA_HORKAGE_NODMA },
3861     { "Compaq CRD-8241B",   NULL,       ATA_HORKAGE_NODMA },
3862     { "CRD-8400B",      NULL,       ATA_HORKAGE_NODMA },
3863     { "CRD-848[02]B",   NULL,       ATA_HORKAGE_NODMA },
3864     { "CRD-84",     NULL,       ATA_HORKAGE_NODMA },
3865     { "SanDisk SDP3B",  NULL,       ATA_HORKAGE_NODMA },
3866     { "SanDisk SDP3B-64",   NULL,       ATA_HORKAGE_NODMA },
3867     { "SANYO CD-ROM CRD",   NULL,       ATA_HORKAGE_NODMA },
3868     { "HITACHI CDR-8",  NULL,       ATA_HORKAGE_NODMA },
3869     { "HITACHI CDR-8[34]35",NULL,       ATA_HORKAGE_NODMA },
3870     { "Toshiba CD-ROM XM-6202B", NULL,  ATA_HORKAGE_NODMA },
3871     { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3872     { "CD-532E-A",      NULL,       ATA_HORKAGE_NODMA },
3873     { "E-IDE CD-ROM CR-840",NULL,       ATA_HORKAGE_NODMA },
3874     { "CD-ROM Drive/F5A",   NULL,       ATA_HORKAGE_NODMA },
3875     { "WPI CDD-820",    NULL,       ATA_HORKAGE_NODMA },
3876     { "SAMSUNG CD-ROM SC-148C", NULL,   ATA_HORKAGE_NODMA },
3877     { "SAMSUNG CD-ROM SC",  NULL,       ATA_HORKAGE_NODMA },
3878     { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3879     { "_NEC DV5800A",   NULL,       ATA_HORKAGE_NODMA },
3880     { "SAMSUNG CD-ROM SN-124", "N001",  ATA_HORKAGE_NODMA },
3881     { "Seagate STT20000A", NULL,        ATA_HORKAGE_NODMA },
3882     { " 2GB ATA Flash Disk", "ADMA428M",    ATA_HORKAGE_NODMA },
3883     { "VRFDFC22048UCHC-TE*", NULL,      ATA_HORKAGE_NODMA },
3884     /* Odd clown on sil3726/4726 PMPs */
3885     { "Config  Disk",   NULL,       ATA_HORKAGE_DISABLE },
3886     /* Similar story with ASMedia 1092 */
3887     { "ASMT109x- Config",   NULL,       ATA_HORKAGE_DISABLE },
3888 
3889     /* Weird ATAPI devices */
3890     { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
3891     { "QUANTUM DAT    DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
3892     { "Slimtype DVD A  DS8A8SH", NULL,  ATA_HORKAGE_MAX_SEC_LBA48 },
3893     { "Slimtype DVD A  DS8A9SH", NULL,  ATA_HORKAGE_MAX_SEC_LBA48 },
3894 
3895     /*
3896      * Causes silent data corruption with higher max sects.
3897      * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
3898      */
3899     { "ST380013AS",     "3.20",     ATA_HORKAGE_MAX_SEC_1024 },
3900 
3901     /*
3902      * These devices time out with higher max sects.
3903      * https://bugzilla.kernel.org/show_bug.cgi?id=121671
3904      */
3905     { "LITEON CX1-JB*-HP",  NULL,       ATA_HORKAGE_MAX_SEC_1024 },
3906     { "LITEON EP1-*",   NULL,       ATA_HORKAGE_MAX_SEC_1024 },
3907 
3908     /* Devices we expect to fail diagnostics */
3909 
3910     /* Devices where NCQ should be avoided */
3911     /* NCQ is slow */
3912     { "WDC WD740ADFD-00",   NULL,       ATA_HORKAGE_NONCQ },
3913     { "WDC WD740ADFD-00NLR1", NULL,     ATA_HORKAGE_NONCQ },
3914     /* http://thread.gmane.org/gmane.linux.ide/14907 */
3915     { "FUJITSU MHT2060BH",  NULL,       ATA_HORKAGE_NONCQ },
3916     /* NCQ is broken */
3917     { "Maxtor *",       "BANC*",    ATA_HORKAGE_NONCQ },
3918     { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
3919     { "ST380817AS",     "3.42",     ATA_HORKAGE_NONCQ },
3920     { "ST3160023AS",    "3.42",     ATA_HORKAGE_NONCQ },
3921     { "OCZ CORE_SSD",   "02.10104", ATA_HORKAGE_NONCQ },
3922 
3923     /* Seagate NCQ + FLUSH CACHE firmware bug */
3924     { "ST31500341AS",   "SD1[5-9]", ATA_HORKAGE_NONCQ |
3925                         ATA_HORKAGE_FIRMWARE_WARN },
3926 
3927     { "ST31000333AS",   "SD1[5-9]", ATA_HORKAGE_NONCQ |
3928                         ATA_HORKAGE_FIRMWARE_WARN },
3929 
3930     { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
3931                         ATA_HORKAGE_FIRMWARE_WARN },
3932 
3933     { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
3934                         ATA_HORKAGE_FIRMWARE_WARN },
3935 
3936     /* drives which fail FPDMA_AA activation (some may freeze afterwards)
3937        the ST disks also have LPM issues */
3938     { "ST1000LM024 HN-M101MBB", NULL,   ATA_HORKAGE_BROKEN_FPDMA_AA |
3939                         ATA_HORKAGE_NOLPM },
3940     { "VB0250EAVER",    "HPG7",     ATA_HORKAGE_BROKEN_FPDMA_AA },
3941 
3942     /* Blacklist entries taken from Silicon Image 3124/3132
3943        Windows driver .inf file - also several Linux problem reports */
3944     { "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ },
3945     { "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ },
3946     { "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ },
3947 
3948     /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
3949     { "C300-CTFDDAC128MAG", "0001",     ATA_HORKAGE_NONCQ },
3950 
3951     /* Sandisk SD7/8/9s lock up hard on large trims */
3952     { "SanDisk SD[789]*",   NULL,       ATA_HORKAGE_MAX_TRIM_128M },
3953 
3954     /* devices which puke on READ_NATIVE_MAX */
3955     { "HDS724040KLSA80",    "KFAOA20N", ATA_HORKAGE_BROKEN_HPA },
3956     { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3957     { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3958     { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
3959 
3960     /* this one allows HPA unlocking but fails IOs on the area */
3961     { "OCZ-VERTEX",         "1.30", ATA_HORKAGE_BROKEN_HPA },
3962 
3963     /* Devices which report 1 sector over size HPA */
3964     { "ST340823A",      NULL,       ATA_HORKAGE_HPA_SIZE },
3965     { "ST320413A",      NULL,       ATA_HORKAGE_HPA_SIZE },
3966     { "ST310211A",      NULL,       ATA_HORKAGE_HPA_SIZE },
3967 
3968     /* Devices which get the IVB wrong */
3969     { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB },
3970     /* Maybe we should just blacklist TSSTcorp... */
3971     { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB },
3972 
3973     /* Devices that do not need bridging limits applied */
3974     { "MTRON MSP-SATA*",        NULL,   ATA_HORKAGE_BRIDGE_OK },
3975     { "BUFFALO HD-QSU2/R5",     NULL,   ATA_HORKAGE_BRIDGE_OK },
3976 
3977     /* Devices which aren't very happy with higher link speeds */
3978     { "WD My Book",         NULL,   ATA_HORKAGE_1_5_GBPS },
3979     { "Seagate FreeAgent GoFlex",   NULL,   ATA_HORKAGE_1_5_GBPS },
3980 
3981     /*
3982      * Devices which choke on SETXFER.  Applies only if both the
3983      * device and controller are SATA.
3984      */
3985     { "PIONEER DVD-RW  DVRTD08",    NULL,   ATA_HORKAGE_NOSETXFER },
3986     { "PIONEER DVD-RW  DVRTD08A",   NULL,   ATA_HORKAGE_NOSETXFER },
3987     { "PIONEER DVD-RW  DVR-215",    NULL,   ATA_HORKAGE_NOSETXFER },
3988     { "PIONEER DVD-RW  DVR-212D",   NULL,   ATA_HORKAGE_NOSETXFER },
3989     { "PIONEER DVD-RW  DVR-216D",   NULL,   ATA_HORKAGE_NOSETXFER },
3990 
3991     /* These specific Pioneer models have LPM issues */
3992     { "PIONEER BD-RW   BDR-207M",   NULL,   ATA_HORKAGE_NOLPM },
3993     { "PIONEER BD-RW   BDR-205",    NULL,   ATA_HORKAGE_NOLPM },
3994 
3995     /* Crucial BX100 SSD 500GB has broken LPM support */
3996     { "CT500BX100SSD1",     NULL,   ATA_HORKAGE_NOLPM },
3997 
3998     /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
3999     { "Crucial_CT512MX100*",    "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4000                         ATA_HORKAGE_ZERO_AFTER_TRIM |
4001                         ATA_HORKAGE_NOLPM },
4002     /* 512GB MX100 with newer firmware has only LPM issues */
4003     { "Crucial_CT512MX100*",    NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM |
4004                         ATA_HORKAGE_NOLPM },
4005 
4006     /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
4007     { "Crucial_CT480M500*",     NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
4008                         ATA_HORKAGE_ZERO_AFTER_TRIM |
4009                         ATA_HORKAGE_NOLPM },
4010     { "Crucial_CT960M500*",     NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
4011                         ATA_HORKAGE_ZERO_AFTER_TRIM |
4012                         ATA_HORKAGE_NOLPM },
4013 
4014     /* These specific Samsung models/firmware-revs do not handle LPM well */
4015     { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM },
4016     { "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM },
4017     { "SAMSUNG MZ7TD256HAFV-000L9", NULL,       ATA_HORKAGE_NOLPM },
4018     { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM },
4019 
4020     /* devices that don't properly handle queued TRIM commands */
4021     { "Micron_M500IT_*",        "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4022                         ATA_HORKAGE_ZERO_AFTER_TRIM },
4023     { "Micron_M500_*",      NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
4024                         ATA_HORKAGE_ZERO_AFTER_TRIM },
4025     { "Crucial_CT*M500*",       NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
4026                         ATA_HORKAGE_ZERO_AFTER_TRIM },
4027     { "Micron_M5[15]0_*",       "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4028                         ATA_HORKAGE_ZERO_AFTER_TRIM },
4029     { "Crucial_CT*M550*",       "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4030                         ATA_HORKAGE_ZERO_AFTER_TRIM },
4031     { "Crucial_CT*MX100*",      "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4032                         ATA_HORKAGE_ZERO_AFTER_TRIM },
4033     { "Samsung SSD 840 EVO*",   NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
4034                         ATA_HORKAGE_NO_DMA_LOG |
4035                         ATA_HORKAGE_ZERO_AFTER_TRIM },
4036     { "Samsung SSD 840*",       NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
4037                         ATA_HORKAGE_ZERO_AFTER_TRIM },
4038     { "Samsung SSD 850*",       NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
4039                         ATA_HORKAGE_ZERO_AFTER_TRIM },
4040     { "Samsung SSD 860*",       NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
4041                         ATA_HORKAGE_ZERO_AFTER_TRIM |
4042                         ATA_HORKAGE_NO_NCQ_ON_ATI },
4043     { "Samsung SSD 870*",       NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
4044                         ATA_HORKAGE_ZERO_AFTER_TRIM |
4045                         ATA_HORKAGE_NO_NCQ_ON_ATI },
4046     { "FCCT*M500*",         NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
4047                         ATA_HORKAGE_ZERO_AFTER_TRIM },
4048 
4049     /* devices that don't properly handle TRIM commands */
4050     { "SuperSSpeed S238*",      NULL,   ATA_HORKAGE_NOTRIM },
4051     { "M88V29*",            NULL,   ATA_HORKAGE_NOTRIM },
4052 
4053     /*
4054      * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4055      * (Return Zero After Trim) flags in the ATA Command Set are
4056      * unreliable in the sense that they only define what happens if
4057      * the device successfully executed the DSM TRIM command. TRIM
4058      * is only advisory, however, and the device is free to silently
4059      * ignore all or parts of the request.
4060      *
4061      * Whitelist drives that are known to reliably return zeroes
4062      * after TRIM.
4063      */
4064 
4065     /*
4066      * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
4067      * that model before whitelisting all other intel SSDs.
4068      */
4069     { "INTEL*SSDSC2MH*",        NULL,   0 },
4070 
4071     { "Micron*",            NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM },
4072     { "Crucial*",           NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM },
4073     { "INTEL*SSD*",         NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM },
4074     { "SSD*INTEL*",         NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM },
4075     { "Samsung*SSD*",       NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM },
4076     { "SAMSUNG*SSD*",       NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM },
4077     { "SAMSUNG*MZ7KM*",     NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM },
4078     { "ST[1248][0248]0[FH]*",   NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM },
4079 
4080     /*
4081      * Some WD SATA-I drives spin up and down erratically when the link
4082      * is put into the slumber mode.  We don't have full list of the
4083      * affected devices.  Disable LPM if the device matches one of the
4084      * known prefixes and is SATA-1.  As a side effect LPM partial is
4085      * lost too.
4086      *
4087      * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4088      */
4089     { "WDC WD800JD-*",      NULL,   ATA_HORKAGE_WD_BROKEN_LPM },
4090     { "WDC WD1200JD-*",     NULL,   ATA_HORKAGE_WD_BROKEN_LPM },
4091     { "WDC WD1600JD-*",     NULL,   ATA_HORKAGE_WD_BROKEN_LPM },
4092     { "WDC WD2000JD-*",     NULL,   ATA_HORKAGE_WD_BROKEN_LPM },
4093     { "WDC WD2500JD-*",     NULL,   ATA_HORKAGE_WD_BROKEN_LPM },
4094     { "WDC WD3000JD-*",     NULL,   ATA_HORKAGE_WD_BROKEN_LPM },
4095     { "WDC WD3200JD-*",     NULL,   ATA_HORKAGE_WD_BROKEN_LPM },
4096 
4097     /*
4098      * This sata dom device goes on a walkabout when the ATA_LOG_DIRECTORY
4099      * log page is accessed. Ensure we never ask for this log page with
4100      * these devices.
4101      */
4102     { "SATADOM-ML 3ME",     NULL,   ATA_HORKAGE_NO_LOG_DIR },
4103 
4104     /* End Marker */
4105     { }
4106 };
4107 
4108 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4109 {
4110     unsigned char model_num[ATA_ID_PROD_LEN + 1];
4111     unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4112     const struct ata_blacklist_entry *ad = ata_device_blacklist;
4113 
4114     ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4115     ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4116 
4117     while (ad->model_num) {
4118         if (glob_match(ad->model_num, model_num)) {
4119             if (ad->model_rev == NULL)
4120                 return ad->horkage;
4121             if (glob_match(ad->model_rev, model_rev))
4122                 return ad->horkage;
4123         }
4124         ad++;
4125     }
4126     return 0;
4127 }
4128 
4129 static int ata_dma_blacklisted(const struct ata_device *dev)
4130 {
4131     /* We don't support polling DMA.
4132      * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4133      * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4134      */
4135     if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4136         (dev->flags & ATA_DFLAG_CDB_INTR))
4137         return 1;
4138     return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4139 }
4140 
4141 /**
4142  *  ata_is_40wire       -   check drive side detection
4143  *  @dev: device
4144  *
4145  *  Perform drive side detection decoding, allowing for device vendors
4146  *  who can't follow the documentation.
4147  */
4148 
4149 static int ata_is_40wire(struct ata_device *dev)
4150 {
4151     if (dev->horkage & ATA_HORKAGE_IVB)
4152         return ata_drive_40wire_relaxed(dev->id);
4153     return ata_drive_40wire(dev->id);
4154 }
4155 
4156 /**
4157  *  cable_is_40wire     -   40/80/SATA decider
4158  *  @ap: port to consider
4159  *
4160  *  This function encapsulates the policy for speed management
4161  *  in one place. At the moment we don't cache the result but
4162  *  there is a good case for setting ap->cbl to the result when
4163  *  we are called with unknown cables (and figuring out if it
4164  *  impacts hotplug at all).
4165  *
4166  *  Return 1 if the cable appears to be 40 wire.
4167  */
4168 
4169 static int cable_is_40wire(struct ata_port *ap)
4170 {
4171     struct ata_link *link;
4172     struct ata_device *dev;
4173 
4174     /* If the controller thinks we are 40 wire, we are. */
4175     if (ap->cbl == ATA_CBL_PATA40)
4176         return 1;
4177 
4178     /* If the controller thinks we are 80 wire, we are. */
4179     if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4180         return 0;
4181 
4182     /* If the system is known to be 40 wire short cable (eg
4183      * laptop), then we allow 80 wire modes even if the drive
4184      * isn't sure.
4185      */
4186     if (ap->cbl == ATA_CBL_PATA40_SHORT)
4187         return 0;
4188 
4189     /* If the controller doesn't know, we scan.
4190      *
4191      * Note: We look for all 40 wire detects at this point.  Any
4192      *       80 wire detect is taken to be 80 wire cable because
4193      * - in many setups only the one drive (slave if present) will
4194      *   give a valid detect
4195      * - if you have a non detect capable drive you don't want it
4196      *   to colour the choice
4197      */
4198     ata_for_each_link(link, ap, EDGE) {
4199         ata_for_each_dev(dev, link, ENABLED) {
4200             if (!ata_is_40wire(dev))
4201                 return 0;
4202         }
4203     }
4204     return 1;
4205 }
4206 
4207 /**
4208  *  ata_dev_xfermask - Compute supported xfermask of the given device
4209  *  @dev: Device to compute xfermask for
4210  *
4211  *  Compute supported xfermask of @dev and store it in
4212  *  dev->*_mask.  This function is responsible for applying all
4213  *  known limits including host controller limits, device
4214  *  blacklist, etc...
4215  *
4216  *  LOCKING:
4217  *  None.
4218  */
4219 static void ata_dev_xfermask(struct ata_device *dev)
4220 {
4221     struct ata_link *link = dev->link;
4222     struct ata_port *ap = link->ap;
4223     struct ata_host *host = ap->host;
4224     unsigned int xfer_mask;
4225 
4226     /* controller modes available */
4227     xfer_mask = ata_pack_xfermask(ap->pio_mask,
4228                       ap->mwdma_mask, ap->udma_mask);
4229 
4230     /* drive modes available */
4231     xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4232                        dev->mwdma_mask, dev->udma_mask);
4233     xfer_mask &= ata_id_xfermask(dev->id);
4234 
4235     /*
4236      *  CFA Advanced TrueIDE timings are not allowed on a shared
4237      *  cable
4238      */
4239     if (ata_dev_pair(dev)) {
4240         /* No PIO5 or PIO6 */
4241         xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4242         /* No MWDMA3 or MWDMA 4 */
4243         xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4244     }
4245 
4246     if (ata_dma_blacklisted(dev)) {
4247         xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4248         ata_dev_warn(dev,
4249                  "device is on DMA blacklist, disabling DMA\n");
4250     }
4251 
4252     if ((host->flags & ATA_HOST_SIMPLEX) &&
4253         host->simplex_claimed && host->simplex_claimed != ap) {
4254         xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4255         ata_dev_warn(dev,
4256                  "simplex DMA is claimed by other device, disabling DMA\n");
4257     }
4258 
4259     if (ap->flags & ATA_FLAG_NO_IORDY)
4260         xfer_mask &= ata_pio_mask_no_iordy(dev);
4261 
4262     if (ap->ops->mode_filter)
4263         xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4264 
4265     /* Apply cable rule here.  Don't apply it early because when
4266      * we handle hot plug the cable type can itself change.
4267      * Check this last so that we know if the transfer rate was
4268      * solely limited by the cable.
4269      * Unknown or 80 wire cables reported host side are checked
4270      * drive side as well. Cases where we know a 40wire cable
4271      * is used safely for 80 are not checked here.
4272      */
4273     if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4274         /* UDMA/44 or higher would be available */
4275         if (cable_is_40wire(ap)) {
4276             ata_dev_warn(dev,
4277                      "limited to UDMA/33 due to 40-wire cable\n");
4278             xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4279         }
4280 
4281     ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4282                 &dev->mwdma_mask, &dev->udma_mask);
4283 }
4284 
4285 /**
4286  *  ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4287  *  @dev: Device to which command will be sent
4288  *
4289  *  Issue SET FEATURES - XFER MODE command to device @dev
4290  *  on port @ap.
4291  *
4292  *  LOCKING:
4293  *  PCI/etc. bus probe sem.
4294  *
4295  *  RETURNS:
4296  *  0 on success, AC_ERR_* mask otherwise.
4297  */
4298 
4299 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4300 {
4301     struct ata_taskfile tf;
4302     unsigned int err_mask;
4303 
4304     /* set up set-features taskfile */
4305     ata_dev_dbg(dev, "set features - xfer mode\n");
4306 
4307     /* Some controllers and ATAPI devices show flaky interrupt
4308      * behavior after setting xfer mode.  Use polling instead.
4309      */
4310     ata_tf_init(dev, &tf);
4311     tf.command = ATA_CMD_SET_FEATURES;
4312     tf.feature = SETFEATURES_XFER;
4313     tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4314     tf.protocol = ATA_PROT_NODATA;
4315     /* If we are using IORDY we must send the mode setting command */
4316     if (ata_pio_need_iordy(dev))
4317         tf.nsect = dev->xfer_mode;
4318     /* If the device has IORDY and the controller does not - turn it off */
4319     else if (ata_id_has_iordy(dev->id))
4320         tf.nsect = 0x01;
4321     else /* In the ancient relic department - skip all of this */
4322         return 0;
4323 
4324     /* On some disks, this command causes spin-up, so we need longer timeout */
4325     err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4326 
4327     return err_mask;
4328 }
4329 
4330 /**
4331  *  ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4332  *  @dev: Device to which command will be sent
4333  *  @enable: Whether to enable or disable the feature
4334  *  @feature: The sector count represents the feature to set
4335  *
4336  *  Issue SET FEATURES - SATA FEATURES command to device @dev
4337  *  on port @ap with sector count
4338  *
4339  *  LOCKING:
4340  *  PCI/etc. bus probe sem.
4341  *
4342  *  RETURNS:
4343  *  0 on success, AC_ERR_* mask otherwise.
4344  */
4345 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4346 {
4347     struct ata_taskfile tf;
4348     unsigned int err_mask;
4349     unsigned int timeout = 0;
4350 
4351     /* set up set-features taskfile */
4352     ata_dev_dbg(dev, "set features - SATA features\n");
4353 
4354     ata_tf_init(dev, &tf);
4355     tf.command = ATA_CMD_SET_FEATURES;
4356     tf.feature = enable;
4357     tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4358     tf.protocol = ATA_PROT_NODATA;
4359     tf.nsect = feature;
4360 
4361     if (enable == SETFEATURES_SPINUP)
4362         timeout = ata_probe_timeout ?
4363               ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
4364     err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
4365 
4366     return err_mask;
4367 }
4368 EXPORT_SYMBOL_GPL(ata_dev_set_feature);
4369 
4370 /**
4371  *  ata_dev_init_params - Issue INIT DEV PARAMS command
4372  *  @dev: Device to which command will be sent
4373  *  @heads: Number of heads (taskfile parameter)
4374  *  @sectors: Number of sectors (taskfile parameter)
4375  *
4376  *  LOCKING:
4377  *  Kernel thread context (may sleep)
4378  *
4379  *  RETURNS:
4380  *  0 on success, AC_ERR_* mask otherwise.
4381  */
4382 static unsigned int ata_dev_init_params(struct ata_device *dev,
4383                     u16 heads, u16 sectors)
4384 {
4385     struct ata_taskfile tf;
4386     unsigned int err_mask;
4387 
4388     /* Number of sectors per track 1-255. Number of heads 1-16 */
4389     if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4390         return AC_ERR_INVALID;
4391 
4392     /* set up init dev params taskfile */
4393     ata_dev_dbg(dev, "init dev params \n");
4394 
4395     ata_tf_init(dev, &tf);
4396     tf.command = ATA_CMD_INIT_DEV_PARAMS;
4397     tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4398     tf.protocol = ATA_PROT_NODATA;
4399     tf.nsect = sectors;
4400     tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4401 
4402     err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4403     /* A clean abort indicates an original or just out of spec drive
4404        and we should continue as we issue the setup based on the
4405        drive reported working geometry */
4406     if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
4407         err_mask = 0;
4408 
4409     return err_mask;
4410 }
4411 
4412 /**
4413  *  atapi_check_dma - Check whether ATAPI DMA can be supported
4414  *  @qc: Metadata associated with taskfile to check
4415  *
4416  *  Allow low-level driver to filter ATA PACKET commands, returning
4417  *  a status indicating whether or not it is OK to use DMA for the
4418  *  supplied PACKET command.
4419  *
4420  *  LOCKING:
4421  *  spin_lock_irqsave(host lock)
4422  *
4423  *  RETURNS: 0 when ATAPI DMA can be used
4424  *               nonzero otherwise
4425  */
4426 int atapi_check_dma(struct ata_queued_cmd *qc)
4427 {
4428     struct ata_port *ap = qc->ap;
4429 
4430     /* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4431      * few ATAPI devices choke on such DMA requests.
4432      */
4433     if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4434         unlikely(qc->nbytes & 15))
4435         return 1;
4436 
4437     if (ap->ops->check_atapi_dma)
4438         return ap->ops->check_atapi_dma(qc);
4439 
4440     return 0;
4441 }
4442 
4443 /**
4444  *  ata_std_qc_defer - Check whether a qc needs to be deferred
4445  *  @qc: ATA command in question
4446  *
4447  *  Non-NCQ commands cannot run with any other command, NCQ or
4448  *  not.  As upper layer only knows the queue depth, we are
4449  *  responsible for maintaining exclusion.  This function checks
4450  *  whether a new command @qc can be issued.
4451  *
4452  *  LOCKING:
4453  *  spin_lock_irqsave(host lock)
4454  *
4455  *  RETURNS:
4456  *  ATA_DEFER_* if deferring is needed, 0 otherwise.
4457  */
4458 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4459 {
4460     struct ata_link *link = qc->dev->link;
4461 
4462     if (ata_is_ncq(qc->tf.protocol)) {
4463         if (!ata_tag_valid(link->active_tag))
4464             return 0;
4465     } else {
4466         if (!ata_tag_valid(link->active_tag) && !link->sactive)
4467             return 0;
4468     }
4469 
4470     return ATA_DEFER_LINK;
4471 }
4472 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
4473 
4474 enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
4475 {
4476     return AC_ERR_OK;
4477 }
4478 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
4479 
4480 /**
4481  *  ata_sg_init - Associate command with scatter-gather table.
4482  *  @qc: Command to be associated
4483  *  @sg: Scatter-gather table.
4484  *  @n_elem: Number of elements in s/g table.
4485  *
4486  *  Initialize the data-related elements of queued_cmd @qc
4487  *  to point to a scatter-gather table @sg, containing @n_elem
4488  *  elements.
4489  *
4490  *  LOCKING:
4491  *  spin_lock_irqsave(host lock)
4492  */
4493 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4494          unsigned int n_elem)
4495 {
4496     qc->sg = sg;
4497     qc->n_elem = n_elem;
4498     qc->cursg = qc->sg;
4499 }
4500 
4501 #ifdef CONFIG_HAS_DMA
4502 
4503 /**
4504  *  ata_sg_clean - Unmap DMA memory associated with command
4505  *  @qc: Command containing DMA memory to be released
4506  *
4507  *  Unmap all mapped DMA memory associated with this command.
4508  *
4509  *  LOCKING:
4510  *  spin_lock_irqsave(host lock)
4511  */
4512 static void ata_sg_clean(struct ata_queued_cmd *qc)
4513 {
4514     struct ata_port *ap = qc->ap;
4515     struct scatterlist *sg = qc->sg;
4516     int dir = qc->dma_dir;
4517 
4518     WARN_ON_ONCE(sg == NULL);
4519 
4520     if (qc->n_elem)
4521         dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4522 
4523     qc->flags &= ~ATA_QCFLAG_DMAMAP;
4524     qc->sg = NULL;
4525 }
4526 
4527 /**
4528  *  ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4529  *  @qc: Command with scatter-gather table to be mapped.
4530  *
4531  *  DMA-map the scatter-gather table associated with queued_cmd @qc.
4532  *
4533  *  LOCKING:
4534  *  spin_lock_irqsave(host lock)
4535  *
4536  *  RETURNS:
4537  *  Zero on success, negative on error.
4538  *
4539  */
4540 static int ata_sg_setup(struct ata_queued_cmd *qc)
4541 {
4542     struct ata_port *ap = qc->ap;
4543     unsigned int n_elem;
4544 
4545     n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4546     if (n_elem < 1)
4547         return -1;
4548 
4549     qc->orig_n_elem = qc->n_elem;
4550     qc->n_elem = n_elem;
4551     qc->flags |= ATA_QCFLAG_DMAMAP;
4552 
4553     return 0;
4554 }
4555 
4556 #else /* !CONFIG_HAS_DMA */
4557 
4558 static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
4559 static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
4560 
4561 #endif /* !CONFIG_HAS_DMA */
4562 
4563 /**
4564  *  swap_buf_le16 - swap halves of 16-bit words in place
4565  *  @buf:  Buffer to swap
4566  *  @buf_words:  Number of 16-bit words in buffer.
4567  *
4568  *  Swap halves of 16-bit words if needed to convert from
4569  *  little-endian byte order to native cpu byte order, or
4570  *  vice-versa.
4571  *
4572  *  LOCKING:
4573  *  Inherited from caller.
4574  */
4575 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4576 {
4577 #ifdef __BIG_ENDIAN
4578     unsigned int i;
4579 
4580     for (i = 0; i < buf_words; i++)
4581         buf[i] = le16_to_cpu(buf[i]);
4582 #endif /* __BIG_ENDIAN */
4583 }
4584 
4585 /**
4586  *  ata_qc_free - free unused ata_queued_cmd
4587  *  @qc: Command to complete
4588  *
4589  *  Designed to free unused ata_queued_cmd object
4590  *  in case something prevents using it.
4591  *
4592  *  LOCKING:
4593  *  spin_lock_irqsave(host lock)
4594  */
4595 void ata_qc_free(struct ata_queued_cmd *qc)
4596 {
4597     qc->flags = 0;
4598     if (ata_tag_valid(qc->tag))
4599         qc->tag = ATA_TAG_POISON;
4600 }
4601 
4602 void __ata_qc_complete(struct ata_queued_cmd *qc)
4603 {
4604     struct ata_port *ap;
4605     struct ata_link *link;
4606 
4607     WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4608     WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4609     ap = qc->ap;
4610     link = qc->dev->link;
4611 
4612     if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4613         ata_sg_clean(qc);
4614 
4615     /* command should be marked inactive atomically with qc completion */
4616     if (ata_is_ncq(qc->tf.protocol)) {
4617         link->sactive &= ~(1 << qc->hw_tag);
4618         if (!link->sactive)
4619             ap->nr_active_links--;
4620     } else {
4621         link->active_tag = ATA_TAG_POISON;
4622         ap->nr_active_links--;
4623     }
4624 
4625     /* clear exclusive status */
4626     if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4627              ap->excl_link == link))
4628         ap->excl_link = NULL;
4629 
4630     /* atapi: mark qc as inactive to prevent the interrupt handler
4631      * from completing the command twice later, before the error handler
4632      * is called. (when rc != 0 and atapi request sense is needed)
4633      */
4634     qc->flags &= ~ATA_QCFLAG_ACTIVE;
4635     ap->qc_active &= ~(1ULL << qc->tag);
4636 
4637     /* call completion callback */
4638     qc->complete_fn(qc);
4639 }
4640 
4641 static void fill_result_tf(struct ata_queued_cmd *qc)
4642 {
4643     struct ata_port *ap = qc->ap;
4644 
4645     qc->result_tf.flags = qc->tf.flags;
4646     ap->ops->qc_fill_rtf(qc);
4647 }
4648 
4649 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4650 {
4651     struct ata_device *dev = qc->dev;
4652 
4653     if (!ata_is_data(qc->tf.protocol))
4654         return;
4655 
4656     if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4657         return;
4658 
4659     dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4660 }
4661 
4662 /**
4663  *  ata_qc_complete - Complete an active ATA command
4664  *  @qc: Command to complete
4665  *
4666  *  Indicate to the mid and upper layers that an ATA command has
4667  *  completed, with either an ok or not-ok status.
4668  *
4669  *  Refrain from calling this function multiple times when
4670  *  successfully completing multiple NCQ commands.
4671  *  ata_qc_complete_multiple() should be used instead, which will
4672  *  properly update IRQ expect state.
4673  *
4674  *  LOCKING:
4675  *  spin_lock_irqsave(host lock)
4676  */
4677 void ata_qc_complete(struct ata_queued_cmd *qc)
4678 {
4679     struct ata_port *ap = qc->ap;
4680 
4681     /* Trigger the LED (if available) */
4682     ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));
4683 
4684     /* XXX: New EH and old EH use different mechanisms to
4685      * synchronize EH with regular execution path.
4686      *
4687      * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4688      * Normal execution path is responsible for not accessing a
4689      * failed qc.  libata core enforces the rule by returning NULL
4690      * from ata_qc_from_tag() for failed qcs.
4691      *
4692      * Old EH depends on ata_qc_complete() nullifying completion
4693      * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
4694      * not synchronize with interrupt handler.  Only PIO task is
4695      * taken care of.
4696      */
4697     if (ap->ops->error_handler) {
4698         struct ata_device *dev = qc->dev;
4699         struct ata_eh_info *ehi = &dev->link->eh_info;
4700 
4701         if (unlikely(qc->err_mask))
4702             qc->flags |= ATA_QCFLAG_FAILED;
4703 
4704         /*
4705          * Finish internal commands without any further processing
4706          * and always with the result TF filled.
4707          */
4708         if (unlikely(ata_tag_internal(qc->tag))) {
4709             fill_result_tf(qc);
4710             trace_ata_qc_complete_internal(qc);
4711             __ata_qc_complete(qc);
4712             return;
4713         }
4714 
4715         /*
4716          * Non-internal qc has failed.  Fill the result TF and
4717          * summon EH.
4718          */
4719         if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4720             fill_result_tf(qc);
4721             trace_ata_qc_complete_failed(qc);
4722             ata_qc_schedule_eh(qc);
4723             return;
4724         }
4725 
4726         WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
4727 
4728         /* read result TF if requested */
4729         if (qc->flags & ATA_QCFLAG_RESULT_TF)
4730             fill_result_tf(qc);
4731 
4732         trace_ata_qc_complete_done(qc);
4733         /* Some commands need post-processing after successful
4734          * completion.
4735          */
4736         switch (qc->tf.command) {
4737         case ATA_CMD_SET_FEATURES:
4738             if (qc->tf.feature != SETFEATURES_WC_ON &&
4739                 qc->tf.feature != SETFEATURES_WC_OFF &&
4740                 qc->tf.feature != SETFEATURES_RA_ON &&
4741                 qc->tf.feature != SETFEATURES_RA_OFF)
4742                 break;
4743             fallthrough;
4744         case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4745         case ATA_CMD_SET_MULTI: /* multi_count changed */
4746             /* revalidate device */
4747             ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4748             ata_port_schedule_eh(ap);
4749             break;
4750 
4751         case ATA_CMD_SLEEP:
4752             dev->flags |= ATA_DFLAG_SLEEPING;
4753             break;
4754         }
4755 
4756         if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4757             ata_verify_xfer(qc);
4758 
4759         __ata_qc_complete(qc);
4760     } else {
4761         if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4762             return;
4763 
4764         /* read result TF if failed or requested */
4765         if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4766             fill_result_tf(qc);
4767 
4768         __ata_qc_complete(qc);
4769     }
4770 }
4771 EXPORT_SYMBOL_GPL(ata_qc_complete);
4772 
4773 /**
4774  *  ata_qc_get_active - get bitmask of active qcs
4775  *  @ap: port in question
4776  *
4777  *  LOCKING:
4778  *  spin_lock_irqsave(host lock)
4779  *
4780  *  RETURNS:
4781  *  Bitmask of active qcs
4782  */
4783 u64 ata_qc_get_active(struct ata_port *ap)
4784 {
4785     u64 qc_active = ap->qc_active;
4786 
4787     /* ATA_TAG_INTERNAL is sent to hw as tag 0 */
4788     if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
4789         qc_active |= (1 << 0);
4790         qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
4791     }
4792 
4793     return qc_active;
4794 }
4795 EXPORT_SYMBOL_GPL(ata_qc_get_active);
4796 
4797 /**
4798  *  ata_qc_issue - issue taskfile to device
4799  *  @qc: command to issue to device
4800  *
4801  *  Prepare an ATA command to submission to device.
4802  *  This includes mapping the data into a DMA-able
4803  *  area, filling in the S/G table, and finally
4804  *  writing the taskfile to hardware, starting the command.
4805  *
4806  *  LOCKING:
4807  *  spin_lock_irqsave(host lock)
4808  */
4809 void ata_qc_issue(struct ata_queued_cmd *qc)
4810 {
4811     struct ata_port *ap = qc->ap;
4812     struct ata_link *link = qc->dev->link;
4813     u8 prot = qc->tf.protocol;
4814 
4815     /* Make sure only one non-NCQ command is outstanding.  The
4816      * check is skipped for old EH because it reuses active qc to
4817      * request ATAPI sense.
4818      */
4819     WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
4820 
4821     if (ata_is_ncq(prot)) {
4822         WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));
4823 
4824         if (!link->sactive)
4825             ap->nr_active_links++;
4826         link->sactive |= 1 << qc->hw_tag;
4827     } else {
4828         WARN_ON_ONCE(link->sactive);
4829 
4830         ap->nr_active_links++;
4831         link->active_tag = qc->tag;
4832     }
4833 
4834     qc->flags |= ATA_QCFLAG_ACTIVE;
4835     ap->qc_active |= 1ULL << qc->tag;
4836 
4837     /*
4838      * We guarantee to LLDs that they will have at least one
4839      * non-zero sg if the command is a data command.
4840      */
4841     if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
4842         goto sys_err;
4843 
4844     if (ata_is_dma(prot) || (ata_is_pio(prot) &&
4845                  (ap->flags & ATA_FLAG_PIO_DMA)))
4846         if (ata_sg_setup(qc))
4847             goto sys_err;
4848 
4849     /* if device is sleeping, schedule reset and abort the link */
4850     if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
4851         link->eh_info.action |= ATA_EH_RESET;
4852         ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
4853         ata_link_abort(link);
4854         return;
4855     }
4856 
4857     trace_ata_qc_prep(qc);
4858     qc->err_mask |= ap->ops->qc_prep(qc);
4859     if (unlikely(qc->err_mask))
4860         goto err;
4861     trace_ata_qc_issue(qc);
4862     qc->err_mask |= ap->ops->qc_issue(qc);
4863     if (unlikely(qc->err_mask))
4864         goto err;
4865     return;
4866 
4867 sys_err:
4868     qc->err_mask |= AC_ERR_SYSTEM;
4869 err:
4870     ata_qc_complete(qc);
4871 }
4872 
4873 /**
4874  *  ata_phys_link_online - test whether the given link is online
4875  *  @link: ATA link to test
4876  *
4877  *  Test whether @link is online.  Note that this function returns
4878  *  0 if online status of @link cannot be obtained, so
4879  *  ata_link_online(link) != !ata_link_offline(link).
4880  *
4881  *  LOCKING:
4882  *  None.
4883  *
4884  *  RETURNS:
4885  *  True if the port online status is available and online.
4886  */
4887 bool ata_phys_link_online(struct ata_link *link)
4888 {
4889     u32 sstatus;
4890 
4891     if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4892         ata_sstatus_online(sstatus))
4893         return true;
4894     return false;
4895 }
4896 
4897 /**
4898  *  ata_phys_link_offline - test whether the given link is offline
4899  *  @link: ATA link to test
4900  *
4901  *  Test whether @link is offline.  Note that this function
4902  *  returns 0 if offline status of @link cannot be obtained, so
4903  *  ata_link_online(link) != !ata_link_offline(link).
4904  *
4905  *  LOCKING:
4906  *  None.
4907  *
4908  *  RETURNS:
4909  *  True if the port offline status is available and offline.
4910  */
4911 bool ata_phys_link_offline(struct ata_link *link)
4912 {
4913     u32 sstatus;
4914 
4915     if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4916         !ata_sstatus_online(sstatus))
4917         return true;
4918     return false;
4919 }
4920 
4921 /**
4922  *  ata_link_online - test whether the given link is online
4923  *  @link: ATA link to test
4924  *
4925  *  Test whether @link is online.  This is identical to
4926  *  ata_phys_link_online() when there's no slave link.  When
4927  *  there's a slave link, this function should only be called on
4928  *  the master link and will return true if any of M/S links is
4929  *  online.
4930  *
4931  *  LOCKING:
4932  *  None.
4933  *
4934  *  RETURNS:
4935  *  True if the port online status is available and online.
4936  */
4937 bool ata_link_online(struct ata_link *link)
4938 {
4939     struct ata_link *slave = link->ap->slave_link;
4940 
4941     WARN_ON(link == slave); /* shouldn't be called on slave link */
4942 
4943     return ata_phys_link_online(link) ||
4944         (slave && ata_phys_link_online(slave));
4945 }
4946 EXPORT_SYMBOL_GPL(ata_link_online);
4947 
4948 /**
4949  *  ata_link_offline - test whether the given link is offline
4950  *  @link: ATA link to test
4951  *
4952  *  Test whether @link is offline.  This is identical to
4953  *  ata_phys_link_offline() when there's no slave link.  When
4954  *  there's a slave link, this function should only be called on
4955  *  the master link and will return true if both M/S links are
4956  *  offline.
4957  *
4958  *  LOCKING:
4959  *  None.
4960  *
4961  *  RETURNS:
4962  *  True if the port offline status is available and offline.
4963  */
4964 bool ata_link_offline(struct ata_link *link)
4965 {
4966     struct ata_link *slave = link->ap->slave_link;
4967 
4968     WARN_ON(link == slave); /* shouldn't be called on slave link */
4969 
4970     return ata_phys_link_offline(link) &&
4971         (!slave || ata_phys_link_offline(slave));
4972 }
4973 EXPORT_SYMBOL_GPL(ata_link_offline);
4974 
4975 #ifdef CONFIG_PM
4976 static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
4977                 unsigned int action, unsigned int ehi_flags,
4978                 bool async)
4979 {
4980     struct ata_link *link;
4981     unsigned long flags;
4982 
4983     /* Previous resume operation might still be in
4984      * progress.  Wait for PM_PENDING to clear.
4985      */
4986     if (ap->pflags & ATA_PFLAG_PM_PENDING) {
4987         ata_port_wait_eh(ap);
4988         WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4989     }
4990 
4991     /* request PM ops to EH */
4992     spin_lock_irqsave(ap->lock, flags);
4993 
4994     ap->pm_mesg = mesg;
4995     ap->pflags |= ATA_PFLAG_PM_PENDING;
4996     ata_for_each_link(link, ap, HOST_FIRST) {
4997         link->eh_info.action |= action;
4998         link->eh_info.flags |= ehi_flags;
4999     }
5000 
5001     ata_port_schedule_eh(ap);
5002 
5003     spin_unlock_irqrestore(ap->lock, flags);
5004 
5005     if (!async) {
5006         ata_port_wait_eh(ap);
5007         WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5008     }
5009 }
5010 
5011 /*
5012  * On some hardware, device fails to respond after spun down for suspend.  As
5013  * the device won't be used before being resumed, we don't need to touch the
5014  * device.  Ask EH to skip the usual stuff and proceed directly to suspend.
5015  *
5016  * http://thread.gmane.org/gmane.linux.ide/46764
5017  */
5018 static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
5019                          | ATA_EHI_NO_AUTOPSY
5020                          | ATA_EHI_NO_RECOVERY;
5021 
5022 static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
5023 {
5024     ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
5025 }
5026 
5027 static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
5028 {
5029     ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
5030 }
5031 
5032 static int ata_port_pm_suspend(struct device *dev)
5033 {
5034     struct ata_port *ap = to_ata_port(dev);
5035 
5036     if (pm_runtime_suspended(dev))
5037         return 0;
5038 
5039     ata_port_suspend(ap, PMSG_SUSPEND);
5040     return 0;
5041 }
5042 
5043 static int ata_port_pm_freeze(struct device *dev)
5044 {
5045     struct ata_port *ap = to_ata_port(dev);
5046 
5047     if (pm_runtime_suspended(dev))
5048         return 0;
5049 
5050     ata_port_suspend(ap, PMSG_FREEZE);
5051     return 0;
5052 }
5053 
5054 static int ata_port_pm_poweroff(struct device *dev)
5055 {
5056     ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
5057     return 0;
5058 }
5059 
5060 static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
5061                         | ATA_EHI_QUIET;
5062 
5063 static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
5064 {
5065     ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
5066 }
5067 
5068 static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
5069 {
5070     ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
5071 }
5072 
5073 static int ata_port_pm_resume(struct device *dev)
5074 {
5075     ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
5076     pm_runtime_disable(dev);
5077     pm_runtime_set_active(dev);
5078     pm_runtime_enable(dev);
5079     return 0;
5080 }
5081 
5082 /*
5083  * For ODDs, the upper layer will poll for media change every few seconds,
5084  * which will make it enter and leave suspend state every few seconds. And
5085  * as each suspend will cause a hard/soft reset, the gain of runtime suspend
5086  * is very little and the ODD may malfunction after constantly being reset.
5087  * So the idle callback here will not proceed to suspend if a non-ZPODD capable
5088  * ODD is attached to the port.
5089  */
5090 static int ata_port_runtime_idle(struct device *dev)
5091 {
5092     struct ata_port *ap = to_ata_port(dev);
5093     struct ata_link *link;
5094     struct ata_device *adev;
5095 
5096     ata_for_each_link(link, ap, HOST_FIRST) {
5097         ata_for_each_dev(adev, link, ENABLED)
5098             if (adev->class == ATA_DEV_ATAPI &&
5099                 !zpodd_dev_enabled(adev))
5100                 return -EBUSY;
5101     }
5102 
5103     return 0;
5104 }
5105 
5106 static int ata_port_runtime_suspend(struct device *dev)
5107 {
5108     ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
5109     return 0;
5110 }
5111 
5112 static int ata_port_runtime_resume(struct device *dev)
5113 {
5114     ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
5115     return 0;
5116 }
5117 
5118 static const struct dev_pm_ops ata_port_pm_ops = {
5119     .suspend = ata_port_pm_suspend,
5120     .resume = ata_port_pm_resume,
5121     .freeze = ata_port_pm_freeze,
5122     .thaw = ata_port_pm_resume,
5123     .poweroff = ata_port_pm_poweroff,
5124     .restore = ata_port_pm_resume,
5125 
5126     .runtime_suspend = ata_port_runtime_suspend,
5127     .runtime_resume = ata_port_runtime_resume,
5128     .runtime_idle = ata_port_runtime_idle,
5129 };
5130 
5131 /* sas ports don't participate in pm runtime management of ata_ports,
5132  * and need to resume ata devices at the domain level, not the per-port
5133  * level. sas suspend/resume is async to allow parallel port recovery
5134  * since sas has multiple ata_port instances per Scsi_Host.
5135  */
5136 void ata_sas_port_suspend(struct ata_port *ap)
5137 {
5138     ata_port_suspend_async(ap, PMSG_SUSPEND);
5139 }
5140 EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5141 
5142 void ata_sas_port_resume(struct ata_port *ap)
5143 {
5144     ata_port_resume_async(ap, PMSG_RESUME);
5145 }
5146 EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5147 
5148 /**
5149  *  ata_host_suspend - suspend host
5150  *  @host: host to suspend
5151  *  @mesg: PM message
5152  *
5153  *  Suspend @host.  Actual operation is performed by port suspend.
5154  */
5155 void ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5156 {
5157     host->dev->power.power_state = mesg;
5158 }
5159 EXPORT_SYMBOL_GPL(ata_host_suspend);
5160 
5161 /**
5162  *  ata_host_resume - resume host
5163  *  @host: host to resume
5164  *
5165  *  Resume @host.  Actual operation is performed by port resume.
5166  */
5167 void ata_host_resume(struct ata_host *host)
5168 {
5169     host->dev->power.power_state = PMSG_ON;
5170 }
5171 EXPORT_SYMBOL_GPL(ata_host_resume);
5172 #endif
5173 
5174 const struct device_type ata_port_type = {
5175     .name = "ata_port",
5176 #ifdef CONFIG_PM
5177     .pm = &ata_port_pm_ops,
5178 #endif
5179 };
5180 
5181 /**
5182  *  ata_dev_init - Initialize an ata_device structure
5183  *  @dev: Device structure to initialize
5184  *
5185  *  Initialize @dev in preparation for probing.
5186  *
5187  *  LOCKING:
5188  *  Inherited from caller.
5189  */
5190 void ata_dev_init(struct ata_device *dev)
5191 {
5192     struct ata_link *link = ata_dev_phys_link(dev);
5193     struct ata_port *ap = link->ap;
5194     unsigned long flags;
5195 
5196     /* SATA spd limit is bound to the attached device, reset together */
5197     link->sata_spd_limit = link->hw_sata_spd_limit;
5198     link->sata_spd = 0;
5199 
5200     /* High bits of dev->flags are used to record warm plug
5201      * requests which occur asynchronously.  Synchronize using
5202      * host lock.
5203      */
5204     spin_lock_irqsave(ap->lock, flags);
5205     dev->flags &= ~ATA_DFLAG_INIT_MASK;
5206     dev->horkage = 0;
5207     spin_unlock_irqrestore(ap->lock, flags);
5208 
5209     memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5210            ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5211     dev->pio_mask = UINT_MAX;
5212     dev->mwdma_mask = UINT_MAX;
5213     dev->udma_mask = UINT_MAX;
5214 }
5215 
5216 /**
5217  *  ata_link_init - Initialize an ata_link structure
5218  *  @ap: ATA port link is attached to
5219  *  @link: Link structure to initialize
5220  *  @pmp: Port multiplier port number
5221  *
5222  *  Initialize @link.
5223  *
5224  *  LOCKING:
5225  *  Kernel thread context (may sleep)
5226  */
5227 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5228 {
5229     int i;
5230 
5231     /* clear everything except for devices */
5232     memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5233            ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5234 
5235     link->ap = ap;
5236     link->pmp = pmp;
5237     link->active_tag = ATA_TAG_POISON;
5238     link->hw_sata_spd_limit = UINT_MAX;
5239 
5240     /* can't use iterator, ap isn't initialized yet */
5241     for (i = 0; i < ATA_MAX_DEVICES; i++) {
5242         struct ata_device *dev = &link->device[i];
5243 
5244         dev->link = link;
5245         dev->devno = dev - link->device;
5246 #ifdef CONFIG_ATA_ACPI
5247         dev->gtf_filter = ata_acpi_gtf_filter;
5248 #endif
5249         ata_dev_init(dev);
5250     }
5251 }
5252 
5253 /**
5254  *  sata_link_init_spd - Initialize link->sata_spd_limit
5255  *  @link: Link to configure sata_spd_limit for
5256  *
5257  *  Initialize ``link->[hw_]sata_spd_limit`` to the currently
5258  *  configured value.
5259  *
5260  *  LOCKING:
5261  *  Kernel thread context (may sleep).
5262  *
5263  *  RETURNS:
5264  *  0 on success, -errno on failure.
5265  */
5266 int sata_link_init_spd(struct ata_link *link)
5267 {
5268     u8 spd;
5269     int rc;
5270 
5271     rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5272     if (rc)
5273         return rc;
5274 
5275     spd = (link->saved_scontrol >> 4) & 0xf;
5276     if (spd)
5277         link->hw_sata_spd_limit &= (1 << spd) - 1;
5278 
5279     ata_force_link_limits(link);
5280 
5281     link->sata_spd_limit = link->hw_sata_spd_limit;
5282 
5283     return 0;
5284 }
5285 
5286 /**
5287  *  ata_port_alloc - allocate and initialize basic ATA port resources
5288  *  @host: ATA host this allocated port belongs to
5289  *
5290  *  Allocate and initialize basic ATA port resources.
5291  *
5292  *  RETURNS:
5293  *  Allocate ATA port on success, NULL on failure.
5294  *
5295  *  LOCKING:
5296  *  Inherited from calling layer (may sleep).
5297  */
5298 struct ata_port *ata_port_alloc(struct ata_host *host)
5299 {
5300     struct ata_port *ap;
5301 
5302     ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5303     if (!ap)
5304         return NULL;
5305 
5306     ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5307     ap->lock = &host->lock;
5308     ap->print_id = -1;
5309     ap->local_port_no = -1;
5310     ap->host = host;
5311     ap->dev = host->dev;
5312 
5313     mutex_init(&ap->scsi_scan_mutex);
5314     INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5315     INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5316     INIT_LIST_HEAD(&ap->eh_done_q);
5317     init_waitqueue_head(&ap->eh_wait_q);
5318     init_completion(&ap->park_req_pending);
5319     timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
5320             TIMER_DEFERRABLE);
5321 
5322     ap->cbl = ATA_CBL_NONE;
5323 
5324     ata_link_init(ap, &ap->link, 0);
5325 
5326 #ifdef ATA_IRQ_TRAP
5327     ap->stats.unhandled_irq = 1;
5328     ap->stats.idle_irq = 1;
5329 #endif
5330     ata_sff_port_init(ap);
5331 
5332     return ap;
5333 }
5334 
5335 static void ata_devres_release(struct device *gendev, void *res)
5336 {
5337     struct ata_host *host = dev_get_drvdata(gendev);
5338     int i;
5339 
5340     for (i = 0; i < host->n_ports; i++) {
5341         struct ata_port *ap = host->ports[i];
5342 
5343         if (!ap)
5344             continue;
5345 
5346         if (ap->scsi_host)
5347             scsi_host_put(ap->scsi_host);
5348 
5349     }
5350 
5351     dev_set_drvdata(gendev, NULL);
5352     ata_host_put(host);
5353 }
5354 
5355 static void ata_host_release(struct kref *kref)
5356 {
5357     struct ata_host *host = container_of(kref, struct ata_host, kref);
5358     int i;
5359 
5360     for (i = 0; i < host->n_ports; i++) {
5361         struct ata_port *ap = host->ports[i];
5362 
5363         kfree(ap->pmp_link);
5364         kfree(ap->slave_link);
5365         kfree(ap);
5366         host->ports[i] = NULL;
5367     }
5368     kfree(host);
5369 }
5370 
5371 void ata_host_get(struct ata_host *host)
5372 {
5373     kref_get(&host->kref);
5374 }
5375 
5376 void ata_host_put(struct ata_host *host)
5377 {
5378     kref_put(&host->kref, ata_host_release);
5379 }
5380 EXPORT_SYMBOL_GPL(ata_host_put);
5381 
5382 /**
5383  *  ata_host_alloc - allocate and init basic ATA host resources
5384  *  @dev: generic device this host is associated with
5385  *  @max_ports: maximum number of ATA ports associated with this host
5386  *
5387  *  Allocate and initialize basic ATA host resources.  LLD calls
5388  *  this function to allocate a host, initializes it fully and
5389  *  attaches it using ata_host_register().
5390  *
5391  *  @max_ports ports are allocated and host->n_ports is
5392  *  initialized to @max_ports.  The caller is allowed to decrease
5393  *  host->n_ports before calling ata_host_register().  The unused
5394  *  ports will be automatically freed on registration.
5395  *
5396  *  RETURNS:
5397  *  Allocate ATA host on success, NULL on failure.
5398  *
5399  *  LOCKING:
5400  *  Inherited from calling layer (may sleep).
5401  */
5402 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5403 {
5404     struct ata_host *host;
5405     size_t sz;
5406     int i;
5407     void *dr;
5408 
5409     /* alloc a container for our list of ATA ports (buses) */
5410     sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5411     host = kzalloc(sz, GFP_KERNEL);
5412     if (!host)
5413         return NULL;
5414 
5415     if (!devres_open_group(dev, NULL, GFP_KERNEL))
5416         goto err_free;
5417 
5418     dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
5419     if (!dr)
5420         goto err_out;
5421 
5422     devres_add(dev, dr);
5423     dev_set_drvdata(dev, host);
5424 
5425     spin_lock_init(&host->lock);
5426     mutex_init(&host->eh_mutex);
5427     host->dev = dev;
5428     host->n_ports = max_ports;
5429     kref_init(&host->kref);
5430 
5431     /* allocate ports bound to this host */
5432     for (i = 0; i < max_ports; i++) {
5433         struct ata_port *ap;
5434 
5435         ap = ata_port_alloc(host);
5436         if (!ap)
5437             goto err_out;
5438 
5439         ap->port_no = i;
5440         host->ports[i] = ap;
5441     }
5442 
5443     devres_remove_group(dev, NULL);
5444     return host;
5445 
5446  err_out:
5447     devres_release_group(dev, NULL);
5448  err_free:
5449     kfree(host);
5450     return NULL;
5451 }
5452 EXPORT_SYMBOL_GPL(ata_host_alloc);
5453 
5454 /**
5455  *  ata_host_alloc_pinfo - alloc host and init with port_info array
5456  *  @dev: generic device this host is associated with
5457  *  @ppi: array of ATA port_info to initialize host with
5458  *  @n_ports: number of ATA ports attached to this host
5459  *
5460  *  Allocate ATA host and initialize with info from @ppi.  If NULL
5461  *  terminated, @ppi may contain fewer entries than @n_ports.  The
5462  *  last entry will be used for the remaining ports.
5463  *
5464  *  RETURNS:
5465  *  Allocate ATA host on success, NULL on failure.
5466  *
5467  *  LOCKING:
5468  *  Inherited from calling layer (may sleep).
5469  */
5470 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5471                       const struct ata_port_info * const * ppi,
5472                       int n_ports)
5473 {
5474     const struct ata_port_info *pi = &ata_dummy_port_info;
5475     struct ata_host *host;
5476     int i, j;
5477 
5478     host = ata_host_alloc(dev, n_ports);
5479     if (!host)
5480         return NULL;
5481 
5482     for (i = 0, j = 0; i < host->n_ports; i++) {
5483         struct ata_port *ap = host->ports[i];
5484 
5485         if (ppi[j])
5486             pi = ppi[j++];
5487 
5488         ap->pio_mask = pi->pio_mask;
5489         ap->mwdma_mask = pi->mwdma_mask;
5490         ap->udma_mask = pi->udma_mask;
5491         ap->flags |= pi->flags;
5492         ap->link.flags |= pi->link_flags;
5493         ap->ops = pi->port_ops;
5494 
5495         if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5496             host->ops = pi->port_ops;
5497     }
5498 
5499     return host;
5500 }
5501 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
5502 
5503 static void ata_host_stop(struct device *gendev, void *res)
5504 {
5505     struct ata_host *host = dev_get_drvdata(gendev);
5506     int i;
5507 
5508     WARN_ON(!(host->flags & ATA_HOST_STARTED));
5509 
5510     for (i = 0; i < host->n_ports; i++) {
5511         struct ata_port *ap = host->ports[i];
5512 
5513         if (ap->ops->port_stop)
5514             ap->ops->port_stop(ap);
5515     }
5516 
5517     if (host->ops->host_stop)
5518         host->ops->host_stop(host);
5519 }
5520 
5521 /**
5522  *  ata_finalize_port_ops - finalize ata_port_operations
5523  *  @ops: ata_port_operations to finalize
5524  *
5525  *  An ata_port_operations can inherit from another ops and that
5526  *  ops can again inherit from another.  This can go on as many
5527  *  times as necessary as long as there is no loop in the
5528  *  inheritance chain.
5529  *
5530  *  Ops tables are finalized when the host is started.  NULL or
5531  *  unspecified entries are inherited from the closet ancestor
5532  *  which has the method and the entry is populated with it.
5533  *  After finalization, the ops table directly points to all the
5534  *  methods and ->inherits is no longer necessary and cleared.
5535  *
5536  *  Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5537  *
5538  *  LOCKING:
5539  *  None.
5540  */
5541 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5542 {
5543     static DEFINE_SPINLOCK(lock);
5544     const struct ata_port_operations *cur;
5545     void **begin = (void **)ops;
5546     void **end = (void **)&ops->inherits;
5547     void **pp;
5548 
5549     if (!ops || !ops->inherits)
5550         return;
5551 
5552     spin_lock(&lock);
5553 
5554     for (cur = ops->inherits; cur; cur = cur->inherits) {
5555         void **inherit = (void **)cur;
5556 
5557         for (pp = begin; pp < end; pp++, inherit++)
5558             if (!*pp)
5559                 *pp = *inherit;
5560     }
5561 
5562     for (pp = begin; pp < end; pp++)
5563         if (IS_ERR(*pp))
5564             *pp = NULL;
5565 
5566     ops->inherits = NULL;
5567 
5568     spin_unlock(&lock);
5569 }
5570 
5571 /**
5572  *  ata_host_start - start and freeze ports of an ATA host
5573  *  @host: ATA host to start ports for
5574  *
5575  *  Start and then freeze ports of @host.  Started status is
5576  *  recorded in host->flags, so this function can be called
5577  *  multiple times.  Ports are guaranteed to get started only
5578  *  once.  If host->ops is not initialized yet, it is set to the
5579  *  first non-dummy port ops.
5580  *
5581  *  LOCKING:
5582  *  Inherited from calling layer (may sleep).
5583  *
5584  *  RETURNS:
5585  *  0 if all ports are started successfully, -errno otherwise.
5586  */
5587 int ata_host_start(struct ata_host *host)
5588 {
5589     int have_stop = 0;
5590     void *start_dr = NULL;
5591     int i, rc;
5592 
5593     if (host->flags & ATA_HOST_STARTED)
5594         return 0;
5595 
5596     ata_finalize_port_ops(host->ops);
5597 
5598     for (i = 0; i < host->n_ports; i++) {
5599         struct ata_port *ap = host->ports[i];
5600 
5601         ata_finalize_port_ops(ap->ops);
5602 
5603         if (!host->ops && !ata_port_is_dummy(ap))
5604             host->ops = ap->ops;
5605 
5606         if (ap->ops->port_stop)
5607             have_stop = 1;
5608     }
5609 
5610     if (host->ops && host->ops->host_stop)
5611         have_stop = 1;
5612 
5613     if (have_stop) {
5614         start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5615         if (!start_dr)
5616             return -ENOMEM;
5617     }
5618 
5619     for (i = 0; i < host->n_ports; i++) {
5620         struct ata_port *ap = host->ports[i];
5621 
5622         if (ap->ops->port_start) {
5623             rc = ap->ops->port_start(ap);
5624             if (rc) {
5625                 if (rc != -ENODEV)
5626                     dev_err(host->dev,
5627                         "failed to start port %d (errno=%d)\n",
5628                         i, rc);
5629                 goto err_out;
5630             }
5631         }
5632         ata_eh_freeze_port(ap);
5633     }
5634 
5635     if (start_dr)
5636         devres_add(host->dev, start_dr);
5637     host->flags |= ATA_HOST_STARTED;
5638     return 0;
5639 
5640  err_out:
5641     while (--i >= 0) {
5642         struct ata_port *ap = host->ports[i];
5643 
5644         if (ap->ops->port_stop)
5645             ap->ops->port_stop(ap);
5646     }
5647     devres_free(start_dr);
5648     return rc;
5649 }
5650 EXPORT_SYMBOL_GPL(ata_host_start);
5651 
5652 /**
5653  *  ata_host_init - Initialize a host struct for sas (ipr, libsas)
5654  *  @host:  host to initialize
5655  *  @dev:   device host is attached to
5656  *  @ops:   port_ops
5657  *
5658  */
5659 void ata_host_init(struct ata_host *host, struct device *dev,
5660            struct ata_port_operations *ops)
5661 {
5662     spin_lock_init(&host->lock);
5663     mutex_init(&host->eh_mutex);
5664     host->n_tags = ATA_MAX_QUEUE;
5665     host->dev = dev;
5666     host->ops = ops;
5667     kref_init(&host->kref);
5668 }
5669 EXPORT_SYMBOL_GPL(ata_host_init);
5670 
5671 void __ata_port_probe(struct ata_port *ap)
5672 {
5673     struct ata_eh_info *ehi = &ap->link.eh_info;
5674     unsigned long flags;
5675 
5676     /* kick EH for boot probing */
5677     spin_lock_irqsave(ap->lock, flags);
5678 
5679     ehi->probe_mask |= ATA_ALL_DEVICES;
5680     ehi->action |= ATA_EH_RESET;
5681     ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5682 
5683     ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5684     ap->pflags |= ATA_PFLAG_LOADING;
5685     ata_port_schedule_eh(ap);
5686 
5687     spin_unlock_irqrestore(ap->lock, flags);
5688 }
5689 
5690 int ata_port_probe(struct ata_port *ap)
5691 {
5692     int rc = 0;
5693 
5694     if (ap->ops->error_handler) {
5695         __ata_port_probe(ap);
5696         ata_port_wait_eh(ap);
5697     } else {
5698         rc = ata_bus_probe(ap);
5699     }
5700     return rc;
5701 }
5702 
5703 
5704 static void async_port_probe(void *data, async_cookie_t cookie)
5705 {
5706     struct ata_port *ap = data;
5707 
5708     /*
5709      * If we're not allowed to scan this host in parallel,
5710      * we need to wait until all previous scans have completed
5711      * before going further.
5712      * Jeff Garzik says this is only within a controller, so we
5713      * don't need to wait for port 0, only for later ports.
5714      */
5715     if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
5716         async_synchronize_cookie(cookie);
5717 
5718     (void)ata_port_probe(ap);
5719 
5720     /* in order to keep device order, we need to synchronize at this point */
5721     async_synchronize_cookie(cookie);
5722 
5723     ata_scsi_scan_host(ap, 1);
5724 }
5725 
5726 /**
5727  *  ata_host_register - register initialized ATA host
5728  *  @host: ATA host to register
5729  *  @sht: template for SCSI host
5730  *
5731  *  Register initialized ATA host.  @host is allocated using
5732  *  ata_host_alloc() and fully initialized by LLD.  This function
5733  *  starts ports, registers @host with ATA and SCSI layers and
5734  *  probe registered devices.
5735  *
5736  *  LOCKING:
5737  *  Inherited from calling layer (may sleep).
5738  *
5739  *  RETURNS:
5740  *  0 on success, -errno otherwise.
5741  */
5742 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5743 {
5744     int i, rc;
5745 
5746     host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);
5747 
5748     /* host must have been started */
5749     if (!(host->flags & ATA_HOST_STARTED)) {
5750         dev_err(host->dev, "BUG: trying to register unstarted host\n");
5751         WARN_ON(1);
5752         return -EINVAL;
5753     }
5754 
5755     /* Blow away unused ports.  This happens when LLD can't
5756      * determine the exact number of ports to allocate at
5757      * allocation time.
5758      */
5759     for (i = host->n_ports; host->ports[i]; i++)
5760         kfree(host->ports[i]);
5761 
5762     /* give ports names and add SCSI hosts */
5763     for (i = 0; i < host->n_ports; i++) {
5764         host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
5765         host->ports[i]->local_port_no = i + 1;
5766     }
5767 
5768     /* Create associated sysfs transport objects  */
5769     for (i = 0; i < host->n_ports; i++) {
5770         rc = ata_tport_add(host->dev,host->ports[i]);
5771         if (rc) {
5772             goto err_tadd;
5773         }
5774     }
5775 
5776     rc = ata_scsi_add_hosts(host, sht);
5777     if (rc)
5778         goto err_tadd;
5779 
5780     /* set cable, sata_spd_limit and report */
5781     for (i = 0; i < host->n_ports; i++) {
5782         struct ata_port *ap = host->ports[i];
5783         unsigned int xfer_mask;
5784 
5785         /* set SATA cable type if still unset */
5786         if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5787             ap->cbl = ATA_CBL_SATA;
5788 
5789         /* init sata_spd_limit to the current value */
5790         sata_link_init_spd(&ap->link);
5791         if (ap->slave_link)
5792             sata_link_init_spd(ap->slave_link);
5793 
5794         /* print per-port info to dmesg */
5795         xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
5796                           ap->udma_mask);
5797 
5798         if (!ata_port_is_dummy(ap)) {
5799             ata_port_info(ap, "%cATA max %s %s\n",
5800                       (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
5801                       ata_mode_string(xfer_mask),
5802                       ap->link.eh_info.desc);
5803             ata_ehi_clear_desc(&ap->link.eh_info);
5804         } else
5805             ata_port_info(ap, "DUMMY\n");
5806     }
5807 
5808     /* perform each probe asynchronously */
5809     for (i = 0; i < host->n_ports; i++) {
5810         struct ata_port *ap = host->ports[i];
5811         ap->cookie = async_schedule(async_port_probe, ap);
5812     }
5813 
5814     return 0;
5815 
5816  err_tadd:
5817     while (--i >= 0) {
5818         ata_tport_delete(host->ports[i]);
5819     }
5820     return rc;
5821 
5822 }
5823 EXPORT_SYMBOL_GPL(ata_host_register);
5824 
5825 /**
5826  *  ata_host_activate - start host, request IRQ and register it
5827  *  @host: target ATA host
5828  *  @irq: IRQ to request
5829  *  @irq_handler: irq_handler used when requesting IRQ
5830  *  @irq_flags: irq_flags used when requesting IRQ
5831  *  @sht: scsi_host_template to use when registering the host
5832  *
5833  *  After allocating an ATA host and initializing it, most libata
5834  *  LLDs perform three steps to activate the host - start host,
5835  *  request IRQ and register it.  This helper takes necessary
5836  *  arguments and performs the three steps in one go.
5837  *
5838  *  An invalid IRQ skips the IRQ registration and expects the host to
5839  *  have set polling mode on the port. In this case, @irq_handler
5840  *  should be NULL.
5841  *
5842  *  LOCKING:
5843  *  Inherited from calling layer (may sleep).
5844  *
5845  *  RETURNS:
5846  *  0 on success, -errno otherwise.
5847  */
5848 int ata_host_activate(struct ata_host *host, int irq,
5849               irq_handler_t irq_handler, unsigned long irq_flags,
5850               struct scsi_host_template *sht)
5851 {
5852     int i, rc;
5853     char *irq_desc;
5854 
5855     rc = ata_host_start(host);
5856     if (rc)
5857         return rc;
5858 
5859     /* Special case for polling mode */
5860     if (!irq) {
5861         WARN_ON(irq_handler);
5862         return ata_host_register(host, sht);
5863     }
5864 
5865     irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
5866                   dev_driver_string(host->dev),
5867                   dev_name(host->dev));
5868     if (!irq_desc)
5869         return -ENOMEM;
5870 
5871     rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
5872                   irq_desc, host);
5873     if (rc)
5874         return rc;
5875 
5876     for (i = 0; i < host->n_ports; i++)
5877         ata_port_desc(host->ports[i], "irq %d", irq);
5878 
5879     rc = ata_host_register(host, sht);
5880     /* if failed, just free the IRQ and leave ports alone */
5881     if (rc)
5882         devm_free_irq(host->dev, irq, host);
5883 
5884     return rc;
5885 }
5886 EXPORT_SYMBOL_GPL(ata_host_activate);
5887 
5888 /**
5889  *  ata_port_detach - Detach ATA port in preparation of device removal
5890  *  @ap: ATA port to be detached
5891  *
5892  *  Detach all ATA devices and the associated SCSI devices of @ap;
5893  *  then, remove the associated SCSI host.  @ap is guaranteed to
5894  *  be quiescent on return from this function.
5895  *
5896  *  LOCKING:
5897  *  Kernel thread context (may sleep).
5898  */
5899 static void ata_port_detach(struct ata_port *ap)
5900 {
5901     unsigned long flags;
5902     struct ata_link *link;
5903     struct ata_device *dev;
5904 
5905     if (!ap->ops->error_handler)
5906         goto skip_eh;
5907 
5908     /* tell EH we're leaving & flush EH */
5909     spin_lock_irqsave(ap->lock, flags);
5910     ap->pflags |= ATA_PFLAG_UNLOADING;
5911     ata_port_schedule_eh(ap);
5912     spin_unlock_irqrestore(ap->lock, flags);
5913 
5914     /* wait till EH commits suicide */
5915     ata_port_wait_eh(ap);
5916 
5917     /* it better be dead now */
5918     WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
5919 
5920     cancel_delayed_work_sync(&ap->hotplug_task);
5921 
5922  skip_eh:
5923     /* clean up zpodd on port removal */
5924     ata_for_each_link(link, ap, HOST_FIRST) {
5925         ata_for_each_dev(dev, link, ALL) {
5926             if (zpodd_dev_enabled(dev))
5927                 zpodd_exit(dev);
5928         }
5929     }
5930     if (ap->pmp_link) {
5931         int i;
5932         for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
5933             ata_tlink_delete(&ap->pmp_link[i]);
5934     }
5935     /* remove the associated SCSI host */
5936     scsi_remove_host(ap->scsi_host);
5937     ata_tport_delete(ap);
5938 }
5939 
5940 /**
5941  *  ata_host_detach - Detach all ports of an ATA host
5942  *  @host: Host to detach
5943  *
5944  *  Detach all ports of @host.
5945  *
5946  *  LOCKING:
5947  *  Kernel thread context (may sleep).
5948  */
5949 void ata_host_detach(struct ata_host *host)
5950 {
5951     int i;
5952 
5953     for (i = 0; i < host->n_ports; i++) {
5954         /* Ensure ata_port probe has completed */
5955         async_synchronize_cookie(host->ports[i]->cookie + 1);
5956         ata_port_detach(host->ports[i]);
5957     }
5958 
5959     /* the host is dead now, dissociate ACPI */
5960     ata_acpi_dissociate(host);
5961 }
5962 EXPORT_SYMBOL_GPL(ata_host_detach);
5963 
5964 #ifdef CONFIG_PCI
5965 
5966 /**
5967  *  ata_pci_remove_one - PCI layer callback for device removal
5968  *  @pdev: PCI device that was removed
5969  *
5970  *  PCI layer indicates to libata via this hook that hot-unplug or
5971  *  module unload event has occurred.  Detach all ports.  Resource
5972  *  release is handled via devres.
5973  *
5974  *  LOCKING:
5975  *  Inherited from PCI layer (may sleep).
5976  */
5977 void ata_pci_remove_one(struct pci_dev *pdev)
5978 {
5979     struct ata_host *host = pci_get_drvdata(pdev);
5980 
5981     ata_host_detach(host);
5982 }
5983 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5984 
5985 void ata_pci_shutdown_one(struct pci_dev *pdev)
5986 {
5987     struct ata_host *host = pci_get_drvdata(pdev);
5988     int i;
5989 
5990     for (i = 0; i < host->n_ports; i++) {
5991         struct ata_port *ap = host->ports[i];
5992 
5993         ap->pflags |= ATA_PFLAG_FROZEN;
5994 
5995         /* Disable port interrupts */
5996         if (ap->ops->freeze)
5997             ap->ops->freeze(ap);
5998 
5999         /* Stop the port DMA engines */
6000         if (ap->ops->port_stop)
6001             ap->ops->port_stop(ap);
6002     }
6003 }
6004 EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
6005 
6006 /* move to PCI subsystem */
6007 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6008 {
6009     unsigned long tmp = 0;
6010 
6011     switch (bits->width) {
6012     case 1: {
6013         u8 tmp8 = 0;
6014         pci_read_config_byte(pdev, bits->reg, &tmp8);
6015         tmp = tmp8;
6016         break;
6017     }
6018     case 2: {
6019         u16 tmp16 = 0;
6020         pci_read_config_word(pdev, bits->reg, &tmp16);
6021         tmp = tmp16;
6022         break;
6023     }
6024     case 4: {
6025         u32 tmp32 = 0;
6026         pci_read_config_dword(pdev, bits->reg, &tmp32);
6027         tmp = tmp32;
6028         break;
6029     }
6030 
6031     default:
6032         return -EINVAL;
6033     }
6034 
6035     tmp &= bits->mask;
6036 
6037     return (tmp == bits->val) ? 1 : 0;
6038 }
6039 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6040 
6041 #ifdef CONFIG_PM
6042 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6043 {
6044     pci_save_state(pdev);
6045     pci_disable_device(pdev);
6046 
6047     if (mesg.event & PM_EVENT_SLEEP)
6048         pci_set_power_state(pdev, PCI_D3hot);
6049 }
6050 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6051 
6052 int ata_pci_device_do_resume(struct pci_dev *pdev)
6053 {
6054     int rc;
6055 
6056     pci_set_power_state(pdev, PCI_D0);
6057     pci_restore_state(pdev);
6058 
6059     rc = pcim_enable_device(pdev);
6060     if (rc) {
6061         dev_err(&pdev->dev,
6062             "failed to enable device after resume (%d)\n", rc);
6063         return rc;
6064     }
6065 
6066     pci_set_master(pdev);
6067     return 0;
6068 }
6069 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6070 
6071 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6072 {
6073     struct ata_host *host = pci_get_drvdata(pdev);
6074 
6075     ata_host_suspend(host, mesg);
6076 
6077     ata_pci_device_do_suspend(pdev, mesg);
6078 
6079     return 0;
6080 }
6081 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6082 
6083 int ata_pci_device_resume(struct pci_dev *pdev)
6084 {
6085     struct ata_host *host = pci_get_drvdata(pdev);
6086     int rc;
6087 
6088     rc = ata_pci_device_do_resume(pdev);
6089     if (rc == 0)
6090         ata_host_resume(host);
6091     return rc;
6092 }
6093 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6094 #endif /* CONFIG_PM */
6095 #endif /* CONFIG_PCI */
6096 
6097 /**
6098  *  ata_platform_remove_one - Platform layer callback for device removal
6099  *  @pdev: Platform device that was removed
6100  *
6101  *  Platform layer indicates to libata via this hook that hot-unplug or
6102  *  module unload event has occurred.  Detach all ports.  Resource
6103  *  release is handled via devres.
6104  *
6105  *  LOCKING:
6106  *  Inherited from platform layer (may sleep).
6107  */
6108 int ata_platform_remove_one(struct platform_device *pdev)
6109 {
6110     struct ata_host *host = platform_get_drvdata(pdev);
6111 
6112     ata_host_detach(host);
6113 
6114     return 0;
6115 }
6116 EXPORT_SYMBOL_GPL(ata_platform_remove_one);
6117 
6118 #ifdef CONFIG_ATA_FORCE
6119 
6120 #define force_cbl(name, flag)               \
6121     { #name,    .cbl        = (flag) }
6122 
6123 #define force_spd_limit(spd, val)           \
6124     { #spd, .spd_limit      = (val) }
6125 
6126 #define force_xfer(mode, shift)             \
6127     { #mode,    .xfer_mask  = (1UL << (shift)) }
6128 
6129 #define force_lflag_on(name, flags)         \
6130     { #name,    .lflags_on  = (flags) }
6131 
6132 #define force_lflag_onoff(name, flags)          \
6133     { "no" #name,   .lflags_on  = (flags) },    \
6134     { #name,    .lflags_off = (flags) }
6135 
6136 #define force_horkage_on(name, flag)            \
6137     { #name,    .horkage_on = (flag) }
6138 
6139 #define force_horkage_onoff(name, flag)         \
6140     { "no" #name,   .horkage_on = (flag) }, \
6141     { #name,    .horkage_off    = (flag) }
6142 
6143 static const struct ata_force_param force_tbl[] __initconst = {
6144     force_cbl(40c,          ATA_CBL_PATA40),
6145     force_cbl(80c,          ATA_CBL_PATA80),
6146     force_cbl(short40c,     ATA_CBL_PATA40_SHORT),
6147     force_cbl(unk,          ATA_CBL_PATA_UNK),
6148     force_cbl(ign,          ATA_CBL_PATA_IGN),
6149     force_cbl(sata,         ATA_CBL_SATA),
6150 
6151     force_spd_limit(1.5Gbps,    1),
6152     force_spd_limit(3.0Gbps,    2),
6153 
6154     force_xfer(pio0,        ATA_SHIFT_PIO + 0),
6155     force_xfer(pio1,        ATA_SHIFT_PIO + 1),
6156     force_xfer(pio2,        ATA_SHIFT_PIO + 2),
6157     force_xfer(pio3,        ATA_SHIFT_PIO + 3),
6158     force_xfer(pio4,        ATA_SHIFT_PIO + 4),
6159     force_xfer(pio5,        ATA_SHIFT_PIO + 5),
6160     force_xfer(pio6,        ATA_SHIFT_PIO + 6),
6161     force_xfer(mwdma0,      ATA_SHIFT_MWDMA + 0),
6162     force_xfer(mwdma1,      ATA_SHIFT_MWDMA + 1),
6163     force_xfer(mwdma2,      ATA_SHIFT_MWDMA + 2),
6164     force_xfer(mwdma3,      ATA_SHIFT_MWDMA + 3),
6165     force_xfer(mwdma4,      ATA_SHIFT_MWDMA + 4),
6166     force_xfer(udma0,       ATA_SHIFT_UDMA + 0),
6167     force_xfer(udma16,      ATA_SHIFT_UDMA + 0),
6168     force_xfer(udma/16,     ATA_SHIFT_UDMA + 0),
6169     force_xfer(udma1,       ATA_SHIFT_UDMA + 1),
6170     force_xfer(udma25,      ATA_SHIFT_UDMA + 1),
6171     force_xfer(udma/25,     ATA_SHIFT_UDMA + 1),
6172     force_xfer(udma2,       ATA_SHIFT_UDMA + 2),
6173     force_xfer(udma33,      ATA_SHIFT_UDMA + 2),
6174     force_xfer(udma/33,     ATA_SHIFT_UDMA + 2),
6175     force_xfer(udma3,       ATA_SHIFT_UDMA + 3),
6176     force_xfer(udma44,      ATA_SHIFT_UDMA + 3),
6177     force_xfer(udma/44,     ATA_SHIFT_UDMA + 3),
6178     force_xfer(udma4,       ATA_SHIFT_UDMA + 4),
6179     force_xfer(udma66,      ATA_SHIFT_UDMA + 4),
6180     force_xfer(udma/66,     ATA_SHIFT_UDMA + 4),
6181     force_xfer(udma5,       ATA_SHIFT_UDMA + 5),
6182     force_xfer(udma100,     ATA_SHIFT_UDMA + 5),
6183     force_xfer(udma/100,        ATA_SHIFT_UDMA + 5),
6184     force_xfer(udma6,       ATA_SHIFT_UDMA + 6),
6185     force_xfer(udma133,     ATA_SHIFT_UDMA + 6),
6186     force_xfer(udma/133,        ATA_SHIFT_UDMA + 6),
6187     force_xfer(udma7,       ATA_SHIFT_UDMA + 7),
6188 
6189     force_lflag_on(nohrst,      ATA_LFLAG_NO_HRST),
6190     force_lflag_on(nosrst,      ATA_LFLAG_NO_SRST),
6191     force_lflag_on(norst,       ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST),
6192     force_lflag_on(rstonce,     ATA_LFLAG_RST_ONCE),
6193     force_lflag_onoff(dbdelay,  ATA_LFLAG_NO_DEBOUNCE_DELAY),
6194 
6195     force_horkage_onoff(ncq,    ATA_HORKAGE_NONCQ),
6196     force_horkage_onoff(ncqtrim,    ATA_HORKAGE_NO_NCQ_TRIM),
6197     force_horkage_onoff(ncqati, ATA_HORKAGE_NO_NCQ_ON_ATI),
6198 
6199     force_horkage_onoff(trim,   ATA_HORKAGE_NOTRIM),
6200     force_horkage_on(trim_zero, ATA_HORKAGE_ZERO_AFTER_TRIM),
6201     force_horkage_on(max_trim_128m, ATA_HORKAGE_MAX_TRIM_128M),
6202 
6203     force_horkage_onoff(dma,    ATA_HORKAGE_NODMA),
6204     force_horkage_on(atapi_dmadir,  ATA_HORKAGE_ATAPI_DMADIR),
6205     force_horkage_on(atapi_mod16_dma, ATA_HORKAGE_ATAPI_MOD16_DMA),
6206 
6207     force_horkage_onoff(dmalog, ATA_HORKAGE_NO_DMA_LOG),
6208     force_horkage_onoff(iddevlog,   ATA_HORKAGE_NO_ID_DEV_LOG),
6209     force_horkage_onoff(logdir, ATA_HORKAGE_NO_LOG_DIR),
6210 
6211     force_horkage_on(max_sec_128,   ATA_HORKAGE_MAX_SEC_128),
6212     force_horkage_on(max_sec_1024,  ATA_HORKAGE_MAX_SEC_1024),
6213     force_horkage_on(max_sec_lba48, ATA_HORKAGE_MAX_SEC_LBA48),
6214 
6215     force_horkage_onoff(lpm,    ATA_HORKAGE_NOLPM),
6216     force_horkage_onoff(setxfer,    ATA_HORKAGE_NOSETXFER),
6217     force_horkage_on(dump_id,   ATA_HORKAGE_DUMP_ID),
6218 
6219     force_horkage_on(disable,   ATA_HORKAGE_DISABLE),
6220 };
6221 
6222 static int __init ata_parse_force_one(char **cur,
6223                       struct ata_force_ent *force_ent,
6224                       const char **reason)
6225 {
6226     char *start = *cur, *p = *cur;
6227     char *id, *val, *endp;
6228     const struct ata_force_param *match_fp = NULL;
6229     int nr_matches = 0, i;
6230 
6231     /* find where this param ends and update *cur */
6232     while (*p != '\0' && *p != ',')
6233         p++;
6234 
6235     if (*p == '\0')
6236         *cur = p;
6237     else
6238         *cur = p + 1;
6239 
6240     *p = '\0';
6241 
6242     /* parse */
6243     p = strchr(start, ':');
6244     if (!p) {
6245         val = strstrip(start);
6246         goto parse_val;
6247     }
6248     *p = '\0';
6249 
6250     id = strstrip(start);
6251     val = strstrip(p + 1);
6252 
6253     /* parse id */
6254     p = strchr(id, '.');
6255     if (p) {
6256         *p++ = '\0';
6257         force_ent->device = simple_strtoul(p, &endp, 10);
6258         if (p == endp || *endp != '\0') {
6259             *reason = "invalid device";
6260             return -EINVAL;
6261         }
6262     }
6263 
6264     force_ent->port = simple_strtoul(id, &endp, 10);
6265     if (id == endp || *endp != '\0') {
6266         *reason = "invalid port/link";
6267         return -EINVAL;
6268     }
6269 
6270  parse_val:
6271     /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6272     for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6273         const struct ata_force_param *fp = &force_tbl[i];
6274 
6275         if (strncasecmp(val, fp->name, strlen(val)))
6276             continue;
6277 
6278         nr_matches++;
6279         match_fp = fp;
6280 
6281         if (strcasecmp(val, fp->name) == 0) {
6282             nr_matches = 1;
6283             break;
6284         }
6285     }
6286 
6287     if (!nr_matches) {
6288         *reason = "unknown value";
6289         return -EINVAL;
6290     }
6291     if (nr_matches > 1) {
6292         *reason = "ambiguous value";
6293         return -EINVAL;
6294     }
6295 
6296     force_ent->param = *match_fp;
6297 
6298     return 0;
6299 }
6300 
6301 static void __init ata_parse_force_param(void)
6302 {
6303     int idx = 0, size = 1;
6304     int last_port = -1, last_device = -1;
6305     char *p, *cur, *next;
6306 
6307     /* Calculate maximum number of params and allocate ata_force_tbl */
6308     for (p = ata_force_param_buf; *p; p++)
6309         if (*p == ',')
6310             size++;
6311 
6312     ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
6313     if (!ata_force_tbl) {
6314         printk(KERN_WARNING "ata: failed to extend force table, "
6315                "libata.force ignored\n");
6316         return;
6317     }
6318 
6319     /* parse and populate the table */
6320     for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6321         const char *reason = "";
6322         struct ata_force_ent te = { .port = -1, .device = -1 };
6323 
6324         next = cur;
6325         if (ata_parse_force_one(&next, &te, &reason)) {
6326             printk(KERN_WARNING "ata: failed to parse force "
6327                    "parameter \"%s\" (%s)\n",
6328                    cur, reason);
6329             continue;
6330         }
6331 
6332         if (te.port == -1) {
6333             te.port = last_port;
6334             te.device = last_device;
6335         }
6336 
6337         ata_force_tbl[idx++] = te;
6338 
6339         last_port = te.port;
6340         last_device = te.device;
6341     }
6342 
6343     ata_force_tbl_size = idx;
6344 }
6345 
6346 static void ata_free_force_param(void)
6347 {
6348     kfree(ata_force_tbl);
6349 }
6350 #else
6351 static inline void ata_parse_force_param(void) { }
6352 static inline void ata_free_force_param(void) { }
6353 #endif
6354 
6355 static int __init ata_init(void)
6356 {
6357     int rc;
6358 
6359     ata_parse_force_param();
6360 
6361     rc = ata_sff_init();
6362     if (rc) {
6363         ata_free_force_param();
6364         return rc;
6365     }
6366 
6367     libata_transport_init();
6368     ata_scsi_transport_template = ata_attach_transport();
6369     if (!ata_scsi_transport_template) {
6370         ata_sff_exit();
6371         rc = -ENOMEM;
6372         goto err_out;
6373     }
6374 
6375     printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6376     return 0;
6377 
6378 err_out:
6379     return rc;
6380 }
6381 
6382 static void __exit ata_exit(void)
6383 {
6384     ata_release_transport(ata_scsi_transport_template);
6385     libata_transport_exit();
6386     ata_sff_exit();
6387     ata_free_force_param();
6388 }
6389 
6390 subsys_initcall(ata_init);
6391 module_exit(ata_exit);
6392 
6393 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6394 
6395 int ata_ratelimit(void)
6396 {
6397     return __ratelimit(&ratelimit);
6398 }
6399 EXPORT_SYMBOL_GPL(ata_ratelimit);
6400 
6401 /**
6402  *  ata_msleep - ATA EH owner aware msleep
6403  *  @ap: ATA port to attribute the sleep to
6404  *  @msecs: duration to sleep in milliseconds
6405  *
6406  *  Sleeps @msecs.  If the current task is owner of @ap's EH, the
6407  *  ownership is released before going to sleep and reacquired
6408  *  after the sleep is complete.  IOW, other ports sharing the
6409  *  @ap->host will be allowed to own the EH while this task is
6410  *  sleeping.
6411  *
6412  *  LOCKING:
6413  *  Might sleep.
6414  */
6415 void ata_msleep(struct ata_port *ap, unsigned int msecs)
6416 {
6417     bool owns_eh = ap && ap->host->eh_owner == current;
6418 
6419     if (owns_eh)
6420         ata_eh_release(ap);
6421 
6422     if (msecs < 20) {
6423         unsigned long usecs = msecs * USEC_PER_MSEC;
6424         usleep_range(usecs, usecs + 50);
6425     } else {
6426         msleep(msecs);
6427     }
6428 
6429     if (owns_eh)
6430         ata_eh_acquire(ap);
6431 }
6432 EXPORT_SYMBOL_GPL(ata_msleep);
6433 
6434 /**
6435  *  ata_wait_register - wait until register value changes
6436  *  @ap: ATA port to wait register for, can be NULL
6437  *  @reg: IO-mapped register
6438  *  @mask: Mask to apply to read register value
6439  *  @val: Wait condition
6440  *  @interval: polling interval in milliseconds
6441  *  @timeout: timeout in milliseconds
6442  *
6443  *  Waiting for some bits of register to change is a common
6444  *  operation for ATA controllers.  This function reads 32bit LE
6445  *  IO-mapped register @reg and tests for the following condition.
6446  *
6447  *  (*@reg & mask) != val
6448  *
6449  *  If the condition is met, it returns; otherwise, the process is
6450  *  repeated after @interval_msec until timeout.
6451  *
6452  *  LOCKING:
6453  *  Kernel thread context (may sleep)
6454  *
6455  *  RETURNS:
6456  *  The final register value.
6457  */
6458 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6459               unsigned long interval, unsigned long timeout)
6460 {
6461     unsigned long deadline;
6462     u32 tmp;
6463 
6464     tmp = ioread32(reg);
6465 
6466     /* Calculate timeout _after_ the first read to make sure
6467      * preceding writes reach the controller before starting to
6468      * eat away the timeout.
6469      */
6470     deadline = ata_deadline(jiffies, timeout);
6471 
6472     while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6473         ata_msleep(ap, interval);
6474         tmp = ioread32(reg);
6475     }
6476 
6477     return tmp;
6478 }
6479 EXPORT_SYMBOL_GPL(ata_wait_register);
6480 
6481 /*
6482  * Dummy port_ops
6483  */
6484 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6485 {
6486     return AC_ERR_SYSTEM;
6487 }
6488 
6489 static void ata_dummy_error_handler(struct ata_port *ap)
6490 {
6491     /* truly dummy */
6492 }
6493 
6494 struct ata_port_operations ata_dummy_port_ops = {
6495     .qc_prep        = ata_noop_qc_prep,
6496     .qc_issue       = ata_dummy_qc_issue,
6497     .error_handler      = ata_dummy_error_handler,
6498     .sched_eh       = ata_std_sched_eh,
6499     .end_eh         = ata_std_end_eh,
6500 };
6501 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6502 
6503 const struct ata_port_info ata_dummy_port_info = {
6504     .port_ops       = &ata_dummy_port_ops,
6505 };
6506 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6507 
6508 void ata_print_version(const struct device *dev, const char *version)
6509 {
6510     dev_printk(KERN_DEBUG, dev, "version %s\n", version);
6511 }
6512 EXPORT_SYMBOL(ata_print_version);
6513 
6514 EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load);
6515 EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command);
6516 EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup);
6517 EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_start);
6518 EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_status);