0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028 #include <linux/kernel.h>
0029 #include <linux/module.h>
0030 #include <linux/pci.h>
0031 #include <linux/init.h>
0032 #include <linux/list.h>
0033 #include <linux/mm.h>
0034 #include <linux/spinlock.h>
0035 #include <linux/blkdev.h>
0036 #include <linux/delay.h>
0037 #include <linux/timer.h>
0038 #include <linux/time.h>
0039 #include <linux/interrupt.h>
0040 #include <linux/completion.h>
0041 #include <linux/suspend.h>
0042 #include <linux/workqueue.h>
0043 #include <linux/scatterlist.h>
0044 #include <linux/io.h>
0045 #include <linux/log2.h>
0046 #include <linux/slab.h>
0047 #include <linux/glob.h>
0048 #include <scsi/scsi.h>
0049 #include <scsi/scsi_cmnd.h>
0050 #include <scsi/scsi_host.h>
0051 #include <linux/libata.h>
0052 #include <asm/byteorder.h>
0053 #include <asm/unaligned.h>
0054 #include <linux/cdrom.h>
0055 #include <linux/ratelimit.h>
0056 #include <linux/leds.h>
0057 #include <linux/pm_runtime.h>
0058 #include <linux/platform_device.h>
0059 #include <asm/setup.h>
0060
0061 #define CREATE_TRACE_POINTS
0062 #include <trace/events/libata.h>
0063
0064 #include "libata.h"
0065 #include "libata-transport.h"
0066
/*
 * Base port operations table: EH entry points shared by all libata
 * drivers.  Drivers inherit from this (directly or via sata_port_ops)
 * and override what they need.
 */
const struct ata_port_operations ata_base_port_ops = {
	.prereset	= ata_std_prereset,
	.postreset	= ata_std_postreset,
	.error_handler	= ata_std_error_handler,
	.sched_eh	= ata_std_sched_eh,
	.end_eh		= ata_std_end_eh,
};
0074
/*
 * Default operations for SATA ports: base EH ops plus standard NCQ
 * command deferral and SATA hardreset.
 */
const struct ata_port_operations sata_port_ops = {
	.inherits	= &ata_base_port_ops,

	.qc_defer	= ata_std_qc_defer,
	.hardreset	= sata_std_hardreset,
};
EXPORT_SYMBOL_GPL(sata_port_ops);
0082
/* Forward declarations for helpers defined later in this file. */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

/* Monotonically increasing ID used to tag ports/hosts in log messages. */
atomic_t ata_print_id = ATOMIC_INIT(0);
0090
#ifdef CONFIG_ATA_FORCE
/* One parsed override from the "libata.force=" kernel parameter. */
struct ata_force_param {
	const char	*name;		/* human-readable name for log messages */
	u8		cbl;		/* cable type to force (ATA_CBL_NONE == don't) */
	u8		spd_limit;	/* SATA PHY speed limit, 0 == don't force */
	unsigned int	xfer_mask;	/* transfer mode mask to force, 0 == don't */
	unsigned int	horkage_on;	/* horkage bits to set */
	unsigned int	horkage_off;	/* horkage bits to clear */
	u16		lflags_on;	/* link flags to set */
	u16		lflags_off;	/* link flags to clear */
};

/* A force entry bound to a port/device; -1 acts as a wildcard match. */
struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

/* Raw parameter string; consumed during early init (__initdata). */
static char ata_force_param_buf[COMMAND_LINE_SIZE] __initdata;

module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
#endif
0117
0118 static int atapi_enabled = 1;
0119 module_param(atapi_enabled, int, 0444);
0120 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
0121
0122 static int atapi_dmadir = 0;
0123 module_param(atapi_dmadir, int, 0444);
0124 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
0125
0126 int atapi_passthru16 = 1;
0127 module_param(atapi_passthru16, int, 0444);
0128 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
0129
0130 int libata_fua = 0;
0131 module_param_named(fua, libata_fua, int, 0444);
0132 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
0133
0134 static int ata_ignore_hpa;
0135 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
0136 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
0137
0138 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
0139 module_param_named(dma, libata_dma_mask, int, 0444);
0140 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
0141
0142 static int ata_probe_timeout;
0143 module_param(ata_probe_timeout, int, 0444);
0144 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
0145
0146 int libata_noacpi = 0;
0147 module_param_named(noacpi, libata_noacpi, int, 0444);
0148 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
0149
0150 int libata_allow_tpm = 0;
0151 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
0152 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
0153
0154 static int atapi_an;
0155 module_param(atapi_an, int, 0444);
0156 MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
0157
0158 MODULE_AUTHOR("Jeff Garzik");
0159 MODULE_DESCRIPTION("Library module for ATA devices");
0160 MODULE_LICENSE("GPL");
0161 MODULE_VERSION(DRV_VERSION);
0162
0163 static inline bool ata_dev_print_info(struct ata_device *dev)
0164 {
0165 struct ata_eh_context *ehc = &dev->link->eh_context;
0166
0167 return ehc->i.flags & ATA_EHI_PRINTINFO;
0168 }
0169
0170 static bool ata_sstatus_online(u32 sstatus)
0171 {
0172 return (sstatus & 0xf) == 0x3;
0173 }
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
/**
 *	ata_link_next - link iteration helper
 *	@link: the previous link, NULL to start
 *	@ap: ATA port containing links to iterate
 *	@mode: iteration mode, one of ATA_LITER_*
 *
 *	Returns the next link after @link, or NULL when iteration is
 *	complete.  @mode selects whether the host link is visited first
 *	(ATA_LITER_HOST_FIRST), last (ATA_LITER_PMP_FIRST), or only when
 *	no PMP is attached (ATA_LITER_EDGE).
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			fallthrough;
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			fallthrough;
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			fallthrough;
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link is the last stop: nothing follows it */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link; advance within the PMP link array */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	/* PMP links exhausted: host link comes last in PMP_FIRST mode */
	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}
EXPORT_SYMBOL_GPL(ata_link_next);
0234
0235
0236
0237
0238
0239
0240
0241
0242
0243
0244
0245
0246
/**
 *	ata_dev_next - device iteration helper
 *	@dev: the previous device, NULL to start
 *	@link: ATA link containing devices to iterate
 *	@mode: iteration mode, one of ATA_DITER_*
 *
 *	Returns the next device after @dev, or NULL when iteration is
 *	complete.  The *_REVERSE modes walk the device array backwards;
 *	the ENABLED modes additionally skip devices that are not enabled.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	/* in ENABLED modes, skip disabled devices and keep walking */
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}
EXPORT_SYMBOL_GPL(ata_dev_next);
0288
0289
0290
0291
0292
0293
0294
0295
0296
0297
0298
0299
0300
0301
0302
0303 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
0304 {
0305 struct ata_port *ap = dev->link->ap;
0306
0307 if (!ap->slave_link)
0308 return dev->link;
0309 if (!dev->devno)
0310 return &ap->link;
0311 return ap->slave_link;
0312 }
0313
0314 #ifdef CONFIG_ATA_FORCE
0315
0316
0317
0318
0319
0320
0321
0322
0323
0324
0325
0326
0327
/**
 *	ata_force_cbl - force cable type according to libata.force
 *	@ap: ATA port of interest
 *
 *	Applies a forced cable type from the libata.force table, if any
 *	entry matches this port.  The table is walked backwards so the
 *	last matching entry takes effect.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		/* port == -1 is a wildcard matching any port */
		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		/* entry doesn't force a cable type */
		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}
0346
0347
0348
0349
0350
0351
0352
0353
0354
0355
0356
0357
0358
0359
0360
0361
0362
/**
 *	ata_force_link_limits - force link limits according to libata.force
 *	@link: ATA link of interest
 *
 *	Applies forced SATA speed limits and link flags from the
 *	libata.force table.  The speed limit is taken from the last
 *	matching entry only (reverse walk + did_spd latch); link flags
 *	from every matching entry are accumulated.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	/* host links are addressed via pseudo device number 15 */
	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honour the first (i.e. last specified) spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags stack: apply on-bits then off-bits */
		if (fe->param.lflags_on) {
			link->flags |= fe->param.lflags_on;
			ata_link_notice(link,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags_on, link->flags);
		}
		if (fe->param.lflags_off) {
			link->flags &= ~fe->param.lflags_off;
			ata_link_notice(link,
					"FORCE: link flag 0x%x cleared -> 0x%x\n",
					fe->param.lflags_off, link->flags);
		}
	}
}
0404
0405
0406
0407
0408
0409
0410
0411
0412
0413
0414
0415
/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Applies a forced transfer mode from the last matching libata.force
 *	entry.  The fastest forced class wins: UDMA overrides MWDMA, which
 *	overrides PIO; slower classes are zeroed so the forced mode is the
 *	effective maximum.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* host links are also addressable via pseudo devnos 15/16 */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned int pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
			       fe->param.name);
		return;
	}
}
0458
0459
0460
0461
0462
0463
0464
0465
0466
0467
0468
0469
/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Applies forced horkage (quirk) bits from every matching
 *	libata.force entry.  Unlike the other force helpers this walks
 *	the table forwards so later entries override earlier ones.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* host links are also addressable via pseudo devnos 15/16 */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		/* skip entries that would not change anything */
		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
			       fe->param.name);
	}
}
#else
/* CONFIG_ATA_FORCE disabled: libata.force= handling compiles away. */
static inline void ata_force_link_limits(struct ata_link *link) { }
static inline void ata_force_xfermask(struct ata_device *dev) { }
static inline void ata_force_horkage(struct ata_device *dev) { }
#endif
0506
0507
0508
0509
0510
0511
0512
0513
0514
0515
0516
0517
0518
0519 int atapi_cmd_type(u8 opcode)
0520 {
0521 switch (opcode) {
0522 case GPCMD_READ_10:
0523 case GPCMD_READ_12:
0524 return ATAPI_READ;
0525
0526 case GPCMD_WRITE_10:
0527 case GPCMD_WRITE_12:
0528 case GPCMD_WRITE_AND_VERIFY_10:
0529 return ATAPI_WRITE;
0530
0531 case GPCMD_READ_CD:
0532 case GPCMD_READ_CD_MSF:
0533 return ATAPI_READ_CD;
0534
0535 case ATA_16:
0536 case ATA_12:
0537 if (atapi_passthru16)
0538 return ATAPI_PASS_THRU;
0539 fallthrough;
0540 default:
0541 return ATAPI_MISC;
0542 }
0543 }
0544 EXPORT_SYMBOL_GPL(atapi_cmd_type);
0545
/*
 * Read/write command opcodes, indexed as:
 *   base 0  = multi-sector PIO, base 8 = single-sector PIO, base 16 = DMA
 *   + 4 if FUA, + 2 if LBA48, + 1 if write
 * A zero entry means the combination is invalid (e.g. FUA without LBA48).
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
0575
0576
0577
0578
0579
0580
0581
0582
0583
0584
0585
0586
/**
 *	ata_rwcmd_protocol - set taskfile r/w command and protocol
 *	@tf: command to examine and configure
 *	@dev: device the command will be sent to
 *
 *	Selects the read/write opcode and transfer protocol (PIO or DMA)
 *	from @tf's FUA/LBA48/WRITE flags and @dev's capabilities.
 *
 *	RETURNS: 0 on success, -1 if the flag combination has no valid
 *	command (zero entry in ata_rw_cmds).
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	/* build the ata_rw_cmds[] sub-index: +4 FUA, +2 LBA48, +1 write */
	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
0616
0617
0618
0619
0620
0621
0622
0623
0624
0625
0626
0627
0628
0629
0630
0631
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	Decode the starting block address from @tf, handling LBA48,
 *	LBA28 and CHS addressing.
 *
 *	RETURNS: block address, or U64_MAX if the taskfile reports an
 *	invalid CHS address (sector 0).
 */
u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			/* LBA28: top nibble lives in the device register */
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		/* CHS sectors are 1-based; 0 is never valid */
		if (!sect) {
			ata_dev_warn(dev,
				     "device reported invalid CHS sector 0\n");
			return U64_MAX;
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}
0665
0666
0667
0668
0669
0670
0671
0672
0673
0674
0675
0676
0677
0678
0679
0680
0681
0682
0683
0684
0685
0686
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag (used for NCQ commands)
 *	@class: IO priority class
 *
 *	Build a taskfile for @block/@n_block, preferring NCQ, then LBA28
 *	or LBA48, and finally CHS, depending on device capability and
 *	address range.
 *
 *	RETURNS: 0 on success, -ERANGE if the request is too large for
 *	@dev, -EINVAL if the flag combination is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag, int class)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && !ata_tag_internal(tag)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* NCQ carries the tag in nsect and the count in feature */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = ATA_LBA;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;

		/* propagate RT IO priority as high NCQ priority */
		if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE &&
		    class == IOPRIO_CLASS_RT)
			tf->hob_nsect |= ATA_PRIO_HIGH << ATA_SHIFT_PRIO;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff;
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
0790
0791
0792
0793
0794
0795
0796
0797
0798
0799
0800
0801
0802
0803
0804
0805
0806 unsigned int ata_pack_xfermask(unsigned int pio_mask,
0807 unsigned int mwdma_mask,
0808 unsigned int udma_mask)
0809 {
0810 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
0811 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
0812 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
0813 }
0814 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
0815
0816
0817
0818
0819
0820
0821
0822
0823
0824
0825
0826 void ata_unpack_xfermask(unsigned int xfer_mask, unsigned int *pio_mask,
0827 unsigned int *mwdma_mask, unsigned int *udma_mask)
0828 {
0829 if (pio_mask)
0830 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
0831 if (mwdma_mask)
0832 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
0833 if (udma_mask)
0834 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
0835 }
0836
/*
 * Maps each transfer class to its xfer_mask bit range (shift, number of
 * bits) and its base XFER_* mode value.  Terminated by shift == -1.
 */
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};
0846
0847
0848
0849
0850
0851
0852
0853
0854
0855
0856
0857
0858
0859
/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return the matching XFER_* value for the highest bit set in
 *	@xfer_mask.  Only the highest bit is considered.
 *
 *	RETURNS: matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}
EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
0871
0872
0873
0874
0875
0876
0877
0878
0879
0880
0881
0882
0883
/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return the xfer_mask covering @xfer_mode and all slower modes in
 *	the same transfer class (bits below the class's shift are masked
 *	off).
 *
 *	RETURNS: matching xfer_mask, 0 if no match found.
 */
unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
0895
0896
0897
0898
0899
0900
0901
0902
0903
0904
0905
0906
0907
0908 int ata_xfer_mode2shift(u8 xfer_mode)
0909 {
0910 const struct ata_xfer_ent *ent;
0911
0912 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
0913 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
0914 return ent->shift;
0915 return -1;
0916 }
0917 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
0918
0919
0920
0921
0922
0923
0924
0925
0926
0927
0928
0929
0930
0931
0932
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed in @xfer_mask.
 *	The table order mirrors the xfer_mask bit layout (PIO, then MWDMA,
 *	then UDMA).
 *
 *	RETURNS: constant C string representing the highest speed listed
 *	in @xfer_mask, or "<n/a>" if no bit is set.
 */
const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
EXPORT_SYMBOL_GPL(ata_mode_string);
0965
/*
 * sata_spd_string - name for a SATA link speed value.
 *
 * @spd is 1-based (1 == 1.5 Gbps).  Returns "<unknown>" for 0 or any
 * value past the table.
 */
const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};

	if (spd >= 1 && spd - 1 < ARRAY_SIZE(spd_str))
		return spd_str[spd - 1];

	return "<unknown>";
}
0978
0979
0980
0981
0982
0983
0984
0985
0986
0987
0988
0989
0990
0991
0992
0993
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA, ATAPI, PMP, SEMB or ZAC, as per the ATA/ATAPI signature
 *	reported in the LBA mid/high (lbam/lbah) registers after reset.
 *
 *	RETURNS: device type, ATA_DEV_UNKNOWN if the signature matches
 *	none of the known types.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* ATA devices report 0x00/0x00 in lbam/lbah */
	if (tf->lbam == 0 && tf->lbah == 0)
		return ATA_DEV_ATA;

	/* ATAPI signature: 0x14/0xeb */
	if (tf->lbam == 0x14 && tf->lbah == 0xeb)
		return ATA_DEV_ATAPI;

	/* port multiplier signature: 0x69/0x96 */
	if (tf->lbam == 0x69 && tf->lbah == 0x96)
		return ATA_DEV_PMP;

	/* SEMB (enclosure management bridge) signature: 0x3c/0xc3 */
	if (tf->lbam == 0x3c && tf->lbah == 0xc3)
		return ATA_DEV_SEMB;

	/* ZAC (zoned) device signature: 0xcd/0xab */
	if (tf->lbam == 0xcd && tf->lbah == 0xab)
		return ATA_DEV_ZAC;

	return ATA_DEV_UNKNOWN;
}
EXPORT_SYMBOL_GPL(ata_dev_classify);
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051 void ata_id_string(const u16 *id, unsigned char *s,
1052 unsigned int ofs, unsigned int len)
1053 {
1054 unsigned int c;
1055
1056 BUG_ON(len & 1);
1057
1058 while (len > 0) {
1059 c = id[ofs] >> 8;
1060 *s = c;
1061 s++;
1062
1063 c = id[ofs] & 0xff;
1064 *s = c;
1065 s++;
1066
1067 ofs++;
1068 len -= 2;
1069 }
1070 }
1071 EXPORT_SYMBOL_GPL(ata_id_string);
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	Like ata_id_string(), but copies len - 1 bytes, trims trailing
 *	spaces and NUL-terminates the result.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	/* strip trailing padding spaces, then terminate */
	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
EXPORT_SYMBOL_GPL(ata_id_c_string);
1100
/*
 * ata_id_n_sectors - capacity in sectors from IDENTIFY data.
 *
 * Prefers LBA48 capacity, then LBA28, then current CHS geometry if
 * valid, and finally the default CHS geometry.
 */
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);

		return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	}

	if (ata_id_current_chs_valid(id))
		return (u32)id[ATA_ID_CUR_CYLS] * (u32)id[ATA_ID_CUR_HEADS] *
		       (u32)id[ATA_ID_CUR_SECTORS];

	return (u32)id[ATA_ID_CYLS] * (u32)id[ATA_ID_HEADS] *
	       (u32)id[ATA_ID_SECTORS];
}
1117
1118 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1119 {
1120 u64 sectors = 0;
1121
1122 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1123 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1124 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1125 sectors |= (tf->lbah & 0xff) << 16;
1126 sectors |= (tf->lbam & 0xff) << 8;
1127 sectors |= (tf->lbal & 0xff);
1128
1129 return sectors;
1130 }
1131
1132 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1133 {
1134 u64 sectors = 0;
1135
1136 sectors |= (tf->device & 0x0f) << 24;
1137 sectors |= (tf->lbah & 0xff) << 16;
1138 sectors |= (tf->lbam & 0xff) << 8;
1139 sectors |= (tf->lbal & 0xff);
1140
1141 return sectors;
1142 }
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result
 *
 *	Perform an LBA48 or LBA28 native max query upon the device in
 *	question.
 *
 *	RETURNS: 0 on success, -EACCES if command is aborted by the drive,
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to read native max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	/* result registers hold the last addressable LBA; +1 for count */
	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors via SET MAX (EXT).
 *
 *	RETURNS: 0 on success, -EACCES if command is aborted or denied
 *	(due to previous non-volatile SET_MAX) by the drive,
 *	-EIO on other errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* the command takes the last addressable LBA, not a count */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to set max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.error & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
/**
 *	ata_hpa_resize - Resize device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and,
 *	if HPA unlocking is requested (ignore_hpa module parameter or
 *	ATA_DFLAG_UNLOCK_HPA), resize the drive to its full capacity.
 *
 *	RETURNS: 0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	bool print_info = ata_dev_print_info(dev);
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? only ATA/ZAC disks with LBA + HPA enabled */
	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/*
		 * If device aborted the command or HPA isn't going to be
		 * unlocked, skip HPA handling from now on.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* an abort is not fatal when we weren't unlocking */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do, or only report the HPA state? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data to pick up the new capacity */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@dev: device from which the information is fetched
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page at debug verbosity.
 */
static inline void ata_dump_id(struct ata_device *dev, const u16 *id)
{
	ata_dev_dbg(dev,
		"49==0x%04x 53==0x%04x 63==0x%04x 64==0x%04x 75==0x%04x\n"
		"80==0x%04x 81==0x%04x 82==0x%04x 83==0x%04x 84==0x%04x\n"
		"88==0x%04x 93==0x%04x\n",
		id[49], id[53], id[63], id[64], id[75], id[80],
		id[81], id[82], id[83], id[84], id[88], id[93]);
}
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the supported PIO/MWDMA/UDMA masks from @id and pack
 *	them into a single xfer mask.
 *
 *	RETURNS: computed xfermask.
 */
unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* word 53 bit 1: the advanced PIO modes field (word 64) is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		/* word 64 reports modes 3/4; modes 0-2 are always implied */
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/*
		 * Old-style PIO mode number in the high byte of word 51.
		 * NOTE(review): values >= 5 are treated as "mode 0 only" —
		 * presumably a defensive choice for bogus IDs; confirm.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * CFA advanced modes (word 163) extend the masks:
		 * PIO5/PIO6 and MWDMA3/MWDMA4.
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	/* word 53 bit 2: the UDMA modes field (word 88) is valid */
	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
EXPORT_SYMBOL_GPL(ata_id_xfermask);
1440
1441 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1442 {
1443 struct completion *waiting = qc->private_data;
1444
1445 complete(waiting);
1446 }
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	the command on entry and the result on return.  Timeout and error
 *	conditions are reported via the return value.  No recovery action
 *	is taken after a command times out.  It is the caller's duty to
 *	clean up after timeouts.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
static unsigned ata_exec_internal_sg(struct ata_device *dev,
				     struct ata_taskfile *tf, const u8 *cdb,
				     int dma_dir, struct scatterlist *sgl,
				     unsigned int n_elem, unsigned int timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int preempted_tag;
	u32 preempted_sactive;
	u64 preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while the port is frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize the qc reserved for internal commands */
	qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);

	qc->tag = ATA_TAG_INTERNAL;
	qc->hw_tag = 0;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/*
	 * Save and clear the active-command state so the internal command
	 * runs alone on the port; it is restored before returning.
	 */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);

	/* some devices need the DMADIR feature bit set for DMA reads */
	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
	    dma_dir == DMA_FROM_DEVICE)
		qc->tf.feature |= ATAPI_DMADIR;

	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		/* total transfer length is the sum of all sg entries */
		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	/* pick a timeout: module parameter, else per-command default */
	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	/* release EH while we sleep so EH can make progress */
	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/*
		 * We're racing with irq here.  If we lose, the following
		 * test prevents us from completing the qc twice.  If we
		 * win, the port is frozen and will be cleaned up by
		 * ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
				     command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* give the LLDD a chance to clean up after the internal command */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.status & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* AC_ERR_OTHER is only a catch-all; drop it if we have
		 * something more specific */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
		qc->result_tf.status |= ATA_SENSE;
	}

	/* finish up: report result and restore the preempted state */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645 unsigned ata_exec_internal(struct ata_device *dev,
1646 struct ata_taskfile *tf, const u8 *cdb,
1647 int dma_dir, void *buf, unsigned int buflen,
1648 unsigned int timeout)
1649 {
1650 struct scatterlist *psg = NULL, sg;
1651 unsigned int n_elem = 0;
1652
1653 if (dma_dir != DMA_NONE) {
1654 WARN_ON(!buf);
1655 sg_init_one(&sg, buf, buflen);
1656 psg = &sg;
1657 n_elem++;
1658 }
1659
1660 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1661 timeout);
1662 }
1663
1664
1665
1666
1667
1668
1669
1670
1671 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1672 {
1673
1674
1675
1676
1677 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1678 return 0;
1679
1680
1681
1682 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1683 return 0;
1684
1685 if (ata_id_is_cfa(adev->id)
1686 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1687 return 0;
1688
1689 if (adev->pio_mode > XFER_PIO_2)
1690 return 1;
1691
1692 if (ata_id_has_iordy(adev->id))
1693 return 1;
1694 return 0;
1695 }
1696 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
1697
1698
1699
1700
1701
1702
1703
1704
1705 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1706 {
1707
1708 if (adev->id[ATA_ID_FIELD_VALID] & 2) {
1709 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1710
1711 if (pio) {
1712
1713 if (pio > 240)
1714 return 3 << ATA_SHIFT_PIO;
1715 return 7 << ATA_SHIFT_PIO;
1716 }
1717 }
1718 return 3 << ATA_SHIFT_PIO;
1719 }
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731 unsigned int ata_do_dev_read_id(struct ata_device *dev,
1732 struct ata_taskfile *tf, __le16 *id)
1733 {
1734 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1735 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1736 }
1737 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		/* treat SEMB signature as ATA and see if IDENTIFY works */
		class = ATA_DEV_ATA;
		fallthrough;
	case ATA_DEV_ATA:
	case ATA_DEV_ZAC:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, (__le16 *)id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, (__le16 *)id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_dbg(dev, "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_info(dev,
		"IDENTIFY failed on device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.error & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_dbg(dev,
				    "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
		ata_dev_info(dev, "dumping IDENTIFY data, "
			    "class=%d may_fallback=%d tried_spinup=%d\n",
			    class, may_fallback, tried_spinup);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
							ata_id_is_ata(id)) {
			ata_dev_dbg(dev,
				"host indicates ignore ATA devices, ignored\n");
			return -ENOENT;
		}
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) &&
	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

err_out:
	ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
		     reason, err_mask);
	return rc;
}
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@log: log to read
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ_LOG_EXT command.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
			       u8 page, void *buf, unsigned int sectors)
{
	unsigned long ap_flags = dev->link->ap->flags;
	struct ata_taskfile tf;
	unsigned int err_mask;
	bool dma = false;

	ata_dev_dbg(dev, "read log page - log 0x%x, page 0x%x\n", log, page);

	/*
	 * Return error without actually issuing the command on controllers
	 * which e.g. lockup on a read log page.
	 */
	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
		return AC_ERR_DEV;

retry:
	ata_tf_init(dev, &tf);
	/* prefer READ LOG DMA EXT when the device supports it and the
	 * NO_DMA_LOG horkage is not set */
	if (ata_dma_enabled(dev) && ata_id_has_read_log_dma_ext(dev->id) &&
	    !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
		tf.protocol = ATA_PROT_DMA;
		dma = true;
	} else {
		tf.command = ATA_CMD_READ_LOG_EXT;
		tf.protocol = ATA_PROT_PIO;
		dma = false;
	}
	tf.lbal = log;
	tf.lbam = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	if (err_mask) {
		if (dma) {
			/* the DMA flavor failed; disable it for this
			 * device and retry once using PIO */
			dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
			goto retry;
		}
		ata_dev_err(dev,
			    "Read log 0x%02x page 0x%02x failed, Emask 0x%x\n",
			    (unsigned int)log, (unsigned int)page, err_mask);
	}

	return err_mask;
}
2012
2013 static int ata_log_supported(struct ata_device *dev, u8 log)
2014 {
2015 struct ata_port *ap = dev->link->ap;
2016
2017 if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
2018 return 0;
2019
2020 if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
2021 return 0;
2022 return get_unaligned_le16(&ap->sector_buf[log * 2]);
2023 }
2024
2025 static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
2026 {
2027 struct ata_port *ap = dev->link->ap;
2028 unsigned int err, i;
2029
2030 if (dev->horkage & ATA_HORKAGE_NO_ID_DEV_LOG)
2031 return false;
2032
2033 if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
2034
2035
2036
2037
2038
2039 if (ata_id_major_version(dev->id) >= 10)
2040 ata_dev_warn(dev,
2041 "ATA Identify Device Log not supported\n");
2042 dev->horkage |= ATA_HORKAGE_NO_ID_DEV_LOG;
2043 return false;
2044 }
2045
2046
2047
2048
2049
2050 err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
2051 1);
2052 if (err)
2053 return false;
2054
2055 for (i = 0; i < ap->sector_buf[8]; i++) {
2056 if (ap->sector_buf[9 + i] == page)
2057 return true;
2058 }
2059
2060 return false;
2061 }
2062
2063 static int ata_do_link_spd_horkage(struct ata_device *dev)
2064 {
2065 struct ata_link *plink = ata_dev_phys_link(dev);
2066 u32 target, target_limit;
2067
2068 if (!sata_scr_valid(plink))
2069 return 0;
2070
2071 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2072 target = 1;
2073 else
2074 return 0;
2075
2076 target_limit = (1 << target) - 1;
2077
2078
2079 if (plink->sata_spd_limit <= target_limit)
2080 return 0;
2081
2082 plink->sata_spd_limit = target_limit;
2083
2084
2085
2086
2087
2088 if (plink->sata_spd > target) {
2089 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2090 sata_spd_string(target));
2091 return -EAGAIN;
2092 }
2093 return 0;
2094 }
2095
2096 static inline u8 ata_dev_knobble(struct ata_device *dev)
2097 {
2098 struct ata_port *ap = dev->link->ap;
2099
2100 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2101 return 0;
2102
2103 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2104 }
2105
2106 static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
2107 {
2108 struct ata_port *ap = dev->link->ap;
2109 unsigned int err_mask;
2110
2111 if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
2112 ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
2113 return;
2114 }
2115 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2116 0, ap->sector_buf, 1);
2117 if (!err_mask) {
2118 u8 *cmds = dev->ncq_send_recv_cmds;
2119
2120 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2121 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2122
2123 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2124 ata_dev_dbg(dev, "disabling queued TRIM support\n");
2125 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2126 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2127 }
2128 }
2129 }
2130
2131 static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2132 {
2133 struct ata_port *ap = dev->link->ap;
2134 unsigned int err_mask;
2135
2136 if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
2137 ata_dev_warn(dev,
2138 "NCQ Send/Recv Log not supported\n");
2139 return;
2140 }
2141 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2142 0, ap->sector_buf, 1);
2143 if (!err_mask) {
2144 u8 *cmds = dev->ncq_non_data_cmds;
2145
2146 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2147 }
2148 }
2149
2150 static void ata_dev_config_ncq_prio(struct ata_device *dev)
2151 {
2152 struct ata_port *ap = dev->link->ap;
2153 unsigned int err_mask;
2154
2155 if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
2156 return;
2157
2158 err_mask = ata_read_log_page(dev,
2159 ATA_LOG_IDENTIFY_DEVICE,
2160 ATA_LOG_SATA_SETTINGS,
2161 ap->sector_buf,
2162 1);
2163 if (err_mask)
2164 goto not_supported;
2165
2166 if (!(ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)))
2167 goto not_supported;
2168
2169 dev->flags |= ATA_DFLAG_NCQ_PRIO;
2170
2171 return;
2172
2173 not_supported:
2174 dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
2175 dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2176 }
2177
2178 static bool ata_dev_check_adapter(struct ata_device *dev,
2179 unsigned short vendor_id)
2180 {
2181 struct pci_dev *pcidev = NULL;
2182 struct device *parent_dev = NULL;
2183
2184 for (parent_dev = dev->tdev.parent; parent_dev != NULL;
2185 parent_dev = parent_dev->parent) {
2186 if (dev_is_pci(parent_dev)) {
2187 pcidev = to_pci_dev(parent_dev);
2188 if (pcidev->vendor == vendor_id)
2189 return true;
2190 break;
2191 }
2192 }
2193
2194 return false;
2195 }
2196
/*
 * ata_dev_config_ncq - configure Native Command Queueing for @dev
 *
 * Fills @desc (of size @desc_sz) with a human readable description of
 * the resulting NCQ configuration, for the device info printout.
 *
 * RETURNS: 0 on success, -EIO if enabling FPDMA auto-activation failed
 * with a non-device error.
 */
static int ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	/* hdepth: host queue depth, ddepth: device queue depth */
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
	unsigned int err_mask;
	char *aa_desc = "";

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return 0;
	}
	if (!IS_ENABLED(CONFIG_SATA_HOST))
		return 0;
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return 0;
	}

	/* device is blacklisted for NCQ when attached to an ATI adapter */
	if (dev->horkage & ATA_HORKAGE_NO_NCQ_ON_ATI &&
	    ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return 0;
	}

	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	/* enable FPDMA auto-activation if both host and device support it */
	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
		(ap->flags & ATA_FLAG_FPDMA_AA) &&
		ata_id_has_fpdma_aa(dev->id)) {
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
			SATA_FPDMA_AA);
		if (err_mask) {
			ata_dev_err(dev,
				    "failed to enable AA (error_mask=0x%x)\n",
				    err_mask);
			/* a device error is tolerated; anything else marks
			 * AA broken and fails configuration */
			if (err_mask != AC_ERR_DEV) {
				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
				return -EIO;
			}
		} else
			aa_desc = ", AA";
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
			ddepth, aa_desc);

	/* FPDMA_AUX hosts can use the NCQ auxiliary field commands */
	if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
		if (ata_id_has_ncq_send_and_recv(dev->id))
			ata_dev_config_ncq_send_recv(dev);
		if (ata_id_has_ncq_non_data(dev->id))
			ata_dev_config_ncq_non_data(dev);
		if (ata_id_has_ncq_prio(dev->id))
			ata_dev_config_ncq_prio(dev);
	}

	return 0;
}
2261
2262 static void ata_dev_config_sense_reporting(struct ata_device *dev)
2263 {
2264 unsigned int err_mask;
2265
2266 if (!ata_id_has_sense_reporting(dev->id))
2267 return;
2268
2269 if (ata_id_sense_reporting_enabled(dev->id))
2270 return;
2271
2272 err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2273 if (err_mask) {
2274 ata_dev_dbg(dev,
2275 "failed to enable Sense Data Reporting, Emask 0x%x\n",
2276 err_mask);
2277 }
2278 }
2279
/*
 * ata_dev_config_zac - configure a zoned (ZAC) device
 *
 * Detects host-managed and host-aware zoned devices, then fills in the
 * zone limits from the Zoned Device Information page of the Identify
 * Device Data log when available.
 */
static void ata_dev_config_zac(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int err_mask;
	u8 *identify_buf = ap->sector_buf;

	/* default the limits to "not reported" */
	dev->zac_zones_optimal_open = U32_MAX;
	dev->zac_zones_optimal_nonseq = U32_MAX;
	dev->zac_zones_max_open = U32_MAX;

	/*
	 * Always set the 'ZAC' flag for host-managed devices.
	 */
	if (dev->class == ATA_DEV_ZAC)
		dev->flags |= ATA_DFLAG_ZAC;
	else if (ata_id_zoned_cap(dev->id) == 0x01)
		/*
		 * Check for host-aware devices.
		 */
		dev->flags |= ATA_DFLAG_ZAC;

	if (!(dev->flags & ATA_DFLAG_ZAC))
		return;

	if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
		ata_dev_warn(dev,
			     "ATA Zoned Information Log not supported\n");
		return;
	}

	/*
	 * Read the Zoned Device Information page of the Identify
	 * Device Data log.
	 */
	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
				     ATA_LOG_ZONED_INFORMATION,
				     identify_buf, 1);
	if (!err_mask) {
		u64 zoned_cap, opt_open, opt_nonseq, max_open;

		/* each qword is valid only when its bit 63 is set */
		zoned_cap = get_unaligned_le64(&identify_buf[8]);
		if ((zoned_cap >> 63))
			dev->zac_zoned_cap = (zoned_cap & 1);
		opt_open = get_unaligned_le64(&identify_buf[24]);
		if ((opt_open >> 63))
			dev->zac_zones_optimal_open = (u32)opt_open;
		opt_nonseq = get_unaligned_le64(&identify_buf[32]);
		if ((opt_nonseq >> 63))
			dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
		max_open = get_unaligned_le64(&identify_buf[40]);
		if ((max_open >> 63))
			dev->zac_zones_max_open = (u32)max_open;
	}
}
2333
2334 static void ata_dev_config_trusted(struct ata_device *dev)
2335 {
2336 struct ata_port *ap = dev->link->ap;
2337 u64 trusted_cap;
2338 unsigned int err;
2339
2340 if (!ata_id_has_trusted(dev->id))
2341 return;
2342
2343 if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
2344 ata_dev_warn(dev,
2345 "Security Log not supported\n");
2346 return;
2347 }
2348
2349 err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
2350 ap->sector_buf, 1);
2351 if (err)
2352 return;
2353
2354 trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
2355 if (!(trusted_cap & (1ULL << 63))) {
2356 ata_dev_dbg(dev,
2357 "Trusted Computing capability qword not valid!\n");
2358 return;
2359 }
2360
2361 if (trusted_cap & (1 << 0))
2362 dev->flags |= ATA_DFLAG_TRUSTED;
2363 }
2364
2365 static int ata_dev_config_lba(struct ata_device *dev)
2366 {
2367 const u16 *id = dev->id;
2368 const char *lba_desc;
2369 char ncq_desc[24];
2370 int ret;
2371
2372 dev->flags |= ATA_DFLAG_LBA;
2373
2374 if (ata_id_has_lba48(id)) {
2375 lba_desc = "LBA48";
2376 dev->flags |= ATA_DFLAG_LBA48;
2377 if (dev->n_sectors >= (1UL << 28) &&
2378 ata_id_has_flush_ext(id))
2379 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2380 } else {
2381 lba_desc = "LBA";
2382 }
2383
2384
2385 ret = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2386
2387
2388 if (ata_dev_print_info(dev))
2389 ata_dev_info(dev,
2390 "%llu sectors, multi %u: %s %s\n",
2391 (unsigned long long)dev->n_sectors,
2392 dev->multi_count, lba_desc, ncq_desc);
2393
2394 return ret;
2395 }
2396
2397 static void ata_dev_config_chs(struct ata_device *dev)
2398 {
2399 const u16 *id = dev->id;
2400
2401 if (ata_id_current_chs_valid(id)) {
2402
2403 dev->cylinders = id[54];
2404 dev->heads = id[55];
2405 dev->sectors = id[56];
2406 } else {
2407
2408 dev->cylinders = id[1];
2409 dev->heads = id[3];
2410 dev->sectors = id[6];
2411 }
2412
2413
2414 if (ata_dev_print_info(dev))
2415 ata_dev_info(dev,
2416 "%llu sectors, multi %u, CHS %u/%u/%u\n",
2417 (unsigned long long)dev->n_sectors,
2418 dev->multi_count, dev->cylinders,
2419 dev->heads, dev->sectors);
2420 }
2421
2422 static void ata_dev_config_devslp(struct ata_device *dev)
2423 {
2424 u8 *sata_setting = dev->link->ap->sector_buf;
2425 unsigned int err_mask;
2426 int i, j;
2427
2428
2429
2430
2431
2432 if (!ata_id_has_devslp(dev->id) ||
2433 !ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
2434 return;
2435
2436 err_mask = ata_read_log_page(dev,
2437 ATA_LOG_IDENTIFY_DEVICE,
2438 ATA_LOG_SATA_SETTINGS,
2439 sata_setting, 1);
2440 if (err_mask)
2441 return;
2442
2443 dev->flags |= ATA_DFLAG_DEVSLP;
2444 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2445 j = ATA_LOG_DEVSLP_OFFSET + i;
2446 dev->devslp_timing[i] = sata_setting[j];
2447 }
2448 }
2449
/*
 * ata_dev_config_cpr - read the Concurrent Positioning Ranges log
 *
 * Allocates and fills dev->cpr_log from the Concurrent Positioning
 * Ranges log when the device supports it; otherwise clears any
 * previously cached log.
 */
static void ata_dev_config_cpr(struct ata_device *dev)
{
	unsigned int err_mask;
	size_t buf_len;
	int i, nr_cpr = 0;
	struct ata_cpr_log *cpr_log = NULL;
	u8 *desc, *buf = NULL;

	/* only probe devices reporting an ATA major version of 11+ */
	if (ata_id_major_version(dev->id) < 11)
		goto out;

	/* log size in sectors, 0 means unsupported */
	buf_len = ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES);
	if (buf_len == 0)
		goto out;

	/*
	 * Read the whole log using the sector count reported by the log
	 * directory; reading beyond the supported length would fail.
	 * (buf_len: sectors -> bytes)
	 */
	buf_len <<= 9;
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		goto out;

	err_mask = ata_read_log_page(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES,
				     0, buf, buf_len >> 9);
	if (err_mask)
		goto out;

	/* byte 0 holds the number of range descriptors */
	nr_cpr = buf[0];
	if (!nr_cpr)
		goto out;

	cpr_log = kzalloc(struct_size(cpr_log, cpr, nr_cpr), GFP_KERNEL);
	if (!cpr_log)
		goto out;

	/* descriptors are 32 bytes each, starting after the 64-byte header */
	cpr_log->nr_cpr = nr_cpr;
	desc = &buf[64];
	for (i = 0; i < nr_cpr; i++, desc += 32) {
		cpr_log->cpr[i].num = desc[0];
		cpr_log->cpr[i].num_storage_elements = desc[1];
		cpr_log->cpr[i].start_lba = get_unaligned_le64(&desc[8]);
		cpr_log->cpr[i].num_lbas = get_unaligned_le64(&desc[16]);
	}

out:
	/* swap in the new log (possibly NULL) and free the old one */
	swap(dev->cpr_log, cpr_log);
	kfree(cpr_log);
	kfree(buf);
}
2503
2504 static void ata_dev_print_features(struct ata_device *dev)
2505 {
2506 if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
2507 return;
2508
2509 ata_dev_info(dev,
2510 "Features:%s%s%s%s%s%s\n",
2511 dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
2512 dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
2513 dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
2514 dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
2515 dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
2516 dev->cpr_log ? " CPR" : "");
2517 }
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	bool print_info = ata_dev_print_info(dev);
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	unsigned int err_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev)) {
		ata_dev_dbg(dev, "no device\n");
		return 0;
	}

	/* set horkage: blacklist entries plus any forced quirks */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	if (dev->horkage & ATA_HORKAGE_DISABLE) {
		ata_dev_info(dev, "unsupported device, disabling\n");
		ata_dev_disable(dev);
		return 0;
	}

	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
	    dev->class == ATA_DEV_ATAPI) {
		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
			     atapi_enabled ? "not supported with this driver"
			     : "disabled");
		ata_dev_disable(dev);
		return 0;
	}

	rc = ata_do_link_spd_horkage(dev);
	if (rc)
		return rc;

	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM
	 * for them (SATA capability word 0xe == 0x2 means SATA-1 only;
	 * see id[ATA_ID_SATA_CAPABILITY]) */
	if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
		dev->horkage |= ATA_HORKAGE_NOLPM;

	if (ap->flags & ATA_FLAG_NO_LPM)
		dev->horkage |= ATA_HORKAGE_NOLPM;

	if (dev->horkage & ATA_HORKAGE_NOLPM) {
		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
	}

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	ata_dev_dbg(dev,
		    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
		    "85:%04x 86:%04x 87:%04x 88:%04x\n",
		    __func__,
		    id[49], id[82], id[83], id[84],
		    id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;
	dev->multi_count = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	ata_dump_id(dev, id);

	/* pull the model and firmware revision strings out of IDENTIFY */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
		if (ata_id_is_cfa(id)) {
			/* CPRM may make this media unusable */
			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* warn if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* get current R/W Multiple count setting from id[47]/id[59] */
		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
			unsigned int max = dev->id[47] & 0xff;
			unsigned int cnt = dev->id[59] & 0xff;
			/* only accept power-of-two counts within the max */
			if (is_power_of_2(max) && is_power_of_2(cnt))
				if (cnt <= max)
					dev->multi_count = cnt;
		}

		/* print device info to dmesg */
		if (print_info)
			ata_dev_info(dev, "%s: %s, %s, max %s\n",
				     revbuf, modelbuf, fwrevbuf,
				     ata_mode_string(xfer_mask));

		if (ata_id_has_lba(id)) {
			rc = ata_dev_config_lba(dev);
			if (rc)
				return rc;
		} else {
			ata_dev_config_chs(dev);
		}

		ata_dev_config_devslp(dev);
		ata_dev_config_sense_reporting(dev);
		ata_dev_config_zac(dev);
		ata_dev_config_trusted(dev);
		ata_dev_config_cpr(dev);
		dev->cdb_len = 32;

		if (print_info)
			ata_dev_print_features(dev);
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			ata_dev_warn(dev, "unsupported CDB len %d\n", rc);
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if (atapi_an &&
		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!sata_pmp_attached(ap) ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			/* issue SET FEATURES to turn AN on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_err(dev,
					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
					    err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		if (ata_id_has_da(dev->id)) {
			dev->flags |= ATA_DFLAG_DA;
			zpodd_init(dev);
		}

		/* print device info to dmesg */
		if (print_info)
			ata_dev_info(dev,
				     "ATAPI: %s, %s, max %s%s%s%s\n",
				     modelbuf, fwrevbuf,
				     ata_mode_string(xfer_mask),
				     cdb_intr_string, atapi_an_string,
				     dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* limit PATA drives on SATA cable bridges to UDMA5 and the
	 * default transfer size */
	if (ata_dev_knobble(dev)) {
		if (print_info)
			ata_dev_info(dev, "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
					 dev->max_sectors);

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* give the LLDD a chance to apply its own fixups */
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know.  We don't want to disallow opens
		 * for rescue purposes, or in case the vendor is just
		 * being dishonest.  Do this after the dev_config call
		 * as some controllers with buggy firmware may want to
		 * avoid reporting false device bugs.
		 */
		if (print_info) {
			ata_dev_warn(dev,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_warn(dev,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
		ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
	}

	return 0;

err_out_nosup:
	return rc;
}
2803
2804
2805
2806
2807
2808
2809
2810
2811
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
EXPORT_SYMBOL_GPL(ata_cable_40wire);
2817
2818
2819
2820
2821
2822
2823
2824
2825
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
EXPORT_SYMBOL_GPL(ata_cable_80wire);
2831
2832
2833
2834
2835
2836
2837
2838
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
EXPORT_SYMBOL_GPL(ata_cable_unknown);
2844
2845
2846
2847
2848
2849
2850
2851
/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
EXPORT_SYMBOL_GPL(ata_cable_ignore);
2857
2858
2859
2860
2861
2862
2863
2864
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
EXPORT_SYMBOL_GPL(ata_cable_sata);
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_for_each_dev(dev, &ap->link, ALL)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

retry:
	ata_for_each_dev(dev, &ap->link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing.  If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI.  Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->dma_mode = 0xff;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights.  Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_for_each_dev(dev, &ap->link, ALL) {
		if (dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* read IDENTIFY page and configure devices.  We have to do the
	 * identify specific sequence bass-ackwards so that PDIAG- is
	 * released by the slave device.
	 */
	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of
	 * the reported cable types and sensed types.  When SATA
	 * drives indicate we have a bridge, we don't know which end
	 * of the link the bridge is which is a problem.
	 */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;

	/* After the identify sequence we can now set up the devices.  We
	 * do this in the normal order so that the user doesn't get
	 * confused.
	 */
	ata_for_each_dev(dev, &ap->link, ENABLED) {
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* success if at least one device is enabled */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		return 0;

	return -ENODEV;

fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		fallthrough;
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link, 0);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018 static void sata_print_link_status(struct ata_link *link)
3019 {
3020 u32 sstatus, scontrol, tmp;
3021
3022 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3023 return;
3024 sata_scr_read(link, SCR_CONTROL, &scontrol);
3025
3026 if (ata_phys_link_online(link)) {
3027 tmp = (sstatus >> 4) & 0xf;
3028 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
3029 sata_spd_string(tmp), sstatus, scontrol);
3030 } else {
3031 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
3032 sstatus, scontrol);
3033 }
3034 }
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044 struct ata_device *ata_dev_pair(struct ata_device *adev)
3045 {
3046 struct ata_link *link = adev->link;
3047 struct ata_device *pair = &link->device[1 - adev->devno];
3048 if (!ata_dev_enabled(pair))
3049 return NULL;
3050 return pair;
3051 }
3052 EXPORT_SYMBOL_GPL(ata_dev_pair);
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *	@spd_limit: Additional limit
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	If @spd_limit is non-zero, the speed is limited to equal to or
 *	lower than @spd_limit if such speed is supported.  If
 *	@spd_limit is slower than any supported speed, only the lowest
 *	supported speed is allowed.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
{
	u32 sstatus, spd, mask;
	int rc, bit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0 && ata_sstatus_online(sstatus))
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	bit = fls(mask) - 1;
	mask &= ~(1 << bit);

	/*
	 * Mask off all speeds higher than or equal to the current one.
	 * If the current SPD could not be determined (spd <= 1) we must
	 * not force the link down to 1.5Gbps on guesswork, so bail out
	 * instead.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		return -EINVAL;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	if (spd_limit) {
		/* Prefer speeds at or below @spd_limit; if none remain,
		 * fall back to the slowest speed still in the mask.
		 */
		if (mask & ((1 << spd_limit) - 1))
			mask &= (1 << spd_limit) - 1;
		else {
			bit = ffs(mask) - 1;
			mask = 1 << bit;
		}
	}

	link->sata_spd_limit = mask;

	ata_link_warn(link, "limiting SATA link speed to %s\n",
		      sata_spd_string(fls(mask)));

	return 0;
}
3133
3134 #ifdef CONFIG_ATA_ACPI
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3152 {
3153 u8 base_mode = 0xff, last_mode = 0xff;
3154 const struct ata_xfer_ent *ent;
3155 const struct ata_timing *t;
3156
3157 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3158 if (ent->shift == xfer_shift)
3159 base_mode = ent->base;
3160
3161 for (t = ata_timing_find_mode(base_mode);
3162 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3163 unsigned short this_cycle;
3164
3165 switch (xfer_shift) {
3166 case ATA_SHIFT_PIO:
3167 case ATA_SHIFT_MWDMA:
3168 this_cycle = t->cycle;
3169 break;
3170 case ATA_SHIFT_UDMA:
3171 this_cycle = t->udma;
3172 break;
3173 default:
3174 return 0xff;
3175 }
3176
3177 if (cycle > this_cycle)
3178 break;
3179
3180 last_mode = t->mode;
3181 }
3182
3183 return last_mode;
3184 }
3185 #endif
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the fastest remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the fastest remaining DMA mode, UDMA first */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* cap UDMA at modes that are safe on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		fallthrough;
	case ATA_DNXFER_FORCE_PIO:
		/* force PIO: disable DMA entirely */
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* at least one PIO mode must remain and something must change */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_warn(dev, "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3275
/*
 * ata_dev_set_mode - program the device's transfer mode and revalidate.
 * Issues SET FEATURES - XFER MODE (unless the NOSETXFER quirk applies on
 * SATA), re-reads IDENTIFY data and decides whether a device error from
 * the SETXFER command may safely be ignored for known-broken hardware.
 * Returns 0 on success, -EIO on unignorable failure, or the revalidation
 * errno.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask = 0;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	/* NOSETXFER only makes sense on SATA where the link speed is
	 * fixed; on PATA we must still issue SETXFER and hope.
	 */
	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
		dev_err_whine = " (SET_XFERMODE skipped)";
	else {
		if (nosetxfer)
			ata_dev_warn(dev,
				     "NOSETXFER but PATA detected - can't "
				     "skip SETXFER, might malfunction\n");
		err_mask = ata_dev_set_xfermode(dev);
	}

	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO) {
		/* Old CFA may refuse this command, which is just fine */
		if (ata_id_is_cfa(dev->id))
			ign_dev_err = 1;

		/* Catch several broken garbage emulations plus some
		 * pre-ATA devices.
		 */
		if (ata_id_major_version(dev->id) == 0 &&
		    dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;

		/* Some very old devices and some bad newer ones fail
		 * any kind of SET_XFERMODE request but support PIO0-2
		 * timings and no IORDY.
		 */
		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
	}

	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	 * Don't fail an MWDMA0 set IFF the bit is in the strict range.
	 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	ata_dev_dbg(dev, "xfer_shift=%u, xfer_mode=0x%x\n",
		    dev->xfer_shift, (int)dev->xfer_mode);

	if (!(ehc->i.flags & ATA_EHI_QUIET) ||
	    ehc->i.flags & ATA_EHI_DID_HARDRESET)
		ata_dev_info(dev, "configured for %s%s\n",
			     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
			     dev_err_whine);

	return 0;

fail:
	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
	return -EIO;
}
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
/**
 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Standard implementation of the function used to tune and set
 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_dev_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	ata_for_each_dev(dev, link, ENABLED) {
		unsigned int pio_mask, dma_mask;
		unsigned int mode_mask;

		mode_mask = ATA_DMA_MASK_ATA;
		if (dev->class == ATA_DEV_ATAPI)
			mode_mask = ATA_DMA_MASK_ATAPI;
		else if (ata_id_is_cfa(dev->id))
			mode_mask = ATA_DMA_MASK_CFA;

		ata_dev_xfermask(dev);
		ata_force_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);

		/* libata_dma_mask is the module-level DMA enable policy */
		if (libata_dma_mask & mode_mask)
			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
						     dev->udma_mask);
		else
			dma_mask = 0;

		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (ata_dma_enabled(dev))
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (dev->pio_mode == 0xff) {
			ata_dev_warn(dev, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!ata_dma_enabled(dev))
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_for_each_dev(dev, link, ENABLED) {
		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status.  If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
EXPORT_SYMBOL_GPL(ata_do_set_mode);
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477
3478
/**
 *	ata_wait_ready - wait for link to become ready
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready.  @check_ready should return
 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
 *	link doesn't seem to be occupied, other errno for other error
 *	conditions.
 *
 *	Transient -ENODEV conditions are allowed for
 *	ATA_TMOUT_FF_WAIT[_LONG].
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline;
	int warned = 0;

	/* choose which 0xff timeout to use, read comment in libata.h */
	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
	else
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);

	/* Slave readiness can't be tested separately from master.  On
	 * M/S emulation configuration, this function should be called
	 * only on the master and it will handle both master and slave.
	 */
	WARN_ON(link == link->ap->slave_link);

	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/*
		 * -ENODEV could be transient.  Ignore -ENODEV if link
		 * is online.  Also, some SATA devices take a long
		 * time to clear 0xff after reset.  Wait for
		 * nodev_deadline if the link isn't offline yet.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		/* warn once if the wait drags on with time still left */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_warn(link,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		ata_msleep(link->ap, 50);
	}
}
3545
3546
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3561 int (*check_ready)(struct ata_link *link))
3562 {
3563 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3564
3565 return ata_wait_ready(link, deadline, check_ready);
3566 }
3567 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585
3586 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3587 {
3588 struct ata_port *ap = link->ap;
3589 struct ata_eh_context *ehc = &link->eh_context;
3590 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3591 int rc;
3592
3593
3594 if (ehc->i.action & ATA_EH_HARDRESET)
3595 return 0;
3596
3597
3598 if (ap->flags & ATA_FLAG_SATA) {
3599 rc = sata_link_resume(link, timing, deadline);
3600
3601 if (rc && rc != -EOPNOTSUPP)
3602 ata_link_warn(link,
3603 "failed to resume link for reset (errno=%d)\n",
3604 rc);
3605 }
3606
3607
3608 if (ata_phys_link_offline(link))
3609 ehc->i.action &= ~ATA_EH_SOFTRESET;
3610
3611 return 0;
3612 }
3613 EXPORT_SYMBOL_GPL(ata_std_prereset);
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3630 unsigned long deadline)
3631 {
3632 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3633 bool online;
3634 int rc;
3635
3636
3637 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3638 return online ? -EAGAIN : rc;
3639 }
3640 EXPORT_SYMBOL_GPL(sata_std_hardreset);
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650
3651
3652
3653
3654 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3655 {
3656 u32 serror;
3657
3658
3659 if (!sata_scr_read(link, SCR_ERROR, &serror))
3660 sata_scr_write(link, SCR_ERROR, serror);
3661
3662
3663 sata_print_link_status(link);
3664 }
3665 EXPORT_SYMBOL_GPL(ata_std_postreset);
3666
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3684 const u16 *new_id)
3685 {
3686 const u16 *old_id = dev->id;
3687 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3688 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3689
3690 if (dev->class != new_class) {
3691 ata_dev_info(dev, "class mismatch %d != %d\n",
3692 dev->class, new_class);
3693 return 0;
3694 }
3695
3696 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3697 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3698 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3699 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3700
3701 if (strcmp(model[0], model[1])) {
3702 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3703 model[0], model[1]);
3704 return 0;
3705 }
3706
3707 if (strcmp(serial[0], serial[1])) {
3708 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3709 serial[0], serial[1]);
3710 return 0;
3711 }
3712
3713 return 1;
3714 }
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729
3730 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3731 {
3732 unsigned int class = dev->class;
3733 u16 *id = (void *)dev->link->ap->sector_buf;
3734 int rc;
3735
3736
3737 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3738 if (rc)
3739 return rc;
3740
3741
3742 if (!ata_dev_same_device(dev, class, id))
3743 return -ENODEV;
3744
3745 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3746 return 0;
3747 }
3748
3749
3750
3751
3752
3753
3754
3755
3756
3757
3758
3759
3760
3761
3762
3763
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	u64 n_native_sectors = dev->n_native_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA &&
	    new_class != ATA_DEV_ATAPI &&
	    new_class != ATA_DEV_ZAC &&
	    new_class != ATA_DEV_SEMB) {
		ata_dev_info(dev, "class mismatch %u != %u\n",
			     dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class != ATA_DEV_ATA || !n_sectors ||
	    dev->n_sectors == n_sectors)
		return 0;

	/* n_sectors has changed */
	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
		     (unsigned long long)n_sectors,
		     (unsigned long long)dev->n_sectors);

	/*
	 * Something could have caused HPA to be unlocked
	 * involuntarily.  If n_native_sectors hasn't changed and the
	 * new size matches it, keep the device.
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
		ata_dev_warn(dev,
			     "new n_sectors matches native, probably "
			     "late HPA unlock, n_sectors updated\n");
		/* use the larger n_sectors */
		return 0;
	}

	/*
	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
	 * unlocking HPA in those cases.
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
		ata_dev_warn(dev,
			     "old n_sectors matches native, probably "
			     "late HPA lock, will try to unlock HPA\n");
		/* try unlocking HPA */
		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
		rc = -EIO;
	} else
		rc = -ENODEV;

	/* restore original n_[native_]sectors and fail */
	dev->n_native_sectors = n_native_sectors;
	dev->n_sectors = n_sectors;
 fail:
	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
3845
/* One quirk-list entry: a device whose IDENTIFY model string matches
 * @model_num (and, if non-NULL, whose firmware revision matches
 * @model_rev) gets @horkage applied.  Patterns are glob expressions.
 */
struct ata_blacklist_entry {
	const char *model_num;	/* glob matched against IDENTIFY model */
	const char *model_rev;	/* optional firmware rev glob, NULL = any */
	unsigned long horkage;	/* ATA_HORKAGE_* flags to apply on match */
};
3851
static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A",		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
	{ "VRFDFC22048UCHC-TE*", NULL,		ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
	/* Similar story with ASMedia 1092 */
	{ "ASMT109x- Config",	NULL,		ATA_HORKAGE_DISABLE },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },

	/*
	 * Causes silent data corruption with higher max sects.
	 */
	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },

	/*
	 * These devices time out with higher max sects.
	 */
	{ "LITEON CX1-JB*-HP",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
	{ "LITEON EP1-*",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },

	/* Seagate NCQ + FLUSH CACHE firmware bug */
	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	/* drives which fail FPDMA_AA activation (some may freeze afterwards)
	   the ST disks also have LPM issues */
	{ "ST1000LM024 HN-M101MBB", NULL,	ATA_HORKAGE_BROKEN_FPDMA_AA |
						ATA_HORKAGE_NOLPM },
	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ },

	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ },

	/* Sandisk SD7/8/9s lock up hard on large trims */
	{ "SanDisk SD[789]*",	NULL,		ATA_HORKAGE_MAX_TRIM_128M },

	/* Devices which report 1 sector over size HPA */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* this one allows HPA unlocking but fails IOs on the area */
	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE },
	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB },
	/* Maybe we should just blacklist TSSTcorp... */
	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB },

	/* Devices that do not need bridging limits applied */
	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK },
	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK },

	/* Devices which aren't very happy with higher link speeds */
	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS },
	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS },

	/*
	 * Devices which choke on SETXFER.  Applies only if both the
	 * device and controller are SATA.
	 */
	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },

	/* These specific Pioneer models have LPM issues */
	{ "PIONEER BD-RW   BDR-207M",	NULL,	ATA_HORKAGE_NOLPM },
	{ "PIONEER BD-RW   BDR-205",	NULL,	ATA_HORKAGE_NOLPM },

	/* Crucial BX100 SSD 500GB has broken LPM support */
	{ "CT500BX100SSD1",		NULL,	ATA_HORKAGE_NOLPM },

	/* The 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
	{ "Crucial_CT512MX100*",	"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM },
	/* 512GB MX100 with newer firmware has only LPM issues */
	{ "Crucial_CT512MX100*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM },

	/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
	{ "Crucial_CT480M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM },
	{ "Crucial_CT960M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM },

	/* These specific Samsung models/firmware versions have LPM issues */
	{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM },
	{ "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM },
	{ "SAMSUNG MZ7TD256HAFV-000L9", NULL,       ATA_HORKAGE_NOLPM },
	{ "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM },

	/* devices that don't properly handle queued TRIM commands */
	{ "Micron_M500IT_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "Micron_M5[15]0_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "Samsung SSD 840 EVO*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_NO_DMA_LOG |
						ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "Samsung SSD 840*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "Samsung SSD 850*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "Samsung SSD 860*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NO_NCQ_ON_ATI },
	{ "Samsung SSD 870*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NO_NCQ_ON_ATI },
	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM },

	/* devices that don't properly handle TRIM commands */
	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM },
	{ "M88V29*",			NULL,	ATA_HORKAGE_NOTRIM },

	/*
	 * Some drives deterministically return zeroes after TRIM.
	 * The intel entry below is kept (with horkage 0) so it is not
	 * caught by the more generic "INTEL*SSD*" pattern further down.
	 */
	{ "INTEL*SSDSC2MH*",		NULL,	0 },

	{ "Micron*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "Crucial*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "INTEL*SSD*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "SAMSUNG*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "SAMSUNG*MZ7KM*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "ST[1248][0248]0[FH]*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },

	/*
	 * Some WD SATA-I drives spin up and down erratically when the link
	 * is put into the slumber mode.
	 */
	{ "WDC WD800JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD1200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD1600JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD2000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD2500JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD3000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD3200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },

	/*
	 * This sata dom device goes on a walkabout when the ATA_LOG_DIRECTORY
	 * log page is accessed.
	 */
	{ "SATADOM-ML 3ME",		NULL,	ATA_HORKAGE_NO_LOG_DIR },

	/* End Marker */
	{ }
};
4107
4108 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4109 {
4110 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4111 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4112 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4113
4114 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4115 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4116
4117 while (ad->model_num) {
4118 if (glob_match(ad->model_num, model_num)) {
4119 if (ad->model_rev == NULL)
4120 return ad->horkage;
4121 if (glob_match(ad->model_rev, model_rev))
4122 return ad->horkage;
4123 }
4124 ad++;
4125 }
4126 return 0;
4127 }
4128
4129 static int ata_dma_blacklisted(const struct ata_device *dev)
4130 {
4131
4132
4133
4134
4135 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4136 (dev->flags & ATA_DFLAG_CDB_INTR))
4137 return 1;
4138 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4139 }
4140
4141
4142
4143
4144
4145
4146
4147
4148
4149 static int ata_is_40wire(struct ata_device *dev)
4150 {
4151 if (dev->horkage & ATA_HORKAGE_IVB)
4152 return ata_drive_40wire_relaxed(dev->id);
4153 return ata_drive_40wire(dev->id);
4154 }
4155
4156
4157
4158
4159
4160
4161
4162
4163
4164
4165
4166
4167
4168
/**
 *	cable_is_40wire		-	40/80/SATA decider
 *	@ap: port to consider
 *
 *	This function encapsulates the policy for speed management.
 *
 *	RETURNS:
 *	1 if the cable appears to be 40 wire.
 */
static int cable_is_40wire(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;

	/* If the controller thinks we are 40 wire, we are. */
	if (ap->cbl == ATA_CBL_PATA40)
		return 1;

	/* If the controller thinks we are 80 wire, we are. */
	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
		return 0;

	/* If the system is known to be 40 wire short cable (eg
	 * laptop), then we allow 80 wire modes even if the drive
	 * isn't sure.
	 */
	if (ap->cbl == ATA_CBL_PATA40_SHORT)
		return 0;

	/* If the controller doesn't know, we scan.  All drives on the
	 * chain must report a 40-wire cable for the chain to be
	 * treated as 40 wire; any 80-wire detect wins.
	 */
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ENABLED) {
			if (!ata_is_40wire(dev))
				return 0;
		}
	}
	return 1;
}
4206
4207
4208
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned int xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/* CFA Advanced TrueIDE timings are not allowed on a shared
	 * cable.
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "device is on DMA blacklist, disabling DMA\n");
	}

	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "simplex DMA is claimed by other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * actually required.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if (cable_is_40wire(ap)) {
			ata_dev_warn(dev,
				     "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4284
4285
4286
4287
4288
4289
4290
4291
4292
4293
4294
4295
4296
4297
4298
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	ata_dev_dbg(dev, "set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	/* If we are using IORDY we must send the mode setting command */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* If the device has IORDY and the controller does not - turn it off */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else /* In the ancient relic department - skip all of this */
		return 0;

	/* On some disks, this command causes spin-up, so we need longer
	 * timeout (15 seconds).
	 */
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);

	return err_mask;
}
4329
4330
4331
4332
4333
4334
4335
4336
4337
4338
4339
4340
4341
4342
4343
4344
4345 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4346 {
4347 struct ata_taskfile tf;
4348 unsigned int err_mask;
4349 unsigned int timeout = 0;
4350
4351
4352 ata_dev_dbg(dev, "set features - SATA features\n");
4353
4354 ata_tf_init(dev, &tf);
4355 tf.command = ATA_CMD_SET_FEATURES;
4356 tf.feature = enable;
4357 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4358 tf.protocol = ATA_PROT_NODATA;
4359 tf.nsect = feature;
4360
4361 if (enable == SETFEATURES_SPINUP)
4362 timeout = ata_probe_timeout ?
4363 ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
4364 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
4365
4366 return err_mask;
4367 }
4368 EXPORT_SYMBOL_GPL(ata_dev_set_feature);
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382 static unsigned int ata_dev_init_params(struct ata_device *dev,
4383 u16 heads, u16 sectors)
4384 {
4385 struct ata_taskfile tf;
4386 unsigned int err_mask;
4387
4388
4389 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4390 return AC_ERR_INVALID;
4391
4392
4393 ata_dev_dbg(dev, "init dev params \n");
4394
4395 ata_tf_init(dev, &tf);
4396 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4397 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4398 tf.protocol = ATA_PROT_NODATA;
4399 tf.nsect = sectors;
4400 tf.device |= (heads - 1) & 0x0f;
4401
4402 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4403
4404
4405
4406 if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
4407 err_mask = 0;
4408
4409 return err_mask;
4410 }
4411
4412
4413
4414
4415
4416
4417
4418
4419
4420
4421
4422
4423
4424
4425
4426 int atapi_check_dma(struct ata_queued_cmd *qc)
4427 {
4428 struct ata_port *ap = qc->ap;
4429
4430
4431
4432
4433 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4434 unlikely(qc->nbytes & 15))
4435 return 1;
4436
4437 if (ap->ops->check_atapi_dma)
4438 return ap->ops->check_atapi_dma(qc);
4439
4440 return 0;
4441 }
4442
4443
4444
4445
4446
4447
4448
4449
4450
4451
4452
4453
4454
4455
4456
4457
4458 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4459 {
4460 struct ata_link *link = qc->dev->link;
4461
4462 if (ata_is_ncq(qc->tf.protocol)) {
4463 if (!ata_tag_valid(link->active_tag))
4464 return 0;
4465 } else {
4466 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4467 return 0;
4468 }
4469
4470 return ATA_DEFER_LINK;
4471 }
4472 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
4473
/* No-op ->qc_prep for drivers that need no per-command preparation. */
enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
{
	return AC_ERR_OK;
}
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
4479
4480
4481
4482
4483
4484
4485
4486
4487
4488
4489
4490
4491
4492
4493 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4494 unsigned int n_elem)
4495 {
4496 qc->sg = sg;
4497 qc->n_elem = n_elem;
4498 qc->cursg = qc->sg;
4499 }
4500
4501 #ifdef CONFIG_HAS_DMA
4502
4503
4504
4505
4506
4507
4508
4509
4510
4511
4512 static void ata_sg_clean(struct ata_queued_cmd *qc)
4513 {
4514 struct ata_port *ap = qc->ap;
4515 struct scatterlist *sg = qc->sg;
4516 int dir = qc->dma_dir;
4517
4518 WARN_ON_ONCE(sg == NULL);
4519
4520 if (qc->n_elem)
4521 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4522
4523 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4524 qc->sg = NULL;
4525 }
4526
4527
4528
4529
4530
4531
4532
4533
4534
4535
4536
4537
4538
4539
4540 static int ata_sg_setup(struct ata_queued_cmd *qc)
4541 {
4542 struct ata_port *ap = qc->ap;
4543 unsigned int n_elem;
4544
4545 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4546 if (n_elem < 1)
4547 return -1;
4548
4549 qc->orig_n_elem = qc->n_elem;
4550 qc->n_elem = n_elem;
4551 qc->flags |= ATA_QCFLAG_DMAMAP;
4552
4553 return 0;
4554 }
4555
4556 #else
4557
/* !CONFIG_HAS_DMA: DMA mapping is unavailable; setup always fails. */
static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
4560
4561 #endif
4562
4563
4564
4565
4566
4567
4568
4569
4570
4571
4572
4573
4574
4575 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4576 {
4577 #ifdef __BIG_ENDIAN
4578 unsigned int i;
4579
4580 for (i = 0; i < buf_words; i++)
4581 buf[i] = le16_to_cpu(buf[i]);
4582 #endif
4583 }
4584
4585
4586
4587
4588
4589
4590
4591
4592
4593
4594
4595 void ata_qc_free(struct ata_queued_cmd *qc)
4596 {
4597 qc->flags = 0;
4598 if (ata_tag_valid(qc->tag))
4599 qc->tag = ATA_TAG_POISON;
4600 }
4601
/*
 * Complete an active qc: tear down DMA mappings, release link/port tag
 * accounting and invoke the completion callback.  Must be called with
 * the host lock held.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	struct ata_link *link;

	WARN_ON_ONCE(qc == NULL);
	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
	ap = qc->ap;
	link = qc->dev->link;

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* release per-link tag accounting */
	if (ata_is_ncq(qc->tf.protocol)) {
		link->sactive &= ~(1 << qc->hw_tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/*
	 * Mark the qc inactive before calling the completion callback
	 * so the command cannot be completed a second time.
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1ULL << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
4640
4641 static void fill_result_tf(struct ata_queued_cmd *qc)
4642 {
4643 struct ata_port *ap = qc->ap;
4644
4645 qc->result_tf.flags = qc->tf.flags;
4646 ap->ops->qc_fill_rtf(qc);
4647 }
4648
4649 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4650 {
4651 struct ata_device *dev = qc->dev;
4652
4653 if (!ata_is_data(qc->tf.protocol))
4654 return;
4655
4656 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4657 return;
4658
4659 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4660 }
4661
4662
4663
4664
4665
4666
4667
4668
4669
4670
4671
4672
4673
4674
4675
4676
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Trigger the LED (if available) */
	ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));

	/*
	 * Ports with an ->error_handler use new-style EH: failed qcs
	 * are flagged and handed to EH.  Ports without one fall through
	 * to the legacy path in the else branch below.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		/*
		 * Finish internal commands without any further
		 * processing and always with the result TF filled.
		 */
		if (unlikely(ata_tag_internal(qc->tag))) {
			fill_result_tf(qc);
			trace_ata_qc_complete_internal(qc);
			__ata_qc_complete(qc);
			return;
		}

		/*
		 * A non-internal qc has failed.  Fill the result TF
		 * and summon EH.
		 */
		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			fill_result_tf(qc);
			trace_ata_qc_complete_failed(qc);
			ata_qc_schedule_eh(qc);
			return;
		}

		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		trace_ata_qc_complete_done(qc);

		/*
		 * Some commands invalidate cached device state; schedule
		 * revalidation (or record sleep state) after success.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF &&
			    qc->tf.feature != SETFEATURES_RA_ON &&
			    qc->tf.feature != SETFEATURES_RA_OFF)
				break;
			fallthrough;
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
EXPORT_SYMBOL_GPL(ata_qc_complete);
4772
4773
4774
4775
4776
4777
4778
4779
4780
4781
4782
4783 u64 ata_qc_get_active(struct ata_port *ap)
4784 {
4785 u64 qc_active = ap->qc_active;
4786
4787
4788 if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
4789 qc_active |= (1 << 0);
4790 qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
4791 }
4792
4793 return qc_active;
4794 }
4795 EXPORT_SYMBOL_GPL(ata_qc_get_active);
4796
4797
4798
4799
4800
4801
4802
4803
4804
4805
4806
4807
4808
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command for submission to the device.  This
 *	includes DMA-mapping the data, filling in the S/G table, and
 *	finally writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/*
	 * With new-style EH, only one non-NCQ command may be
	 * outstanding on a link at a time.
	 */
	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	/* update link/port tag accounting before issuing */
	if (ata_is_ncq(prot)) {
		WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->hw_tag;
	} else {
		WARN_ON_ONCE(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1ULL << qc->tag;

	/*
	 * LLDs are guaranteed a non-empty sg list for data commands;
	 * fail the command here otherwise.
	 */
	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
		goto sys_err;

	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sys_err;

	/* if the device is sleeping, schedule reset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_RESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	trace_ata_qc_prep(qc);
	qc->err_mask |= ap->ops->qc_prep(qc);
	if (unlikely(qc->err_mask))
		goto err;
	trace_ata_qc_issue(qc);
	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sys_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
4872
4873
4874
4875
4876
4877
4878
4879
4880
4881
4882
4883
4884
4885
4886
4887 bool ata_phys_link_online(struct ata_link *link)
4888 {
4889 u32 sstatus;
4890
4891 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4892 ata_sstatus_online(sstatus))
4893 return true;
4894 return false;
4895 }
4896
4897
4898
4899
4900
4901
4902
4903
4904
4905
4906
4907
4908
4909
4910
4911 bool ata_phys_link_offline(struct ata_link *link)
4912 {
4913 u32 sstatus;
4914
4915 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4916 !ata_sstatus_online(sstatus))
4917 return true;
4918 return false;
4919 }
4920
4921
4922
4923
4924
4925
4926
4927
4928
4929
4930
4931
4932
4933
4934
4935
4936
4937 bool ata_link_online(struct ata_link *link)
4938 {
4939 struct ata_link *slave = link->ap->slave_link;
4940
4941 WARN_ON(link == slave);
4942
4943 return ata_phys_link_online(link) ||
4944 (slave && ata_phys_link_online(slave));
4945 }
4946 EXPORT_SYMBOL_GPL(ata_link_online);
4947
4948
4949
4950
4951
4952
4953
4954
4955
4956
4957
4958
4959
4960
4961
4962
4963
4964 bool ata_link_offline(struct ata_link *link)
4965 {
4966 struct ata_link *slave = link->ap->slave_link;
4967
4968 WARN_ON(link == slave);
4969
4970 return ata_phys_link_offline(link) &&
4971 (!slave || ata_phys_link_offline(slave));
4972 }
4973 EXPORT_SYMBOL_GPL(ata_link_offline);
4974
4975 #ifdef CONFIG_PM
/*
 * Hand a PM request to EH: record the PM message on the port, mark all
 * links with @action/@ehi_flags and schedule EH.  When @async is false,
 * wait until EH has processed the request.  Must be called without the
 * port lock held.
 */
static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
				unsigned int action, unsigned int ehi_flags,
				bool async)
{
	struct ata_link *link;
	unsigned long flags;

	/*
	 * A previous PM operation may still be in flight; wait for it
	 * to finish before queueing another one.
	 */
	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}

	/* Request the PM operation to EH */
	spin_lock_irqsave(ap->lock, flags);

	ap->pm_mesg = mesg;
	ap->pflags |= ATA_PFLAG_PM_PENDING;
	ata_for_each_link(link, ap, HOST_FIRST) {
		link->eh_info.action |= action;
		link->eh_info.flags |= ehi_flags;
	}

	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!async) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}
}
5010
5011
5012
5013
5014
5015
5016
5017
/*
 * EH flags used when suspending a port: keep EH quiet and skip
 * autopsy/recovery since the port is only being quiesced.
 */
static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
						| ATA_EHI_NO_AUTOPSY
						| ATA_EHI_NO_RECOVERY;

/* Synchronous port suspend via EH. */
static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
}

/* As above, but returns without waiting for EH to finish. */
static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
}
5031
5032 static int ata_port_pm_suspend(struct device *dev)
5033 {
5034 struct ata_port *ap = to_ata_port(dev);
5035
5036 if (pm_runtime_suspended(dev))
5037 return 0;
5038
5039 ata_port_suspend(ap, PMSG_SUSPEND);
5040 return 0;
5041 }
5042
5043 static int ata_port_pm_freeze(struct device *dev)
5044 {
5045 struct ata_port *ap = to_ata_port(dev);
5046
5047 if (pm_runtime_suspended(dev))
5048 return 0;
5049
5050 ata_port_suspend(ap, PMSG_FREEZE);
5051 return 0;
5052 }
5053
/* Poweroff/hibernate callback: suspend the port with PMSG_HIBERNATE. */
static int ata_port_pm_poweroff(struct device *dev)
{
	ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
	return 0;
}
5059
/* EH flags used when resuming a port: quiet, no autopsy. */
static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
						| ATA_EHI_QUIET;

/* Synchronous port resume via EH; requests a link reset. */
static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
}

/* As above, but returns without waiting for EH to finish. */
static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
}
5072
/*
 * System resume callback: kick off an asynchronous port resume, then
 * re-arm runtime PM so the device starts out in the active state.
 */
static int ata_port_pm_resume(struct device *dev)
{
	ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	return 0;
}
5081
5082
5083
5084
5085
5086
5087
5088
5089
5090 static int ata_port_runtime_idle(struct device *dev)
5091 {
5092 struct ata_port *ap = to_ata_port(dev);
5093 struct ata_link *link;
5094 struct ata_device *adev;
5095
5096 ata_for_each_link(link, ap, HOST_FIRST) {
5097 ata_for_each_dev(adev, link, ENABLED)
5098 if (adev->class == ATA_DEV_ATAPI &&
5099 !zpodd_dev_enabled(adev))
5100 return -EBUSY;
5101 }
5102
5103 return 0;
5104 }
5105
/* Runtime-PM suspend: quiesce the port via EH (synchronous). */
static int ata_port_runtime_suspend(struct device *dev)
{
	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
	return 0;
}

/* Runtime-PM resume: revive the port via EH (synchronous). */
static int ata_port_runtime_resume(struct device *dev)
{
	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
	return 0;
}
5117
/* PM callbacks for ATA ports: system sleep states and runtime PM. */
static const struct dev_pm_ops ata_port_pm_ops = {
	.suspend = ata_port_pm_suspend,
	.resume = ata_port_pm_resume,
	.freeze = ata_port_pm_freeze,
	.thaw = ata_port_pm_resume,
	.poweroff = ata_port_pm_poweroff,
	.restore = ata_port_pm_resume,

	.runtime_suspend = ata_port_runtime_suspend,
	.runtime_resume = ata_port_runtime_resume,
	.runtime_idle = ata_port_runtime_idle,
};
5130
5131
5132
5133
5134
5135
/* Request an asynchronous suspend of the port (exported for SAS users). */
void ata_sas_port_suspend(struct ata_port *ap)
{
	ata_port_suspend_async(ap, PMSG_SUSPEND);
}
EXPORT_SYMBOL_GPL(ata_sas_port_suspend);

/* Request an asynchronous resume of the port (exported for SAS users). */
void ata_sas_port_resume(struct ata_port *ap)
{
	ata_port_resume_async(ap, PMSG_RESUME);
}
EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5147
5148
5149
5150
5151
5152
5153
5154
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Record the PM state on the host device.  The actual port
 *	quiescing is performed by the per-port suspend callbacks.
 */
void ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	host->dev->power.power_state = mesg;
}
EXPORT_SYMBOL_GPL(ata_host_suspend);
5160
5161
5162
5163
5164
5165
5166
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Mark the host device as powered on; port revival is performed
 *	by the per-port resume callbacks.
 */
void ata_host_resume(struct ata_host *host)
{
	host->dev->power.power_state = PMSG_ON;
}
EXPORT_SYMBOL_GPL(ata_host_resume);
5172 #endif
5173
/* Device type for ATA ports; supplies the PM ops to the driver core. */
const struct device_type ata_port_type = {
	.name = "ata_port",
#ifdef CONFIG_PM
	.pm = &ata_port_pm_ops,
#endif
};
5180
5181
5182
5183
5184
5185
5186
5187
5188
5189
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the attached device, reset together */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/*
	 * Clearing dev->flags and horkage is done under the host lock;
	 * NOTE(review): presumably because parts of flags are updated
	 * asynchronously under that lock elsewhere - confirm.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* wipe the re-probeable region of the structure */
	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5215
5216
5217
5218
5219
5220
5221
5222
5223
5224
5225
5226
/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: ATA port the link is attached to
 *	@link: Link structure to initialize
 *	@pmp: Port multiplier port number
 *
 *	Initialize @link and the devices attached to it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* initialize each device slot on the link */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
#ifdef CONFIG_ATA_ACPI
		dev->gtf_filter = ata_acpi_gtf_filter;
#endif
		ata_dev_init(dev);
	}
}
5252
5253
5254
5255
5256
5257
5258
5259
5260
5261
5262
5263
5264
5265
/**
 *	sata_link_init_spd - Initialize link->sata_spd_limit
 *	@link: Link to configure sata_spd_limit for
 *
 *	Initialize ``link->[hw_]sata_spd_limit`` from the SPD field of
 *	the link's SControl register, honoring libata.force overrides.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_init_spd(struct ata_link *link)
{
	u8 spd;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
	if (rc)
		return rc;

	/* SPD limit field of SControl; 0 means no limit configured */
	spd = (link->saved_scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	/* apply libata.force overrides, if any */
	ata_force_link_limits(link);

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
5285
5286
5287
5288
5289
5290
5291
5292
5293
5294
5295
5296
5297
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocated ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* port starts frozen; unfrozen/cleared during probing */
	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
	ap->lock = &host->lock;
	ap->print_id = -1;
	ap->local_port_no = -1;
	ap->host = host;
	ap->dev = host->dev;

	mutex_init(&ap->scsi_scan_mutex);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_completion(&ap->park_req_pending);
	timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
		    TIMER_DEFERRABLE);

	ap->cbl = ATA_CBL_NONE;

	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	ata_sff_port_init(ap);

	return ap;
}
5334
5335 static void ata_devres_release(struct device *gendev, void *res)
5336 {
5337 struct ata_host *host = dev_get_drvdata(gendev);
5338 int i;
5339
5340 for (i = 0; i < host->n_ports; i++) {
5341 struct ata_port *ap = host->ports[i];
5342
5343 if (!ap)
5344 continue;
5345
5346 if (ap->scsi_host)
5347 scsi_host_put(ap->scsi_host);
5348
5349 }
5350
5351 dev_set_drvdata(gendev, NULL);
5352 ata_host_put(host);
5353 }
5354
5355 static void ata_host_release(struct kref *kref)
5356 {
5357 struct ata_host *host = container_of(kref, struct ata_host, kref);
5358 int i;
5359
5360 for (i = 0; i < host->n_ports; i++) {
5361 struct ata_port *ap = host->ports[i];
5362
5363 kfree(ap->pmp_link);
5364 kfree(ap->slave_link);
5365 kfree(ap);
5366 host->ports[i] = NULL;
5367 }
5368 kfree(host);
5369 }
5370
/* Take a reference on @host. */
void ata_host_get(struct ata_host *host)
{
	kref_get(&host->kref);
}

/* Drop a reference on @host; frees it via ata_host_release() on last put. */
void ata_host_put(struct ata_host *host)
{
	kref_put(&host->kref, ata_host_release);
}
EXPORT_SYMBOL_GPL(ata_host_put);
5381
5382
5383
5384
5385
5386
5387
5388
5389
5390
5391
5392
5393
5394
5395
5396
5397
5398
5399
5400
5401
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  The LLD
 *	calls this to allocate a host, initializes it fully, and then
 *	attaches it using ata_host_register().
 *
 *	RETURNS:
 *	Allocated ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;
	void *dr;

	/* alloc a container for our list of ATA ports (buses) */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = kzalloc(sz, GFP_KERNEL);
	if (!host)
		return NULL;

	/* open a devres group so a partial init can be rolled back */
	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		goto err_free;

	dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
	if (!dr)
		goto err_out;

	devres_add(dev, dr);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->n_ports = max_ports;
	kref_init(&host->kref);

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

err_out:
	devres_release_group(dev, NULL);
err_free:
	kfree(host);
	return NULL;
}
EXPORT_SYMBOL_GPL(ata_host_alloc);
5453
5454
5455
5456
5457
5458
5459
5460
5461
5462
5463
5464
5465
5466
5467
5468
5469
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate an ATA host and initialize it with info from @ppi.
 *	@ppi may contain fewer entries than @n_ports; the last non-NULL
 *	entry is reused for the remaining ports.
 *
 *	RETURNS:
 *	Allocated ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi = &ata_dummy_port_info;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* keep using the last pi once ppi runs out of entries */
		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* default host ops to the first non-dummy port ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
	}

	return host;
}
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
5502
5503 static void ata_host_stop(struct device *gendev, void *res)
5504 {
5505 struct ata_host *host = dev_get_drvdata(gendev);
5506 int i;
5507
5508 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5509
5510 for (i = 0; i < host->n_ports; i++) {
5511 struct ata_port *ap = host->ports[i];
5512
5513 if (ap->ops->port_stop)
5514 ap->ops->port_stop(ap);
5515 }
5516
5517 if (host->ops->host_stop)
5518 host->ops->host_stop(host);
5519 }
5520
5521
5522
5523
5524
5525
5526
5527
5528
5529
5530
5531
5532
5533
5534
5535
5536
5537
5538
5539
5540
/*
 * Resolve ops inheritance: treat the ops structure as an array of
 * method slots (everything up to ->inherits) and, for each unset slot,
 * copy the value from the nearest ancestor that has it set.  Slots
 * holding an ERR_PTR sentinel resolve to NULL, which lets an ops table
 * explicitly suppress an inherited method.  ops->inherits is cleared
 * afterwards so the work happens at most once per table; a static lock
 * serializes concurrent finalization.
 */
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	static DEFINE_SPINLOCK(lock);
	const struct ata_port_operations *cur;
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	if (!ops || !ops->inherits)
		return;

	spin_lock(&lock);

	/* walk ancestry, filling unset slots from the nearest ancestor */
	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	/* ERR_PTR sentinels mean "explicitly not implemented" */
	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	ops->inherits = NULL;

	spin_unlock(&lock);
}
5570
5571
5572
5573
5574
5575
5576
5577
5578
5579
5580
5581
5582
5583
5584
5585
5586
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops is not initialized yet, it is set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops && host->ops->host_stop)
		have_stop = 1;

	if (have_stop) {
		/* devres entry so stops run automatically on release */
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				if (rc != -ENODEV)
					dev_err(host->dev,
						"failed to start port %d (errno=%d)\n",
						i, rc);
				goto err_out;
			}
		}
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

err_out:
	/* unwind: stop the ports that were already started */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
EXPORT_SYMBOL_GPL(ata_host_start);
5651
5652
5653
5654
5655
5656
5657
5658
5659 void ata_host_init(struct ata_host *host, struct device *dev,
5660 struct ata_port_operations *ops)
5661 {
5662 spin_lock_init(&host->lock);
5663 mutex_init(&host->eh_mutex);
5664 host->n_tags = ATA_MAX_QUEUE;
5665 host->dev = dev;
5666 host->ops = ops;
5667 kref_init(&host->kref);
5668 }
5669 EXPORT_SYMBOL_GPL(ata_host_init);
5670
/*
 * Kick EH to probe all devices on @ap: request a quiet reset with the
 * full probe mask and schedule EH.  The caller may wait with
 * ata_port_wait_eh().
 */
void __ata_port_probe(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned long flags;

	/* kick EH for boot probing */
	spin_lock_irqsave(ap->lock, flags);

	ehi->probe_mask |= ATA_ALL_DEVICES;
	ehi->action |= ATA_EH_RESET;
	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
	ap->pflags |= ATA_PFLAG_LOADING;
	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);
}
5689
5690 int ata_port_probe(struct ata_port *ap)
5691 {
5692 int rc = 0;
5693
5694 if (ap->ops->error_handler) {
5695 __ata_port_probe(ap);
5696 ata_port_wait_eh(ap);
5697 } else {
5698 rc = ata_bus_probe(ap);
5699 }
5700 return rc;
5701 }
5702
5703
/* Async worker that probes one port and then scans its SCSI host. */
static void async_port_probe(void *data, async_cookie_t cookie)
{
	struct ata_port *ap = data;

	/*
	 * If we're not allowed to scan this host in parallel,
	 * we need to wait until all previous scans have completed
	 * before going further.
	 */
	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
		async_synchronize_cookie(cookie);

	(void)ata_port_probe(ap);

	/* synchronize here to keep stable device ordering */
	async_synchronize_cookie(cookie);

	ata_scsi_scan_host(ap, 1);
}
5725
5726
5727
5728
5729
5730
5731
5732
5733
5734
5735
5736
5737
5738
5739
5740
5741
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register an initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by the LLD.  This
 *	function registers @host with the ATA and SCSI layers and
 *	probes registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_err(host->dev, "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/*
	 * Blow away unused ports.  This happens when the LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names */
	for (i = 0; i < host->n_ports; i++) {
		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
		host->ports[i]->local_port_no = i + 1;
	}

	/* create transport objects for each port */
	for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev,host->ports[i]);
		if (rc) {
			goto err_tadd;
		}
	}

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		goto err_tadd;

	/* set cable, sata_spd_limit and report per-port info */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned int xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_info(ap, "%cATA max %s %s\n",
				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
				      ata_mode_string(xfer_mask),
				      ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_info(ap, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		ap->cookie = async_schedule(async_port_probe, ap);
	}

	return 0;

err_tadd:
	/* unwind the transport objects added so far */
	while (--i >= 0) {
		ata_tport_delete(host->ports[i]);
	}
	return rc;

}
EXPORT_SYMBOL_GPL(ata_host_register);
5824
5825
5826
5827
5828
5829
5830
5831
5832
5833
5834
5835
5836
5837
5838
5839
5840
5841
5842
5843
5844
5845
5846
5847
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	After allocating and initializing an ATA host, LLDs typically
 *	start it, request its IRQ and register it.  This helper does
 *	all three in one go.
 *
 *	An IRQ of 0 skips the IRQ registration (polling mode); in that
 *	case @irq_handler must be NULL.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;
	char *irq_desc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* special case for polling mode */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
				  dev_driver_string(host->dev),
				  dev_name(host->dev));
	if (!irq_desc)
		return -ENOMEM;

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      irq_desc, host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
EXPORT_SYMBOL_GPL(ata_host_activate);
5887
5888
5889
5890
5891
5892
5893
5894
5895
5896
5897
5898
/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait until EH has finished unloading */
	ata_port_wait_eh(ap);

	/* EH must have marked the port unloaded by now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	cancel_delayed_work_sync(&ap->hotplug_task);

skip_eh:
	/* clean up zpodd on port removal */
	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(dev, link, ALL) {
			if (zpodd_dev_enabled(dev))
				zpodd_exit(dev);
		}
	}
	/* remove PMP transport links, if any */
	if (ap->pmp_link) {
		int i;
		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_tlink_delete(&ap->pmp_link[i]);
	}

	/* remove the associated SCSI host and transport port */
	scsi_remove_host(ap->scsi_host);
	ata_tport_delete(ap);
}
5939
5940
5941
5942
5943
5944
5945
5946
5947
5948
/**
 *	ata_host_detach - Detach all ports of an ATA host
 *	@host: Host to detach
 *
 *	Detach all ports of @host.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		/* Ensure the async port probe has completed */
		async_synchronize_cookie(host->ports[i]->cookie + 1);
		ata_port_detach(host->ports[i]);
	}

	/* the host is dead now, dissociate ACPI */
	ata_acpi_dissociate(host);
}
EXPORT_SYMBOL_GPL(ata_host_detach);
5963
5964 #ifdef CONFIG_PCI
5965
5966
5967
5968
5969
5970
5971
5972
5973
5974
5975
5976
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that a hot-unplug
 *	or module unload event has occurred.  Detach all ports;
 *	resource release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	ata_host_detach(pci_get_drvdata(pdev));
}
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5984
/*
 * PCI shutdown callback: freeze and stop every port so the controller
 * performs no further work while the machine goes down.
 */
void ata_pci_shutdown_one(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ap->pflags |= ATA_PFLAG_FROZEN;

		/* Disable port interrupts */
		if (ap->ops->freeze)
			ap->ops->freeze(ap);

		/* Stop the port */
		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
}
EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
6005
6006
6007 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6008 {
6009 unsigned long tmp = 0;
6010
6011 switch (bits->width) {
6012 case 1: {
6013 u8 tmp8 = 0;
6014 pci_read_config_byte(pdev, bits->reg, &tmp8);
6015 tmp = tmp8;
6016 break;
6017 }
6018 case 2: {
6019 u16 tmp16 = 0;
6020 pci_read_config_word(pdev, bits->reg, &tmp16);
6021 tmp = tmp16;
6022 break;
6023 }
6024 case 4: {
6025 u32 tmp32 = 0;
6026 pci_read_config_dword(pdev, bits->reg, &tmp32);
6027 tmp = tmp32;
6028 break;
6029 }
6030
6031 default:
6032 return -EINVAL;
6033 }
6034
6035 tmp &= bits->mask;
6036
6037 return (tmp == bits->val) ? 1 : 0;
6038 }
6039 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6040
6041 #ifdef CONFIG_PM
/*
 * Save PCI state and disable the device; enter D3hot only for real
 * sleep transitions (PM_EVENT_SLEEP).
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6051
/*
 * Bring the PCI device back to D0, restore its saved state and
 * re-enable it (including bus mastering).
 *
 * RETURNS: 0 on success, -errno if the device could not be enabled.
 */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6070
/* Suspend the ATA host, then the PCI device itself. */
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = pci_get_drvdata(pdev);

	ata_host_suspend(host, mesg);

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6082
/* Resume the PCI device first and, on success, the ATA host. */
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc = ata_pci_device_do_resume(pdev);

	if (!rc)
		ata_host_resume(host);

	return rc;
}
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6094 #endif
6095 #endif
6096
6097
6098
6099
6100
6101
6102
6103
6104
6105
6106
6107
/**
 *	ata_platform_remove_one - Platform layer callback for device removal
 *	@pdev: Platform device that was removed
 *
 *	Detach all ports of the host attached to @pdev; resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from platform layer (may sleep).
 */
int ata_platform_remove_one(struct platform_device *pdev)
{
	ata_host_detach(platform_get_drvdata(pdev));
	return 0;
}
EXPORT_SYMBOL_GPL(ata_platform_remove_one);
6117
6118 #ifdef CONFIG_ATA_FORCE
6119
/*
 * Helpers for building the libata.force parameter table below.  Each
 * macro expands to one ata_force_param initializer keyed by the option
 * name; the *_onoff variants additionally generate a "no"-prefixed
 * entry that applies the opposite setting of the same flag.
 */
#define force_cbl(name, flag) \
	{ #name, .cbl = (flag) }

#define force_spd_limit(spd, val) \
	{ #spd, .spd_limit = (val) }

#define force_xfer(mode, shift) \
	{ #mode, .xfer_mask = (1UL << (shift)) }

#define force_lflag_on(name, flags) \
	{ #name, .lflags_on = (flags) }

#define force_lflag_onoff(name, flags) \
	{ "no" #name, .lflags_on = (flags) }, \
	{ #name, .lflags_off = (flags) }

#define force_horkage_on(name, flag) \
	{ #name, .horkage_on = (flag) }

#define force_horkage_onoff(name, flag) \
	{ "no" #name, .horkage_on = (flag) }, \
	{ #name, .horkage_off = (flag) }
6142
/*
 * Table of everything the libata.force= kernel parameter can set.
 * Matched case-insensitively by ata_parse_force_one(); unambiguous
 * prefixes are accepted.  Several transfer modes have aliases (e.g.
 * udma2 / udma33 / udma/33 all select UDMA mode 2).
 */
static const struct ata_force_param force_tbl[] __initconst = {
	/* cables */
	force_cbl(40c,			ATA_CBL_PATA40),
	force_cbl(80c,			ATA_CBL_PATA80),
	force_cbl(short40c,		ATA_CBL_PATA40_SHORT),
	force_cbl(unk,			ATA_CBL_PATA_UNK),
	force_cbl(ign,			ATA_CBL_PATA_IGN),
	force_cbl(sata,			ATA_CBL_SATA),

	/* SATA link speed limit */
	force_spd_limit(1.5Gbps,	1),
	force_spd_limit(3.0Gbps,	2),

	/* transfer mode limits; name aliases map to the same mode */
	force_xfer(pio0,		ATA_SHIFT_PIO + 0),
	force_xfer(pio1,		ATA_SHIFT_PIO + 1),
	force_xfer(pio2,		ATA_SHIFT_PIO + 2),
	force_xfer(pio3,		ATA_SHIFT_PIO + 3),
	force_xfer(pio4,		ATA_SHIFT_PIO + 4),
	force_xfer(pio5,		ATA_SHIFT_PIO + 5),
	force_xfer(pio6,		ATA_SHIFT_PIO + 6),
	force_xfer(mwdma0,		ATA_SHIFT_MWDMA + 0),
	force_xfer(mwdma1,		ATA_SHIFT_MWDMA + 1),
	force_xfer(mwdma2,		ATA_SHIFT_MWDMA + 2),
	force_xfer(mwdma3,		ATA_SHIFT_MWDMA + 3),
	force_xfer(mwdma4,		ATA_SHIFT_MWDMA + 4),
	force_xfer(udma0,		ATA_SHIFT_UDMA + 0),
	force_xfer(udma16,		ATA_SHIFT_UDMA + 0),
	force_xfer(udma/16,		ATA_SHIFT_UDMA + 0),
	force_xfer(udma1,		ATA_SHIFT_UDMA + 1),
	force_xfer(udma25,		ATA_SHIFT_UDMA + 1),
	force_xfer(udma/25,		ATA_SHIFT_UDMA + 1),
	force_xfer(udma2,		ATA_SHIFT_UDMA + 2),
	force_xfer(udma33,		ATA_SHIFT_UDMA + 2),
	force_xfer(udma/33,		ATA_SHIFT_UDMA + 2),
	force_xfer(udma3,		ATA_SHIFT_UDMA + 3),
	force_xfer(udma44,		ATA_SHIFT_UDMA + 3),
	force_xfer(udma/44,		ATA_SHIFT_UDMA + 3),
	force_xfer(udma4,		ATA_SHIFT_UDMA + 4),
	force_xfer(udma66,		ATA_SHIFT_UDMA + 4),
	force_xfer(udma/66,		ATA_SHIFT_UDMA + 4),
	force_xfer(udma5,		ATA_SHIFT_UDMA + 5),
	force_xfer(udma100,		ATA_SHIFT_UDMA + 5),
	force_xfer(udma/100,		ATA_SHIFT_UDMA + 5),
	force_xfer(udma6,		ATA_SHIFT_UDMA + 6),
	force_xfer(udma133,		ATA_SHIFT_UDMA + 6),
	force_xfer(udma/133,		ATA_SHIFT_UDMA + 6),
	force_xfer(udma7,		ATA_SHIFT_UDMA + 7),

	/* link flags */
	force_lflag_on(nohrst,		ATA_LFLAG_NO_HRST),
	force_lflag_on(nosrst,		ATA_LFLAG_NO_SRST),
	force_lflag_on(norst,		ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST),
	force_lflag_on(rstonce,		ATA_LFLAG_RST_ONCE),
	force_lflag_onoff(dbdelay,	ATA_LFLAG_NO_DEBOUNCE_DELAY),

	/* per-device horkage flags */
	force_horkage_onoff(ncq,	ATA_HORKAGE_NONCQ),
	force_horkage_onoff(ncqtrim,	ATA_HORKAGE_NO_NCQ_TRIM),
	force_horkage_onoff(ncqati,	ATA_HORKAGE_NO_NCQ_ON_ATI),

	force_horkage_onoff(trim,	ATA_HORKAGE_NOTRIM),
	force_horkage_on(trim_zero,	ATA_HORKAGE_ZERO_AFTER_TRIM),
	force_horkage_on(max_trim_128m, ATA_HORKAGE_MAX_TRIM_128M),

	force_horkage_onoff(dma,	ATA_HORKAGE_NODMA),
	force_horkage_on(atapi_dmadir,	ATA_HORKAGE_ATAPI_DMADIR),
	force_horkage_on(atapi_mod16_dma, ATA_HORKAGE_ATAPI_MOD16_DMA),

	force_horkage_onoff(dmalog,	ATA_HORKAGE_NO_DMA_LOG),
	force_horkage_onoff(iddevlog,	ATA_HORKAGE_NO_ID_DEV_LOG),
	force_horkage_onoff(logdir,	ATA_HORKAGE_NO_LOG_DIR),

	force_horkage_on(max_sec_128,	ATA_HORKAGE_MAX_SEC_128),
	force_horkage_on(max_sec_1024,	ATA_HORKAGE_MAX_SEC_1024),
	force_horkage_on(max_sec_lba48,	ATA_HORKAGE_MAX_SEC_LBA48),

	force_horkage_onoff(lpm,	ATA_HORKAGE_NOLPM),
	force_horkage_onoff(setxfer,	ATA_HORKAGE_NOSETXFER),
	force_horkage_on(dump_id,	ATA_HORKAGE_DUMP_ID),

	force_horkage_on(disable,	ATA_HORKAGE_DISABLE),
};
6221
/**
 *	ata_parse_force_one - parse one libata.force= entry
 *	@cur: in/out: cursor into the parameter buffer; advanced past
 *	      this entry (and its trailing comma, if any) on return
 *	@force_ent: out: the parsed entry
 *	@reason: out: short explanation on failure
 *
 *	An entry has the form "[ID:]VAL" where ID is "PORT[.DEVICE]".
 *	The buffer is modified in place: separators are overwritten with
 *	NULs so the pieces become independent strings.  VAL is matched
 *	case-insensitively against force_tbl[]; a unique prefix is
 *	accepted, and an exact match always wins over prefix matches.
 *
 *	RETURNS:
 *	0 on success, -EINVAL on failure with *@reason set.
 */
static int __init ata_parse_force_one(char **cur,
				      struct ata_force_ent *force_ent,
				      const char **reason)
{
	char *start = *cur, *p = *cur;
	char *id, *val, *endp;
	const struct ata_force_param *match_fp = NULL;
	int nr_matches = 0, i;

	/* find where this entry ends and advance *cur past it */
	while (*p != '\0' && *p != ',')
		p++;

	if (*p == '\0')
		*cur = p;
	else
		*cur = p + 1;

	*p = '\0';

	/* split the optional "ID:" prefix from the value */
	p = strchr(start, ':');
	if (!p) {
		val = strstrip(start);
		goto parse_val;
	}
	*p = '\0';

	id = strstrip(start);
	val = strstrip(p + 1);

	/* split the optional ".DEVICE" suffix from the ID */
	p = strchr(id, '.');
	if (p) {
		*p++ = '\0';
		force_ent->device = simple_strtoul(p, &endp, 10);
		if (p == endp || *endp != '\0') {
			*reason = "invalid device";
			return -EINVAL;
		}
	}

	force_ent->port = simple_strtoul(id, &endp, 10);
	if (id == endp || *endp != '\0') {
		*reason = "invalid port/link";
		return -EINVAL;
	}

 parse_val:
	/* match against force_tbl[]; unambiguous prefixes are accepted */
	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
		const struct ata_force_param *fp = &force_tbl[i];

		if (strncasecmp(val, fp->name, strlen(val)))
			continue;

		nr_matches++;
		match_fp = fp;

		/* an exact match trumps any number of prefix matches */
		if (strcasecmp(val, fp->name) == 0) {
			nr_matches = 1;
			break;
		}
	}

	if (!nr_matches) {
		*reason = "unknown value";
		return -EINVAL;
	}
	if (nr_matches > 1) {
		*reason = "ambiguous value";
		return -EINVAL;
	}

	force_ent->param = *match_fp;

	return 0;
}
6300
6301 static void __init ata_parse_force_param(void)
6302 {
6303 int idx = 0, size = 1;
6304 int last_port = -1, last_device = -1;
6305 char *p, *cur, *next;
6306
6307
6308 for (p = ata_force_param_buf; *p; p++)
6309 if (*p == ',')
6310 size++;
6311
6312 ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
6313 if (!ata_force_tbl) {
6314 printk(KERN_WARNING "ata: failed to extend force table, "
6315 "libata.force ignored\n");
6316 return;
6317 }
6318
6319
6320 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6321 const char *reason = "";
6322 struct ata_force_ent te = { .port = -1, .device = -1 };
6323
6324 next = cur;
6325 if (ata_parse_force_one(&next, &te, &reason)) {
6326 printk(KERN_WARNING "ata: failed to parse force "
6327 "parameter \"%s\" (%s)\n",
6328 cur, reason);
6329 continue;
6330 }
6331
6332 if (te.port == -1) {
6333 te.port = last_port;
6334 te.device = last_device;
6335 }
6336
6337 ata_force_tbl[idx++] = te;
6338
6339 last_port = te.port;
6340 last_device = te.device;
6341 }
6342
6343 ata_force_tbl_size = idx;
6344 }
6345
/* Free the table allocated by ata_parse_force_param(), if any. */
static void ata_free_force_param(void)
{
	kfree(ata_force_tbl);
}
#else
/* CONFIG_ATA_FORCE=n: libata.force= support compiled out, no-op stubs */
static inline void ata_parse_force_param(void) { }
static inline void ata_free_force_param(void) { }
#endif
6354
6355 static int __init ata_init(void)
6356 {
6357 int rc;
6358
6359 ata_parse_force_param();
6360
6361 rc = ata_sff_init();
6362 if (rc) {
6363 ata_free_force_param();
6364 return rc;
6365 }
6366
6367 libata_transport_init();
6368 ata_scsi_transport_template = ata_attach_transport();
6369 if (!ata_scsi_transport_template) {
6370 ata_sff_exit();
6371 rc = -ENOMEM;
6372 goto err_out;
6373 }
6374
6375 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6376 return 0;
6377
6378 err_out:
6379 return rc;
6380 }
6381
/* Module unload: tear everything down in reverse order of ata_init(). */
static void __exit ata_exit(void)
{
	ata_release_transport(ata_scsi_transport_template);
	libata_transport_exit();
	ata_sff_exit();
	ata_free_force_param();
}
6389
/* Run ata_init() at subsys initcall level, before device driver initcalls. */
subsys_initcall(ata_init);
module_exit(ata_exit);
6392
/* shared ratelimit state: at most 1 message per HZ/5 jiffies interval */
static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);

/**
 *	ata_ratelimit - rate limiting helper for libata printouts
 *
 *	RETURNS:
 *	Non-zero if the caller may print now, 0 if the message should
 *	be suppressed.
 */
int ata_ratelimit(void)
{
	return __ratelimit(&ratelimit);
}
EXPORT_SYMBOL_GPL(ata_ratelimit);
6400
6401
6402
6403
6404
6405
6406
6407
6408
6409
6410
6411
6412
6413
6414
6415 void ata_msleep(struct ata_port *ap, unsigned int msecs)
6416 {
6417 bool owns_eh = ap && ap->host->eh_owner == current;
6418
6419 if (owns_eh)
6420 ata_eh_release(ap);
6421
6422 if (msecs < 20) {
6423 unsigned long usecs = msecs * USEC_PER_MSEC;
6424 usleep_range(usecs, usecs + 50);
6425 } else {
6426 msleep(msecs);
6427 }
6428
6429 if (owns_eh)
6430 ata_eh_acquire(ap);
6431 }
6432 EXPORT_SYMBOL_GPL(ata_msleep);
6433
6434
6435
6436
6437
6438
6439
6440
6441
6442
6443
6444
6445
6446
6447
6448
6449
6450
6451
6452
6453
6454
6455
6456
6457
6458 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6459 unsigned long interval, unsigned long timeout)
6460 {
6461 unsigned long deadline;
6462 u32 tmp;
6463
6464 tmp = ioread32(reg);
6465
6466
6467
6468
6469
6470 deadline = ata_deadline(jiffies, timeout);
6471
6472 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6473 ata_msleep(ap, interval);
6474 tmp = ioread32(reg);
6475 }
6476
6477 return tmp;
6478 }
6479 EXPORT_SYMBOL_GPL(ata_wait_register);
6480
6481
6482
6483
/* qc_issue callback for the dummy port ops: reject every command */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
6488
/* error_handler callback for the dummy port ops: nothing to recover */
static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* truly dummy */
}
6493
/*
 * Port operations that make a port do nothing: command preparation is
 * a no-op, every issued command fails with AC_ERR_SYSTEM, and error
 * handling performs no recovery.
 */
struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.error_handler		= ata_dummy_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6502
/* port_info wrapper around ata_dummy_port_ops for unused ports */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6507
/**
 *	ata_print_version - print a driver version string
 *	@dev: device the version belongs to
 *	@version: version string to print
 *
 *	Emits "version <string>" at KERN_DEBUG level via dev_printk().
 */
void ata_print_version(const struct device *dev, const char *version)
{
	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
EXPORT_SYMBOL(ata_print_version);
6513
/* Make the SFF/BMDMA tracepoints available to GPL modules. */
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_status);