0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016 #include <linux/compat.h>
0017 #include <linux/slab.h>
0018 #include <linux/kernel.h>
0019 #include <linux/blkdev.h>
0020 #include <linux/spinlock.h>
0021 #include <linux/export.h>
0022 #include <scsi/scsi.h>
0023 #include <scsi/scsi_host.h>
0024 #include <scsi/scsi_cmnd.h>
0025 #include <scsi/scsi_eh.h>
0026 #include <scsi/scsi_device.h>
0027 #include <scsi/scsi_tcq.h>
0028 #include <scsi/scsi_transport.h>
0029 #include <linux/libata.h>
0030 #include <linux/hdreg.h>
0031 #include <linux/uaccess.h>
0032 #include <linux/suspend.h>
0033 #include <asm/unaligned.h>
0034 #include <linux/ioprio.h>
0035 #include <linux/of.h>
0036
0037 #include "libata.h"
0038 #include "libata-transport.h"
0039
/* Size of the scratch buffer used to build simulated SCSI command replies. */
#define ATA_SCSI_RBUF_SIZE 576

/* Protects all access to the shared ata_scsi_rbuf[] scratch buffer. */
static DEFINE_SPINLOCK(ata_scsi_rbuf_lock);
static u8 ata_scsi_rbuf[ATA_SCSI_RBUF_SIZE];

/*
 * Translator from a SCSI command (qc->scsicmd) to an ATA taskfile in qc->tf.
 * Returns 0 when the qc should be issued; nonzero when the command has been
 * completed locally (e.g. scmd->result already set) and must not be issued.
 */
typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc);

static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
					const struct scsi_device *scsidev);

/* Mode page codes and lengths used by the MODE SENSE/SELECT simulation. */
#define RW_RECOVERY_MPAGE 0x1
#define RW_RECOVERY_MPAGE_LEN 12
#define CACHE_MPAGE 0x8
#define CACHE_MPAGE_LEN 20
#define CONTROL_MPAGE 0xa
#define CONTROL_MPAGE_LEN 12
#define ALL_MPAGES 0x3f
#define ALL_SUB_MPAGES 0xff
0058
0059
/*
 * Default contents for the simulated mode pages.  Layouts follow the SPC/SBC
 * mode page formats: byte 0 is the page code, byte 1 the page length minus
 * the two-byte header.
 */
static const u8 def_rw_recovery_mpage[RW_RECOVERY_MPAGE_LEN] = {
	RW_RECOVERY_MPAGE,
	RW_RECOVERY_MPAGE_LEN - 2,
	(1 << 7),	/* byte 2: AWRE (auto write reallocation) set */
	0,		/* read retry count */
	0, 0, 0, 0,
	0,		/* write retry count */
	0, 0, 0
};

static const u8 def_cache_mpage[CACHE_MPAGE_LEN] = {
	CACHE_MPAGE,
	CACHE_MPAGE_LEN - 2,
	0,		/* byte 2: WCE / RCD bits, filled in at run time */
	0, 0, 0, 0, 0, 0, 0, 0, 0,
	0,		/* byte 12: DRA bit */
	0, 0, 0, 0, 0, 0, 0
};

static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
	CONTROL_MPAGE,
	CONTROL_MPAGE_LEN - 2,
	2,		/* byte 2: DSENSE=0, GLTSD=1 */
	0,		/* [QAM+QERR may be 1, see 05-359r1] */
	0, 0, 0, 0, 0xff, 0xff,
	0, 30		/* extended self test time, see 05-359r1 */
};
0087
/**
 * ata_scsi_park_show - sysfs show handler for the "unload_heads" attribute
 * @device: device this attribute belongs to (a SCSI device)
 * @attr: the attribute being read
 * @buf: output buffer
 *
 * Prints the number of milliseconds remaining before the drive's heads
 * are unparked, or 0 if no park request is currently in effect.
 *
 * Returns the number of bytes written to @buf, or a negative errno.
 */
static ssize_t ata_scsi_park_show(struct device *device,
				  struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(device);
	struct ata_port *ap;
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long now;
	unsigned int msecs;
	int rc = 0;

	ap = ata_shost_to_port(sdev->host);

	/* Port lock protects dev lookup and the EH park bookkeeping. */
	spin_lock_irq(ap->lock);
	dev = ata_scsi_find_dev(ap, sdev);
	if (!dev) {
		rc = -ENODEV;
		goto unlock;
	}
	if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
		/* Device cannot unload its heads at all. */
		rc = -EOPNOTSUPP;
		goto unlock;
	}

	link = dev->link;
	now = jiffies;
	/* Heads are considered parked only while EH is running, the device
	 * is marked unloaded, and the unpark deadline has not yet passed. */
	if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS &&
	    link->eh_context.unloaded_mask & (1 << dev->devno) &&
	    time_after(dev->unpark_deadline, now))
		msecs = jiffies_to_msecs(dev->unpark_deadline - now);
	else
		msecs = 0;

unlock:
	spin_unlock_irq(ap->lock);

	return rc ? rc : sysfs_emit(buf, "%u\n", msecs);
}
0126
/**
 * ata_scsi_park_store - sysfs store handler for the "unload_heads" attribute
 * @device: device this attribute belongs to (a SCSI device)
 * @attr: the attribute being written
 * @buf: user input; a decimal number of milliseconds, or -1 / -2
 * @len: length of @buf
 *
 * Positive input schedules a head park via EH for that many milliseconds
 * (clamped to ATA_TMOUT_MAX_PARK).  -1 re-enables head unloading for the
 * device, -2 disables it (sets ATA_DFLAG_NO_UNLOAD).
 *
 * Returns @len on success, negative errno on failure.
 */
static ssize_t ata_scsi_park_store(struct device *device,
				   struct device_attribute *attr,
				   const char *buf, size_t len)
{
	struct scsi_device *sdev = to_scsi_device(device);
	struct ata_port *ap;
	struct ata_device *dev;
	long int input;
	unsigned long flags;
	int rc;

	rc = kstrtol(buf, 10, &input);
	if (rc)
		return rc;
	if (input < -2)
		return -EINVAL;
	if (input > ATA_TMOUT_MAX_PARK) {
		/* Clamp the timeout but still perform the park; the clamped
		 * request proceeds and -EOVERFLOW is reported to the caller. */
		rc = -EOVERFLOW;
		input = ATA_TMOUT_MAX_PARK;
	}

	ap = ata_shost_to_port(sdev->host);

	spin_lock_irqsave(ap->lock, flags);
	dev = ata_scsi_find_dev(ap, sdev);
	if (unlikely(!dev)) {
		rc = -ENODEV;
		goto unlock;
	}
	/* Head parking only makes sense for ATA/ZAC disks. */
	if (dev->class != ATA_DEV_ATA &&
	    dev->class != ATA_DEV_ZAC) {
		rc = -EOPNOTSUPP;
		goto unlock;
	}

	if (input >= 0) {
		if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
			rc = -EOPNOTSUPP;
			goto unlock;
		}

		/* Hand the park request to EH and wake anyone waiting. */
		dev->unpark_deadline = ata_deadline(jiffies, input);
		dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK;
		ata_port_schedule_eh(ap);
		complete(&ap->park_req_pending);
	} else {
		switch (input) {
		case -1:
			dev->flags &= ~ATA_DFLAG_NO_UNLOAD;
			break;
		case -2:
			dev->flags |= ATA_DFLAG_NO_UNLOAD;
			break;
		}
	}
unlock:
	spin_unlock_irqrestore(ap->lock, flags);

	return rc ? rc : len;
}
/* The "unload_heads" sysfs attribute, backed by the park show/store above. */
DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
	    ata_scsi_park_show, ata_scsi_park_store);
EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
0190
0191 void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
0192 u8 sk, u8 asc, u8 ascq)
0193 {
0194 bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE);
0195
0196 if (!cmd)
0197 return;
0198
0199 scsi_build_sense(cmd, d_sense, sk, asc, ascq);
0200 }
0201
0202 void ata_scsi_set_sense_information(struct ata_device *dev,
0203 struct scsi_cmnd *cmd,
0204 const struct ata_taskfile *tf)
0205 {
0206 u64 information;
0207
0208 if (!cmd)
0209 return;
0210
0211 information = ata_tf_read_block(tf, dev);
0212 if (information == U64_MAX)
0213 return;
0214
0215 scsi_set_sense_information(cmd->sense_buffer,
0216 SCSI_SENSE_BUFFERSIZE, information);
0217 }
0218
0219 static void ata_scsi_set_invalid_field(struct ata_device *dev,
0220 struct scsi_cmnd *cmd, u16 field, u8 bit)
0221 {
0222 ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x24, 0x0);
0223
0224 scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
0225 field, bit, 1);
0226 }
0227
0228 static void ata_scsi_set_invalid_parameter(struct ata_device *dev,
0229 struct scsi_cmnd *cmd, u16 field)
0230 {
0231
0232 ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x26, 0x0);
0233 scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
0234 field, 0xff, 0);
0235 }
0236
/* sysfs attributes common to every ATA SCSI device. */
static struct attribute *ata_common_sdev_attrs[] = {
	&dev_attr_unload_heads.attr,
	NULL
};

static const struct attribute_group ata_common_sdev_attr_group = {
	.attrs = ata_common_sdev_attrs
};

/* Attribute groups for LLDs to plug into their scsi_host_template. */
const struct attribute_group *ata_common_sdev_groups[] = {
	&ata_common_sdev_attr_group,
	NULL
};
EXPORT_SYMBOL_GPL(ata_common_sdev_groups);
0251
0252
0253
0254
0255
0256
0257
0258
0259
0260
0261
0262
0263
0264
0265
0266
0267
0268
0269
0270 int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
0271 sector_t capacity, int geom[])
0272 {
0273 geom[0] = 255;
0274 geom[1] = 63;
0275 sector_div(capacity, 255*63);
0276 geom[2] = capacity;
0277
0278 return 0;
0279 }
0280 EXPORT_SYMBOL_GPL(ata_std_bios_param);
0281
0282
0283
0284
0285
0286
0287
0288
0289
0290
0291
/**
 * ata_scsi_unlock_native_capacity - unlock native capacity
 * @sdev: SCSI device to adjust device capacity for
 *
 * If the device is currently using an HPA-limited capacity (visible
 * sectors fewer than native sectors), request HPA unlocking and a reset
 * through EH, then wait for EH to finish.
 *
 * LOCKING:
 * Takes and releases ap->lock; sleeps in ata_port_wait_eh().
 */
void ata_scsi_unlock_native_capacity(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct ata_device *dev;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	dev = ata_scsi_find_dev(ap, sdev);
	if (dev && dev->n_sectors < dev->n_native_sectors) {
		/* HPA is hiding sectors; ask EH to unlock it via a reset. */
		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
		dev->link->eh_info.action |= ATA_EH_RESET;
		ata_port_schedule_eh(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);
	/* Block until the scheduled EH (if any) has completed. */
	ata_port_wait_eh(ap);
}
EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
0311
0312
0313
0314
0315
0316
0317
0318
0319
0320
0321
0322
0323
/**
 * ata_get_identity - HDIO_GET_IDENTITY ioctl handler
 * @ap: port the device is attached to
 * @sdev: SCSI device to get the identify data for
 * @arg: user buffer; receives ATA_ID_WORDS u16 words of IDENTIFY data
 *
 * Copies the cached IDENTIFY page to userspace, with the product,
 * firmware revision and serial number strings fixed up from ATA string
 * byte order to readable form in place.
 *
 * Returns 0 on success, -ENOMSG if no ATA device is attached to @sdev,
 * -EFAULT on copy failure.
 */
static int ata_get_identity(struct ata_port *ap, struct scsi_device *sdev,
			    void __user *arg)
{
	struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
	u16 __user *dst = arg;
	char buf[40];

	if (!dev)
		return -ENOMSG;

	if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16)))
		return -EFAULT;

	/* Overwrite the string fields with their byte-swapped readable form. */
	ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN);
	if (copy_to_user(dst + ATA_ID_PROD, buf, ATA_ID_PROD_LEN))
		return -EFAULT;

	ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN);
	if (copy_to_user(dst + ATA_ID_FW_REV, buf, ATA_ID_FW_REV_LEN))
		return -EFAULT;

	ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN);
	if (copy_to_user(dst + ATA_ID_SERNO, buf, ATA_ID_SERNO_LEN))
		return -EFAULT;

	return 0;
}
0351
0352
0353
0354
0355
0356
0357
0358
0359
0360
0361
0362
/**
 * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
 * @scsidev: Device to which we are issuing command
 * @arg: User provided data for issuing command
 *
 * Builds an ATA_16 pass-through CDB from the 4-byte HDIO_DRIVE_CMD header
 * (args[0]=command, args[1]/args[2]=register values, args[3]=sector count),
 * issues it, and copies the result registers and any data back to userspace.
 *
 * LOCKING:
 * Defined by the SCSI layer.  We don't really care.
 *
 * RETURNS:
 * Zero on success, negative errno on error.
 */
int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
{
	int rc = 0;
	u8 sensebuf[SCSI_SENSE_BUFFERSIZE];
	u8 scsi_cmd[MAX_COMMAND_SIZE];
	u8 args[4], *argbuf = NULL;
	int argsize = 0;
	enum dma_data_direction data_dir;
	struct scsi_sense_hdr sshdr;
	int cmd_result;

	if (arg == NULL)
		return -EINVAL;

	if (copy_from_user(args, arg, sizeof(args)))
		return -EFAULT;

	memset(sensebuf, 0, sizeof(sensebuf));
	memset(scsi_cmd, 0, sizeof(scsi_cmd));

	if (args[3]) {
		/* args[3] sectors of data are transferred from the device. */
		argsize = ATA_SECT_SIZE * args[3];
		argbuf = kmalloc(argsize, GFP_KERNEL);
		if (argbuf == NULL) {
			rc = -ENOMEM;
			goto error;
		}

		scsi_cmd[1] = (4 << 1);	/* PIO Data-in protocol */
		scsi_cmd[2] = 0x0e;	/* length in sector count field,
					   transfer from device, 512B blocks */
		data_dir = DMA_FROM_DEVICE;
	} else {
		scsi_cmd[1] = (3 << 1);	/* Non-data protocol */
		scsi_cmd[2] = 0x20;	/* CK_COND: request result taskfile */
		data_dir = DMA_NONE;
	}

	scsi_cmd[0] = ATA_16;

	scsi_cmd[4] = args[2];
	if (args[0] == ATA_CMD_SMART) {	/* SMART needs the LBA pass values */
		scsi_cmd[6]  = args[3];
		scsi_cmd[8]  = args[1];
		scsi_cmd[10] = ATA_SMART_LBAM_PASS;
		scsi_cmd[12] = ATA_SMART_LBAH_PASS;
	} else {
		scsi_cmd[6]  = args[1];
	}
	scsi_cmd[14] = args[0];	/* the ATA command opcode */

	/* Good values for timeout and retries?  Values below
	   from scsi_ioctl_send_command() for default case... */
	cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
				  sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL);

	if (cmd_result < 0) {
		rc = cmd_result;
		goto error;
	}
	if (scsi_sense_valid(&sshdr)) {/* sense data available */
		u8 *desc = sensebuf + 8;

		/* If we set cc then ATA pass-through will cause a
		 * check condition even if no error. Filter that. */
		if (scsi_status_is_check_condition(cmd_result)) {
			if (sshdr.sense_key == RECOVERED_ERROR &&
			    sshdr.asc == 0 && sshdr.ascq == 0x1d)
				cmd_result &= ~SAM_STAT_CHECK_CONDITION;
		}

		/* Send userspace a few ATA registers (same as drivers/ide) */
		if (sensebuf[0] == 0x72 &&	/* descriptor format sense */
		    desc[0] == 0x09) {		/* ATA return descriptor */
			args[0] = desc[13];	/* status */
			args[1] = desc[3];	/* error */
			args[2] = desc[5];	/* sector count (LSB) */
			if (copy_to_user(arg, args, sizeof(args)))
				rc = -EFAULT;
		}
	}


	if (cmd_result) {
		rc = -EIO;
		goto error;
	}

	if ((argbuf)
	 && copy_to_user(arg + sizeof(args), argbuf, argsize))
		rc = -EFAULT;
error:
	kfree(argbuf);
	return rc;
}
0458
0459
0460
0461
0462
0463
0464
0465
0466
0467
0468
0469
0470 int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
0471 {
0472 int rc = 0;
0473 u8 sensebuf[SCSI_SENSE_BUFFERSIZE];
0474 u8 scsi_cmd[MAX_COMMAND_SIZE];
0475 u8 args[7];
0476 struct scsi_sense_hdr sshdr;
0477 int cmd_result;
0478
0479 if (arg == NULL)
0480 return -EINVAL;
0481
0482 if (copy_from_user(args, arg, sizeof(args)))
0483 return -EFAULT;
0484
0485 memset(sensebuf, 0, sizeof(sensebuf));
0486 memset(scsi_cmd, 0, sizeof(scsi_cmd));
0487 scsi_cmd[0] = ATA_16;
0488 scsi_cmd[1] = (3 << 1);
0489 scsi_cmd[2] = 0x20;
0490 scsi_cmd[4] = args[1];
0491 scsi_cmd[6] = args[2];
0492 scsi_cmd[8] = args[3];
0493 scsi_cmd[10] = args[4];
0494 scsi_cmd[12] = args[5];
0495 scsi_cmd[13] = args[6] & 0x4f;
0496 scsi_cmd[14] = args[0];
0497
0498
0499
0500 cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0,
0501 sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL);
0502
0503 if (cmd_result < 0) {
0504 rc = cmd_result;
0505 goto error;
0506 }
0507 if (scsi_sense_valid(&sshdr)) {
0508 u8 *desc = sensebuf + 8;
0509
0510
0511
0512 if (cmd_result & SAM_STAT_CHECK_CONDITION) {
0513 if (sshdr.sense_key == RECOVERED_ERROR &&
0514 sshdr.asc == 0 && sshdr.ascq == 0x1d)
0515 cmd_result &= ~SAM_STAT_CHECK_CONDITION;
0516 }
0517
0518
0519 if (sensebuf[0] == 0x72 &&
0520 desc[0] == 0x09) {
0521 args[0] = desc[13];
0522 args[1] = desc[3];
0523 args[2] = desc[5];
0524 args[3] = desc[7];
0525 args[4] = desc[9];
0526 args[5] = desc[11];
0527 args[6] = desc[12];
0528 if (copy_to_user(arg, args, sizeof(args)))
0529 rc = -EFAULT;
0530 }
0531 }
0532
0533 if (cmd_result) {
0534 rc = -EIO;
0535 goto error;
0536 }
0537
0538 error:
0539 return rc;
0540 }
0541
0542 static bool ata_ioc32(struct ata_port *ap)
0543 {
0544 if (ap->flags & ATA_FLAG_PIO_DMA)
0545 return true;
0546 if (ap->pflags & ATA_PFLAG_PIO32)
0547 return true;
0548 return false;
0549 }
0550
0551
0552
0553
0554
/*
 * This handles both native and compat versions of the HDIO ioctls on
 * behalf of SAS and libata hosts.
 */
int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
		       unsigned int cmd, void __user *arg)
{
	unsigned long val;
	int rc = -EINVAL;
	unsigned long flags;

	switch (cmd) {
	case HDIO_GET_32BIT:
		spin_lock_irqsave(ap->lock, flags);
		val = ata_ioc32(ap);
		spin_unlock_irqrestore(ap->lock, flags);
#ifdef CONFIG_COMPAT
		/* compat callers expect a 32-bit long at *arg */
		if (in_compat_syscall())
			return put_user(val, (compat_ulong_t __user *)arg);
#endif
		return put_user(val, (unsigned long __user *)arg);

	case HDIO_SET_32BIT:
		/* value is passed in the pointer itself, not through memory */
		val = (unsigned long) arg;
		rc = 0;
		spin_lock_irqsave(ap->lock, flags);
		if (ap->pflags & ATA_PFLAG_PIO32CHANGE) {
			/* port supports switching the PIO32 mode */
			if (val)
				ap->pflags |= ATA_PFLAG_PIO32;
			else
				ap->pflags &= ~ATA_PFLAG_PIO32;
		} else {
			/* not switchable: only accept the current setting */
			if (val != ata_ioc32(ap))
				rc = -EINVAL;
		}
		spin_unlock_irqrestore(ap->lock, flags);
		return rc;

	case HDIO_GET_IDENTITY:
		return ata_get_identity(ap, scsidev, arg);

	case HDIO_DRIVE_CMD:
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		return ata_cmd_ioctl(scsidev, arg);

	case HDIO_DRIVE_TASK:
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		return ata_task_ioctl(scsidev, arg);

	default:
		rc = -ENOTTY;
		break;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(ata_sas_scsi_ioctl);
0610
0611 int ata_scsi_ioctl(struct scsi_device *scsidev, unsigned int cmd,
0612 void __user *arg)
0613 {
0614 return ata_sas_scsi_ioctl(ata_shost_to_port(scsidev->host),
0615 scsidev, cmd, arg);
0616 }
0617 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
0618
0619
0620
0621
0622
0623
0624
0625
0626
0627
0628
0629
0630
0631
0632
0633
0634
0635
0636
0637
/**
 * ata_scsi_qc_new - acquire a new ata_queued_cmd for a SCSI command
 * @dev: ATA device the command is for
 * @cmd: SCSI command to attach to the qc
 *
 * Picks the command tag (budget token on SAS hosts, blk-mq request tag
 * otherwise), initializes the qc and attaches @cmd's scatterlist to it.
 *
 * RETURNS:
 * The initialized qc, or NULL if the port is frozen or the tag is
 * invalid — in which case @cmd is completed with TASK SET FULL so the
 * midlayer will retry it later.
 */
static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
					      struct scsi_cmnd *cmd)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_queued_cmd *qc;
	int tag;

	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		goto fail;

	if (ap->flags & ATA_FLAG_SAS_HOST) {
		/* SAS hosts hand out their own per-command budget token,
		 * which doubles as the qc tag here. */
		if (WARN_ON_ONCE(cmd->budget_token >= ATA_MAX_QUEUE))
			goto fail;
		tag = cmd->budget_token;
	} else {
		tag = scsi_cmd_to_rq(cmd)->tag;
	}

	qc = __ata_qc_from_tag(ap, tag);
	qc->tag = qc->hw_tag = tag;
	qc->ap = ap;
	qc->dev = dev;

	ata_qc_reinit(qc);

	qc->scsicmd = cmd;
	qc->scsidone = scsi_done;

	qc->sg = scsi_sglist(cmd);
	qc->n_elem = scsi_sg_count(cmd);

	/* Propagate the request's quiet flag so errors are not logged. */
	if (scsi_cmd_to_rq(cmd)->rq_flags & RQF_QUIET)
		qc->flags |= ATA_QCFLAG_QUIET;

	return qc;

fail:
	/* No qc available: ask the midlayer to retry later. */
	set_host_byte(cmd, DID_OK);
	set_status_byte(cmd, SAM_STAT_TASK_SET_FULL);
	scsi_done(cmd);
	return NULL;
}
0684
0685 static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
0686 {
0687 struct scsi_cmnd *scmd = qc->scsicmd;
0688
0689 qc->extrabytes = scmd->extra_len;
0690 qc->nbytes = scsi_bufflen(scmd) + qc->extrabytes;
0691 }
0692
0693
0694
0695
0696
0697
0698
0699
0700
0701
0702
0703
0704
/**
 * ata_dump_status - dump a decoded ATA status and error register
 * @ap: port the taskfile came from
 * @tf: taskfile whose status/error registers are decoded
 *
 * Logs the status register bit by bit; if BSY is set the other bits
 * (and the error register) are meaningless, so only "Busy" is printed.
 */
static void ata_dump_status(struct ata_port *ap, struct ata_taskfile *tf)
{
	u8 stat = tf->status, err = tf->error;

	if (stat & ATA_BUSY) {
		/* While BSY is set no other status bit is valid. */
		ata_port_warn(ap, "status=0x%02x {Busy} ", stat);
	} else {
		ata_port_warn(ap, "status=0x%02x { %s%s%s%s%s%s%s} ", stat,
			      stat & ATA_DRDY ? "DriveReady " : "",
			      stat & ATA_DF ? "DeviceFault " : "",
			      stat & ATA_DSC ? "SeekComplete " : "",
			      stat & ATA_DRQ ? "DataRequest " : "",
			      stat & ATA_CORR ? "CorrectedError " : "",
			      stat & ATA_SENSE ? "Sense " : "",
			      stat & ATA_ERR ? "Error " : "");
		if (err)
			ata_port_warn(ap, "error=0x%02x {%s%s%s%s%s%s", err,
				      err & ATA_ABORTED ?
				      "DriveStatusError " : "",
				      err & ATA_ICRC ?
				      (err & ATA_ABORTED ?
				       "BadCRC " : "Sector ") : "",
				      err & ATA_UNC ? "UncorrectableError " : "",
				      err & ATA_IDNF ? "SectorIdNotFound " : "",
				      err & ATA_TRK0NF ? "TrackZeroNotFound " : "",
				      err & ATA_AMNF ? "AddrMarkNotFound " : "");
	}
}
0733
0734
0735
0736
0737
0738
0739
0740
0741
0742
0743
0744
0745
0746
0747
0748
0749
0750
/**
 * ata_to_sense_error - convert ATA error to SCSI sense data
 * @id: ATA device number (for the log message only)
 * @drv_stat: value of the ATA status register
 * @drv_err: value of the ATA error register
 * @sk: output; resulting SCSI sense key
 * @asc: output; resulting additional sense code
 * @ascq: output; resulting additional sense code qualifier
 * @verbose: nonzero to log the translation
 *
 * Maps an ATA status/error register pair to a SCSI SK/ASC/ASCQ triple
 * via two lookup tables: first by error-register bits (only valid when
 * BSY is clear), then by status-register bits, finally falling back to
 * a generic ABORTED COMMAND.
 */
static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
			       u8 *asc, u8 *ascq, int verbose)
{
	int i;

	/* Entries: {error-register bit mask, SK, ASC, ASCQ}. */
	static const unsigned char sense_table[][4] = {
		/* ICRC|UNC|IDNF|AMNF combination */
		{0xd1,		ABORTED_COMMAND, 0x00, 0x00},
		/* ICRC|UNC|IDNF combination */
		{0xd0,		ABORTED_COMMAND, 0x00, 0x00},
		/* ICRC|UNC|AMNF combination */
		{0x61,		HARDWARE_ERROR, 0x00, 0x00},
		/* ICRC|ABRT -> SCSI parity error */
		{0x84,		ABORTED_COMMAND, 0x47, 0x00},
		/* MC|IDNF|ABRT|TRK0NF|AMNF -> LUN not ready */
		{0x37,		NOT_READY, 0x04, 0x00},
		/* MCR|AMNF -> LUN not ready */
		{0x09,		NOT_READY, 0x04, 0x00},
		/* AMNF -> address mark not found for data field */
		{0x01,		MEDIUM_ERROR, 0x13, 0x00},
		/* TRK0NF -> hardware error */
		{0x02,		HARDWARE_ERROR, 0x00, 0x00},
		/* Abort is skipped here: 0x04 would shadow later entries
		 * and an abort may be caused by any of the other bits. */
		/* MCR - media change requested -> LUN not ready */
		{0x08,		NOT_READY, 0x04, 0x00},
		/* IDNF -> logical block address out of range */
		{0x10,		ILLEGAL_REQUEST, 0x21, 0x00},
		/* MC - media changed -> not-ready-to-ready transition */
		{0x20,		UNIT_ATTENTION, 0x28, 0x00},
		/* UNC -> unrecovered read error */
		{0x40,		MEDIUM_ERROR, 0x11, 0x04},
		/* ICRC (without ABRT) -> unrecovered read error */
		{0x80,		MEDIUM_ERROR, 0x11, 0x04},
		/* end marker */
		{0xFF, 0xFF, 0xFF, 0xFF},
	};
	/* Entries: {status-register bit mask, SK, ASC, ASCQ}. */
	static const unsigned char stat_table[][4] = {
		/* BSY -> fake a parity error for now */
		{0x80,		ABORTED_COMMAND, 0x47, 0x00},
		/* DRDY -> LBA out of range */
		{0x40,		ILLEGAL_REQUEST, 0x21, 0x04},
		/* DF - device fault -> internal target failure */
		{0x20,		HARDWARE_ERROR, 0x44, 0x00},
		/* DRQ stuck -> treat as aborted */
		{0x08,		ABORTED_COMMAND, 0x47, 0x00},
		/* CORR - corrected data -> recovered error */
		{0x04,		RECOVERED_ERROR, 0x11, 0x00},
		/* end marker */
		{0xFF, 0xFF, 0xFF, 0xFF},
	};

	/*
	 * The error register is not valid while BSY is set, so ignore it
	 * in that case and decode from the status register alone.
	 */
	if (drv_stat & ATA_BUSY) {
		drv_err = 0;
	}

	if (drv_err) {
		/* Look for the first matching error-register bit pattern. */
		for (i = 0; sense_table[i][0] != 0xFF; i++) {
			/* match entries that are fully contained in drv_err */
			if ((sense_table[i][0] & drv_err) ==
			    sense_table[i][0]) {
				*sk = sense_table[i][1];
				*asc = sense_table[i][2];
				*ascq = sense_table[i][3];
				goto translate_done;
			}
		}
	}

	/*
	 * Fall back to decoding individual status register bits; any
	 * single matching bit selects the entry.
	 */
	for (i = 0; stat_table[i][0] != 0xFF; i++) {
		if (stat_table[i][0] & drv_stat) {
			*sk = stat_table[i][1];
			*asc = stat_table[i][2];
			*ascq = stat_table[i][3];
			goto translate_done;
		}
	}

	/*
	 * Nothing matched at all: report a generic aborted command so the
	 * caller at least gets a failure indication.
	 */
	*sk = ABORTED_COMMAND;
	*asc = 0x00;
	*ascq = 0x00;

 translate_done:
	if (verbose)
		pr_err("ata%u: translated ATA stat/err 0x%02x/%02x to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
		       id, drv_stat, drv_err, *sk, *asc, *ascq);
	return;
}
0864
0865
0866
0867
0868
0869
0870
0871
0872
0873
0874
0875
0876
0877
0878
0879
0880
0881
/**
 * ata_gen_passthru_sense - generate ATA PASS-THROUGH sense data
 * @qc: command whose result taskfile is converted to sense data
 *
 * Builds sense data for an ATA PASS-THROUGH command: on error the
 * status/error registers are translated to SK/ASC/ASCQ, otherwise a
 * RECOVERED ERROR / "ATA pass through information available" (0x00/0x1D)
 * sense is built.  The result taskfile registers are then stored in an
 * ATA Status Return descriptor (descriptor-format sense) or packed into
 * the fixed-format sense bytes.
 */
static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *cmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->result_tf;
	unsigned char *sb = cmd->sense_buffer;
	unsigned char *desc = sb + 8;
	int verbose = qc->ap->ops->error_handler == NULL;
	u8 sense_key, asc, ascq;

	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);

	/*
	 * Use ata_to_sense_error() to map status register bits
	 * onto sense key, asc & ascq.
	 */
	if (qc->err_mask ||
	    tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
		ata_to_sense_error(qc->ap->print_id, tf->status, tf->error,
				   &sense_key, &asc, &ascq, verbose);
		ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq);
	} else {
		/*
		 * No error: report "ATA pass through information
		 * available" so the caller knows the descriptor holds
		 * valid result registers.
		 */
		scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D);
	}

	if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) {
		u8 len;

		/* descriptor format: find or append an ATA descriptor */
		len = sb[7];
		desc = (char *)scsi_sense_desc_find(sb, len + 8, 9);
		if (!desc) {
			/* append a 14-byte ATA Status Return descriptor,
			 * if there is room for it */
			if (SCSI_SENSE_BUFFERSIZE < len + 14)
				return;
			sb[7] = len + 14;
			desc = sb + 8 + len;
		}
		desc[0] = 9;	/* descriptor type: ATA Status Return */
		desc[1] = 12;	/* additional length */

		/*
		 * Copy the low-order taskfile registers into the
		 * descriptor.
		 */
		desc[2] = 0x00;
		desc[3] = tf->error;
		desc[5] = tf->nsect;
		desc[7] = tf->lbal;
		desc[9] = tf->lbam;
		desc[11] = tf->lbah;
		desc[12] = tf->device;
		desc[13] = tf->status;

		/*
		 * For LBA48 commands also set the EXTEND bit and store
		 * the high-order register bytes.
		 */
		if (tf->flags & ATA_TFLAG_LBA48) {
			desc[2] |= 0x01;
			desc[4] = tf->hob_nsect;
			desc[6] = tf->hob_lbal;
			desc[8] = tf->hob_lbam;
			desc[10] = tf->hob_lbah;
		}
	} else {
		/* Fixed format sense: pack the registers into the
		 * INFORMATION and COMMAND-SPECIFIC fields. */
		desc[0] = tf->error;
		desc[1] = tf->status;
		desc[2] = tf->device;
		desc[3] = tf->nsect;
		desc[7] = 0;
		if (tf->flags & ATA_TFLAG_LBA48)  {
			desc[8] |= 0x80;	/* EXTEND */
			if (tf->hob_nsect)
				desc[8] |= 0x40;	/* upper nsect nonzero */
			if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah)
				desc[8] |= 0x20;	/* upper LBA nonzero */
		}
		desc[9] = tf->lbal;
		desc[10] = tf->lbam;
		desc[11] = tf->lbah;
	}
}
0966
0967
0968
0969
0970
0971
0972
0973
0974
0975
0976
/**
 * ata_gen_ata_sense - generate a SCSI ATA sense block
 * @qc: command that we are generating sense for
 *
 * Generates sense data for a failed ATA read/write command: maps the
 * result taskfile to SK/ASC/ASCQ and, when the taskfile yields a valid
 * LBA, stores it in the sense INFORMATION field.
 *
 * LOCKING:
 * None.
 */
static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;
	struct scsi_cmnd *cmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->result_tf;
	unsigned char *sb = cmd->sense_buffer;
	int verbose = qc->ap->ops->error_handler == NULL;
	u64 block;
	u8 sense_key, asc, ascq;

	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);

	if (ata_dev_disabled(dev)) {
		/* Device went away; report "LUN not ready, manual
		 * intervention required". */
		ata_scsi_set_sense(dev, cmd, NOT_READY, 0x04, 0x21);
		return;
	}

	/* Use ata_to_sense_error() to map status register bits
	 * onto sense key, asc & ascq. */
	if (qc->err_mask ||
	    tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
		ata_to_sense_error(qc->ap->print_id, tf->status, tf->error,
				   &sense_key, &asc, &ascq, verbose);
		ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq);
	} else {
		/* Could not decode the error; report a generic abort. */
		ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n",
			     tf->status, qc->err_mask);
		ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
		return;
	}

	block = ata_tf_read_block(&qc->result_tf, dev);
	if (block == U64_MAX)	/* no usable failing-block address */
		return;

	scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, block);
}
1017
/* Apply settings common to every ATA-backed SCSI device. */
void ata_scsi_sdev_config(struct scsi_device *sdev)
{
	/* ATA translation only supports 10/16-byte R/W and MODE commands. */
	sdev->use_10_for_rw = 1;
	sdev->use_10_for_ms = 1;
	sdev->no_write_same = 1;

	/*
	 * Do not block devices for more than one outstanding command:
	 * keep retries flowing so a single busy response does not stall
	 * the queue.
	 */
	sdev->max_device_blocked = 1;
}
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047 bool ata_scsi_dma_need_drain(struct request *rq)
1048 {
1049 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
1050
1051 return atapi_cmd_type(scmd->cmnd[0]) == ATAPI_MISC;
1052 }
1053 EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain);
1054
/**
 * ata_scsi_dev_config - configure an sdev according to its ATA device
 * @sdev: SCSI device being configured
 * @dev: ATA device backing @sdev
 *
 * Applies transfer limits, sector size, queue depth and feature flags
 * derived from the ATA identify data to the SCSI device/request queue.
 *
 * Returns 0 on success, -ENOMEM if the ATAPI drain buffer cannot be
 * allocated.
 */
int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
{
	struct request_queue *q = sdev->request_queue;
	int depth = 1;

	if (!ata_id_has_unload(dev->id))
		dev->flags |= ATA_DFLAG_NO_UNLOAD;

	/* configure max sectors */
	dev->max_sectors = min(dev->max_sectors, sdev->host->max_sectors);
	blk_queue_max_hw_sectors(q, dev->max_sectors);

	if (dev->class == ATA_DEV_ATAPI) {
		sdev->sector_size = ATA_SECT_SIZE;

		/* set DMA padding */
		blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);

		/* make room for appending the drain */
		blk_queue_max_segments(q, queue_max_segments(q) - 1);

		sdev->dma_drain_len = ATAPI_MAX_DRAIN;
		sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len, GFP_NOIO);
		if (!sdev->dma_drain_buf) {
			ata_dev_err(dev, "drain buffer allocation failed\n");
			return -ENOMEM;
		}
	} else {
		sdev->sector_size = ata_id_logical_sector_size(dev->id);
		sdev->manage_start_stop = 1;
	}

	/*
	 * PIO transfers through the scratch buffer can only handle up to
	 * a page at a time, so warn if the logical sector size exceeds
	 * PAGE_SIZE.
	 */
	if (sdev->sector_size > PAGE_SIZE)
		ata_dev_warn(dev,
			"sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
			sdev->sector_size);

	/* buffers must be aligned to the logical sector size */
	blk_queue_update_dma_alignment(q, sdev->sector_size - 1);

	if (dev->flags & ATA_DFLAG_AN)
		set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);

	/* queue depth: NCQ depth bounded by host and ATA_MAX_QUEUE */
	if (dev->flags & ATA_DFLAG_NCQ)
		depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
	depth = min(ATA_MAX_QUEUE, depth);
	scsi_change_queue_depth(sdev, depth);

	if (dev->flags & ATA_DFLAG_TRUSTED)
		sdev->security_supported = 1;

	dev->sdev = sdev;
	return 0;
}
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128 int ata_scsi_slave_config(struct scsi_device *sdev)
1129 {
1130 struct ata_port *ap = ata_shost_to_port(sdev->host);
1131 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
1132 int rc = 0;
1133
1134 ata_scsi_sdev_config(sdev);
1135
1136 if (dev)
1137 rc = ata_scsi_dev_config(sdev, dev);
1138
1139 return rc;
1140 }
1141 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
/**
 * ata_scsi_slave_destroy - SCSI device is about to be destroyed
 * @sdev: SCSI device to be destroyed
 *
 * Clears the dev->sdev back-pointer and schedules EH to detach the ATA
 * device, then frees the ATAPI drain buffer (if any).  Does nothing for
 * hosts without an error handler.
 *
 * LOCKING:
 * Defined by SCSI layer.  We don't really care.
 */
void ata_scsi_slave_destroy(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	unsigned long flags;
	struct ata_device *dev;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	dev = __ata_scsi_find_dev(ap, sdev);
	if (dev && dev->sdev) {
		/* SCSI device already in CANCEL state, no need to offline it */
		dev->sdev = NULL;
		dev->flags |= ATA_DFLAG_DETACH;
		ata_port_schedule_eh(ap);
	}
	spin_unlock_irqrestore(ap->lock, flags);

	kfree(sdev->dma_drain_buf);
}
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
/**
 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
 * @qc: Storage for translated ATA taskfile
 *
 * Sets up an ATA taskfile to issue STANDBY (to stop) or READ VERIFY
 * (to start/spin up).
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, non-zero when the command has been completed locally.
 */
static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->tf;
	const u8 *cdb = scmd->cmnd;
	u16 fp;
	u8 bp = 0xff;

	if (scmd->cmd_len < 5) {
		fp = 4;
		goto invalid_fld;
	}

	tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf->protocol = ATA_PROT_NODATA;
	if (cdb[1] & 0x1) {
		;	/* ignore IMMED bit, violates sat-r05 */
	}
	if (cdb[4] & 0x2) {
		/* LOEJ bit set not supported */
		fp = 4;
		bp = 1;
		goto invalid_fld;
	}
	if (((cdb[4] >> 4) & 0xf) != 0) {
		/* power conditions not supported */
		fp = 4;
		bp = 3;
		goto invalid_fld;
	}

	if (cdb[4] & 0x1) {
		/* START bit set: spin the device up with READ VERIFY
		 * of one sector. */
		tf->nsect = 1;	/* 1 sector, lba=0 */

		if (qc->dev->flags & ATA_DFLAG_LBA) {
			tf->flags |= ATA_TFLAG_LBA;

			tf->lbah = 0x0;
			tf->lbam = 0x0;
			tf->lbal = 0x0;
			tf->device |= ATA_LBA;
		} else {
			/* CHS addressing: sector 1, cyl 0, head 0 */
			tf->lbal = 0x1;
			tf->lbam = 0x0;
			tf->lbah = 0x0;
		}

		tf->command = ATA_CMD_VERIFY;	/* READ VERIFY */
	} else {
		/* Some odd clown BIOSen issue spindown on power off
		 * (ATA_FLAG_NO_POWEROFF_SPINDOWN); skip it in that case. */
		if ((qc->ap->flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) &&
		    system_state == SYSTEM_POWER_OFF)
			goto skip;

		if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
		    system_entering_hibernation())
			goto skip;

		/* Issue ATA STANDBY IMMEDIATE command */
		tf->command = ATA_CMD_STANDBYNOW1;
	}

	return 0;

 invalid_fld:
	ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp);
	return 1;
 skip:
	/* Spindown suppressed: complete the command successfully. */
	scmd->result = SAM_STAT_GOOD;
	return 1;
}
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289 static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc)
1290 {
1291 struct ata_taskfile *tf = &qc->tf;
1292
1293 tf->flags |= ATA_TFLAG_DEVICE;
1294 tf->protocol = ATA_PROT_NODATA;
1295
1296 if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT)
1297 tf->command = ATA_CMD_FLUSH_EXT;
1298 else
1299 tf->command = ATA_CMD_FLUSH;
1300
1301
1302 qc->flags |= ATA_QCFLAG_IO;
1303
1304 return 0;
1305 }
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317 static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
1318 {
1319 u64 lba = 0;
1320 u32 len;
1321
1322 lba |= ((u64)(cdb[1] & 0x1f)) << 16;
1323 lba |= ((u64)cdb[2]) << 8;
1324 lba |= ((u64)cdb[3]);
1325
1326 len = cdb[4];
1327
1328 *plba = lba;
1329 *plen = len;
1330 }
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342 static inline void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
1343 {
1344 *plba = get_unaligned_be32(&cdb[2]);
1345 *plen = get_unaligned_be16(&cdb[7]);
1346 }
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358 static inline void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
1359 {
1360 *plba = get_unaligned_be64(&cdb[2]);
1361 *plen = get_unaligned_be32(&cdb[10]);
1362 }
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
/**
 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
 * @qc: Storage for translated ATA taskfile
 *
 * Converts VERIFY(10)/VERIFY(16) into ATA READ VERIFY (EXT when LBA48
 * is required), supporting both LBA and CHS addressing.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, non-zero when the command was completed locally.
 */
static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;
	u64 dev_sectors = qc->dev->n_sectors;
	const u8 *cdb = scmd->cmnd;
	u64 block;
	u32 n_block;
	u16 fp;

	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->protocol = ATA_PROT_NODATA;

	switch (cdb[0]) {
	case VERIFY:
		if (scmd->cmd_len < 10) {
			fp = 9;
			goto invalid_fld;
		}
		scsi_10_lba_len(cdb, &block, &n_block);
		break;
	case VERIFY_16:
		if (scmd->cmd_len < 16) {
			fp = 15;
			goto invalid_fld;
		}
		scsi_16_lba_len(cdb, &block, &n_block);
		break;
	default:
		fp = 0;
		goto invalid_fld;
	}

	if (!n_block)
		goto nothing_to_do;
	/* range must lie entirely within the device */
	if (block >= dev_sectors)
		goto out_of_range;
	if ((block + n_block) > dev_sectors)
		goto out_of_range;

	if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->command = ATA_CMD_VERIFY;
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				goto out_of_range;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;
			tf->command = ATA_CMD_VERIFY_EXT;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			goto out_of_range;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		if (!lba_28_ok(block, n_block))
			goto out_of_range;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		/*
		 * Check whether the converted CHS can fit:
		 * cyl is 16 bits, head 4 bits, sect 8 bits (and nonzero).
		 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			goto out_of_range;

		tf->command = ATA_CMD_VERIFY;
		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;

invalid_fld:
	ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
	return 1;

out_of_range:
	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0);
	/* "Logical Block Address out of range" */
	return 1;

nothing_to_do:
	scmd->result = SAM_STAT_GOOD;
	return 1;
}
1491
1492 static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
1493 {
1494 struct request *rq = scsi_cmd_to_rq(scmd);
1495 u32 req_blocks;
1496
1497 if (!blk_rq_is_passthrough(rq))
1498 return true;
1499
1500 req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size;
1501 if (n_blocks > req_blocks)
1502 return false;
1503
1504 return true;
1505 }
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
/**
 * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
 * @qc: Storage for translated ATA taskfile
 *
 * Converts READ/WRITE (6/10/16) into an ATA read/write taskfile via
 * ata_build_rw_tf(), honoring the FUA bit on 10/16-byte CDBs and the
 * I/O priority class of the originating request.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, non-zero when the command was completed locally.
 */
static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	const u8 *cdb = scmd->cmnd;
	struct request *rq = scsi_cmd_to_rq(scmd);
	int class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
	unsigned int tf_flags = 0;
	u64 block;
	u32 n_block;
	int rc;
	u16 fp = 0;

	switch (cdb[0]) {
	case WRITE_6:
	case WRITE_10:
	case WRITE_16:
		tf_flags |= ATA_TFLAG_WRITE;
		break;
	}

	/* Calculate the SCSI LBA, transfer length and FUA. */
	switch (cdb[0]) {
	case READ_10:
	case WRITE_10:
		if (unlikely(scmd->cmd_len < 10)) {
			fp = 9;
			goto invalid_fld;
		}
		scsi_10_lba_len(cdb, &block, &n_block);
		if (cdb[1] & (1 << 3))	/* FUA bit */
			tf_flags |= ATA_TFLAG_FUA;
		if (!ata_check_nblocks(scmd, n_block))
			goto invalid_fld;
		break;
	case READ_6:
	case WRITE_6:
		if (unlikely(scmd->cmd_len < 6)) {
			fp = 5;
			goto invalid_fld;
		}
		scsi_6_lba_len(cdb, &block, &n_block);

		/* for 6-byte r/w commands, transfer length 0
		 * means 256 blocks of data, not 0 block.
		 */
		if (!n_block)
			n_block = 256;
		if (!ata_check_nblocks(scmd, n_block))
			goto invalid_fld;
		break;
	case READ_16:
	case WRITE_16:
		if (unlikely(scmd->cmd_len < 16)) {
			fp = 15;
			goto invalid_fld;
		}
		scsi_16_lba_len(cdb, &block, &n_block);
		if (cdb[1] & (1 << 3))	/* FUA bit */
			tf_flags |= ATA_TFLAG_FUA;
		if (!ata_check_nblocks(scmd, n_block))
			goto invalid_fld;
		break;
	default:
		/* unsupported opcode: invalid field at CDB byte 0 */
		fp = 0;
		goto invalid_fld;
	}

	/* Check and compose ATA command */
	if (!n_block)
		/* For 10-byte and 16-byte SCSI R/W commands, transfer
		 * length 0 means transfer 0 block of data.
		 * However, for ATA R/W commands, sector count 0 means
		 * 256 or 65536 sectors, not 0 sectors as in SCSI.
		 *
		 * WARNING: one or two older ATA drives treat 0 as 0...
		 */
		goto nothing_to_do;

	qc->flags |= ATA_QCFLAG_IO;
	qc->nbytes = n_block * scmd->device->sector_size;

	rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
			     qc->hw_tag, class);

	if (likely(rc == 0))
		return 0;

	if (rc == -ERANGE)
		goto out_of_range;
	/* treat all other errors as -EINVAL, fall through */
invalid_fld:
	ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
	return 1;

out_of_range:
	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0);
	/* "Logical Block Address out of range" */
	return 1;

nothing_to_do:
	scmd->result = SAM_STAT_GOOD;
	return 1;
}
1628
1629 static void ata_qc_done(struct ata_queued_cmd *qc)
1630 {
1631 struct scsi_cmnd *cmd = qc->scsicmd;
1632 void (*done)(struct scsi_cmnd *) = qc->scsidone;
1633
1634 ata_qc_free(qc);
1635 done(cmd);
1636 }
1637
static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *cmd = qc->scsicmd;
	u8 *cdb = cmd->cmnd;
	int need_sense = (qc->err_mask != 0);

	/* For ATA pass thru (SAT) commands, generate a sense block if
	 * user mandated it or if there's an error.  Note that if we
	 * generate because the user forced us to [CK_COND=1], a check
	 * condition is generated and the ATA register values are returned
	 * whether the command completed successfully or not.
	 */
	if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
	    ((cdb[2] & 0x20) || need_sense))
		ata_gen_passthru_sense(qc);
	else if (qc->flags & ATA_QCFLAG_SENSE_VALID)
		cmd->result = SAM_STAT_CHECK_CONDITION;
	else if (need_sense)
		ata_gen_ata_sense(qc);
	else
		cmd->result = SAM_STAT_GOOD;

	/* Old EH (no ->error_handler): dump the failed taskfile here */
	if (need_sense && !ap->ops->error_handler)
		ata_dump_status(ap, &qc->result_tf);

	ata_qc_done(qc);
}
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
/**
 *	ata_scsi_translate - Translate then issue SCSI command to ATA device
 *	@dev: ATA device to which the command is addressed
 *	@cmd: SCSI command to execute
 *	@xlat_func: Actor which translates @cmd to an ATA taskfile
 *
 *	Our ->queuecommand() function has decided that the SCSI
 *	command issued can be directly translated into an ATA
 *	command, rather than handled internally.
 *
 *	This function sets up an ata_queued_cmd structure for the
 *	SCSI command, and sends that ata_queued_cmd to the hardware.
 *
 *	The xlat_func argument (actor) returns 0 if ready to execute
 *	ATA command, else 1 to finish translation. If 1 is returned
 *	then cmd->result (and possibly cmd->sense_buffer) are assumed
 *	to be set reflecting an error condition or clean (early)
 *	termination.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	0 on success, SCSI_ML_QUEUE_DEVICE_BUSY if the command
 *	needs to be deferred.
 */
static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
			      ata_xlat_func_t xlat_func)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_queued_cmd *qc;
	int rc;

	qc = ata_scsi_qc_new(dev, cmd);
	if (!qc)
		goto err_mem;

	/* data is present; dma-map it */
	if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
	    cmd->sc_data_direction == DMA_TO_DEVICE) {
		if (unlikely(scsi_bufflen(cmd) < 1)) {
			ata_dev_warn(dev, "WARNING: zero len r/w req\n");
			goto err_did;
		}

		ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd));

		qc->dma_dir = cmd->sc_data_direction;
	}

	qc->complete_fn = ata_scsi_qc_complete;

	/* actor != 0 means early finish: result already set in cmd */
	if (xlat_func(qc))
		goto early_finish;

	if (ap->ops->qc_defer) {
		if ((rc = ap->ops->qc_defer(qc)))
			goto defer;
	}

	/* select device, send command to hardware */
	ata_qc_issue(qc);

	return 0;

early_finish:
	ata_qc_free(qc);
	scsi_done(cmd);
	return 0;

err_did:
	ata_qc_free(qc);
	cmd->result = (DID_ERROR << 16);
	scsi_done(cmd);
err_mem:
	/* Out of qcs: completing with DID_ERROR / letting midlayer retry */
	return 0;

defer:
	ata_qc_free(qc);
	if (rc == ATA_DEFER_LINK)
		return SCSI_MLQUEUE_DEVICE_BUSY;
	else
		return SCSI_MLQUEUE_HOST_BUSY;
}
1754
/* Argument bundle passed to the simulated-command actors. */
struct ata_scsi_args {
	struct ata_device	*dev;	/* target ATA device */
	u16			*id;	/* IDENTIFY data of @dev */
	struct scsi_cmnd	*cmd;	/* SCSI command being simulated */
};
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
/**
 *	ata_scsi_rbuf_fill - wrapper for SCSI command simulators
 *	@args: device IDENTIFY data / SCSI command of interest.
 *	@actor: Callback hook for desired SCSI command simulator
 *
 *	Takes care of the hard work of simulating a SCSI command:
 *	zeroes the shared response buffer, calls @actor to fill it in,
 *	and on success (actor returned 0) copies the buffer into the
 *	SCSI command's data area and sets GOOD status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(ata_scsi_rbuf_lock) — serializes use of the
 *	single static ata_scsi_rbuf[] scratch buffer.
 */
static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
		unsigned int (*actor)(struct ata_scsi_args *args, u8 *rbuf))
{
	unsigned int rc;
	struct scsi_cmnd *cmd = args->cmd;
	unsigned long flags;

	spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);

	memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE);
	rc = actor(args, ata_scsi_rbuf);
	if (rc == 0)
		sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
				    ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);

	spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);

	if (rc == 0)
		cmd->result = SAM_STAT_GOOD;
}
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808 static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
1809 {
1810 static const u8 versions[] = {
1811 0x00,
1812 0x60,
1813
1814 0x03,
1815 0x20,
1816
1817 0x03,
1818 0x00
1819 };
1820 static const u8 versions_zbc[] = {
1821 0x00,
1822 0xA0,
1823
1824 0x06,
1825 0x00,
1826
1827 0x05,
1828 0xC0,
1829
1830 0x60,
1831 0x24,
1832 };
1833
1834 u8 hdr[] = {
1835 TYPE_DISK,
1836 0,
1837 0x5,
1838 2,
1839 95 - 4,
1840 0,
1841 0,
1842 2
1843 };
1844
1845
1846
1847
1848 if (ata_id_removable(args->id) ||
1849 (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL))
1850 hdr[1] |= (1 << 7);
1851
1852 if (args->dev->class == ATA_DEV_ZAC) {
1853 hdr[0] = TYPE_ZBC;
1854 hdr[2] = 0x7;
1855 }
1856
1857 memcpy(rbuf, hdr, sizeof(hdr));
1858 memcpy(&rbuf[8], "ATA ", 8);
1859 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
1860
1861
1862 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV + 2, 4);
1863 if (strncmp(&rbuf[32], " ", 4) == 0)
1864 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
1865
1866 if (rbuf[32] == 0 || rbuf[32] == ' ')
1867 memcpy(&rbuf[32], "n/a ", 4);
1868
1869 if (ata_id_zoned_cap(args->id) || args->dev->class == ATA_DEV_ZAC)
1870 memcpy(rbuf + 58, versions_zbc, sizeof(versions_zbc));
1871 else
1872 memcpy(rbuf + 58, versions, sizeof(versions));
1873
1874 return 0;
1875 }
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887 static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
1888 {
1889 int i, num_pages = 0;
1890 static const u8 pages[] = {
1891 0x00,
1892 0x80,
1893 0x83,
1894 0x89,
1895 0xb0,
1896 0xb1,
1897 0xb2,
1898 0xb6,
1899 0xb9,
1900 };
1901
1902 for (i = 0; i < sizeof(pages); i++) {
1903 if (pages[i] == 0xb6 &&
1904 !(args->dev->flags & ATA_DFLAG_ZAC))
1905 continue;
1906 rbuf[num_pages + 4] = pages[i];
1907 num_pages++;
1908 }
1909 rbuf[3] = num_pages;
1910 return 0;
1911 }
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923 static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
1924 {
1925 static const u8 hdr[] = {
1926 0,
1927 0x80,
1928 0,
1929 ATA_ID_SERNO_LEN,
1930 };
1931
1932 memcpy(rbuf, hdr, sizeof(hdr));
1933 ata_id_string(args->id, (unsigned char *) &rbuf[4],
1934 ATA_ID_SERNO, ATA_ID_SERNO_LEN);
1935 return 0;
1936 }
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951 static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
1952 {
1953 const int sat_model_serial_desc_len = 68;
1954 int num;
1955
1956 rbuf[1] = 0x83;
1957 num = 4;
1958
1959
1960 rbuf[num + 0] = 2;
1961 rbuf[num + 3] = ATA_ID_SERNO_LEN;
1962 num += 4;
1963 ata_id_string(args->id, (unsigned char *) rbuf + num,
1964 ATA_ID_SERNO, ATA_ID_SERNO_LEN);
1965 num += ATA_ID_SERNO_LEN;
1966
1967
1968
1969 rbuf[num + 0] = 2;
1970 rbuf[num + 1] = 1;
1971 rbuf[num + 3] = sat_model_serial_desc_len;
1972 num += 4;
1973 memcpy(rbuf + num, "ATA ", 8);
1974 num += 8;
1975 ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
1976 ATA_ID_PROD_LEN);
1977 num += ATA_ID_PROD_LEN;
1978 ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
1979 ATA_ID_SERNO_LEN);
1980 num += ATA_ID_SERNO_LEN;
1981
1982 if (ata_id_has_wwn(args->id)) {
1983
1984
1985 rbuf[num + 0] = 1;
1986 rbuf[num + 1] = 3;
1987 rbuf[num + 3] = ATA_ID_WWN_LEN;
1988 num += 4;
1989 ata_id_string(args->id, (unsigned char *) rbuf + num,
1990 ATA_ID_WWN, ATA_ID_WWN_LEN);
1991 num += ATA_ID_WWN_LEN;
1992 }
1993 rbuf[3] = num - 4;
1994 return 0;
1995 }
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007 static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
2008 {
2009 rbuf[1] = 0x89;
2010 rbuf[2] = (0x238 >> 8);
2011 rbuf[3] = (0x238 & 0xff);
2012
2013 memcpy(&rbuf[8], "linux ", 8);
2014 memcpy(&rbuf[16], "libata ", 16);
2015 memcpy(&rbuf[32], DRV_VERSION, 4);
2016
2017 rbuf[36] = 0x34;
2018 rbuf[37] = (1 << 7);
2019
2020
2021
2022 rbuf[38] = ATA_DRDY;
2023 rbuf[40] = 0x1;
2024 rbuf[48] = 0x1;
2025
2026 rbuf[56] = ATA_CMD_ID_ATA;
2027
2028 memcpy(&rbuf[60], &args->id[0], 512);
2029 return 0;
2030 }
2031
/* Simulate INQUIRY VPD page 0xb0: Block Limits */
static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
{
	struct ata_device *dev = args->dev;
	u16 min_io_sectors;

	rbuf[1] = 0xb0;
	rbuf[3] = 0x3c;		/* required VPD size with unmap support */

	/*
	 * Optimal transfer length granularity.
	 *
	 * This is always one physical block, but for disks with a smaller
	 * logical than physical sector size we need to figure out what the
	 * latter is.
	 */
	min_io_sectors = 1 << ata_id_log2_per_physical_sector(args->id);
	put_unaligned_be16(min_io_sectors, &rbuf[6]);

	/*
	 * Optimal unmap granularity.
	 *
	 * The ATA spec doesn't even know about a granularity or alignment
	 * for the TRIM command.  We can leave away most of the unmap related
	 * VPD page entries, but we have set the optimal unmap granularity
	 * to 1 block to signal the block layer that unmap is supported.
	 */
	if (ata_id_has_trim(args->id)) {
		u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM;

		/* some devices misbehave beyond 128M per TRIM */
		if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M)
			max_blocks = 128 << (20 - SECTOR_SHIFT);

		put_unaligned_be64(max_blocks, &rbuf[36]);
		put_unaligned_be32(1, &rbuf[28]);
	}

	return 0;
}
2071
2072 static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
2073 {
2074 int form_factor = ata_id_form_factor(args->id);
2075 int media_rotation_rate = ata_id_rotation_rate(args->id);
2076 u8 zoned = ata_id_zoned_cap(args->id);
2077
2078 rbuf[1] = 0xb1;
2079 rbuf[3] = 0x3c;
2080 rbuf[4] = media_rotation_rate >> 8;
2081 rbuf[5] = media_rotation_rate;
2082 rbuf[7] = form_factor;
2083 if (zoned)
2084 rbuf[8] = (zoned << 4);
2085
2086 return 0;
2087 }
2088
2089 static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf)
2090 {
2091
2092 rbuf[1] = 0xb2;
2093 rbuf[3] = 0x4;
2094 rbuf[5] = 1 << 6;
2095
2096 return 0;
2097 }
2098
static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
{
	/*
	 * zbc-r05 SCSI Zoned Block device characteristics VPD page
	 */
	rbuf[1] = 0xb6;
	rbuf[3] = 0x3C;

	/*
	 * URSWRZ bit is only meaningful for host-managed ZAC drives
	 */
	if (args->dev->zac_zoned_cap & 1)
		rbuf[4] |= 1;
	put_unaligned_be32(args->dev->zac_zones_optimal_open, &rbuf[8]);
	put_unaligned_be32(args->dev->zac_zones_optimal_nonseq, &rbuf[12]);
	put_unaligned_be32(args->dev->zac_zones_max_open, &rbuf[16]);

	return 0;
}
2118
static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
{
	struct ata_cpr_log *cpr_log = args->dev->cpr_log;
	u8 *desc = &rbuf[64];
	int i;

	/* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */
	rbuf[1] = 0xb9;
	put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]);

	/* one 32-byte descriptor per concurrent positioning range */
	for (i = 0; i < cpr_log->nr_cpr; i++, desc += 32) {
		desc[0] = cpr_log->cpr[i].num;
		desc[1] = cpr_log->cpr[i].num_storage_elements;
		put_unaligned_be64(cpr_log->cpr[i].start_lba, &desc[8]);
		put_unaligned_be64(cpr_log->cpr[i].num_lbas, &desc[16]);
	}

	return 0;
}
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152 static void modecpy(u8 *dest, const u8 *src, int n, bool changeable)
2153 {
2154 if (changeable) {
2155 memcpy(dest, src, 2);
2156 memset(dest + 2, 0, n - 2);
2157 } else {
2158 memcpy(dest, src, n);
2159 }
2160 }
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
/**
 *	ata_msense_caching - Simulate MODE SENSE caching info page
 *	@id: device IDENTIFY data
 *	@buf: output buffer
 *	@changeable: whether changeable parameters are requested
 *
 *	Generate a caching info page, which conditionally indicates
 *	write caching to the SCSI layer, depending on device
 *	capabilities.
 *
 *	LOCKING:
 *	None.
 */
static unsigned int ata_msense_caching(u16 *id, u8 *buf, bool changeable)
{
	modecpy(buf, def_cache_mpage, sizeof(def_cache_mpage), changeable);
	if (changeable) {
		buf[2] |= (1 << 2);	/* WCE is changeable */
	} else {
		buf[2] |= (ata_id_wcache_enabled(id) << 2);	/* write cache enable */
		buf[12] |= (!ata_id_rahead_enabled(id) << 5);	/* disable read ahead */
	}
	return sizeof(def_cache_mpage);
}
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198 static unsigned int ata_msense_control(struct ata_device *dev, u8 *buf,
2199 bool changeable)
2200 {
2201 modecpy(buf, def_control_mpage, sizeof(def_control_mpage), changeable);
2202 if (changeable) {
2203 buf[2] |= (1 << 2);
2204 } else {
2205 bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE);
2206
2207 buf[2] |= (d_sense << 2);
2208 }
2209 return sizeof(def_control_mpage);
2210 }
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222 static unsigned int ata_msense_rw_recovery(u8 *buf, bool changeable)
2223 {
2224 modecpy(buf, def_rw_recovery_mpage, sizeof(def_rw_recovery_mpage),
2225 changeable);
2226 return sizeof(def_rw_recovery_mpage);
2227 }
2228
2229
2230
2231
2232
/*
 * We can turn this into a real blacklist if it's needed, for now just
 * blacklist any Maxtor BANC1G10 revision firmware.
 */
static int ata_dev_supports_fua(u16 *id)
{
	unsigned char model[ATA_ID_PROD_LEN + 1], fw[ATA_ID_FW_REV_LEN + 1];

	/* FUA can be disabled globally via the libata.fua module parameter */
	if (!libata_fua)
		return 0;
	if (!ata_id_has_fua(id))
		return 0;

	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	ata_id_c_string(id, fw, ATA_ID_FW_REV, sizeof(fw));

	/* strcmp() != 0: any vendor other than Maxtor supports FUA */
	if (strcmp(model, "Maxtor"))
		return 1;
	/* Maxtor with any firmware other than BANC1G10 is fine too */
	if (strcmp(fw, "BANC1G10"))
		return 1;

	return 0;	/* blacklisted */
}
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
/**
 *	ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
 *	@args: device IDENTIFY data / SCSI command of interest.
 *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 *	Simulate MODE SENSE commands. Assume this is invoked for direct
 *	access devices (e.g. disks) only. There should be no block
 *	descriptor for other device types.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
{
	struct ata_device *dev = args->dev;
	u8 *scsicmd = args->cmd->cmnd, *p = rbuf;
	static const u8 sat_blk_desc[] = {
		0, 0, 0, 0,	/* number of blocks: sat unspecified */
		0,
		0, 0x2, 0x0	/* block length: 512 bytes */
	};
	u8 pg, spg;
	unsigned int ebd, page_control, six_byte;
	u8 dpofua, bp = 0xff;
	u16 fp;

	six_byte = (scsicmd[0] == MODE_SENSE);
	ebd = !(scsicmd[1] & 0x8);	/* dbd bit inverted == edb */
	/*
	 * LLBA bit in msense(10) ignored (compliant)
	 */

	page_control = scsicmd[2] >> 6;
	switch (page_control) {
	case 0: /* current */
	case 1: /* changeable */
	case 2: /* defaults */
		break;  /* supported */
	case 3: /* saved */
		goto saving_not_supp;
	default:
		fp = 2;
		bp = 6;
		goto invalid_fld;
	}

	/* reserve room for the header (and block descriptor, if any) */
	if (six_byte)
		p += 4 + (ebd ? 8 : 0);
	else
		p += 8 + (ebd ? 8 : 0);

	pg = scsicmd[2] & 0x3f;
	spg = scsicmd[3];
	/*
	 * No mode subpages supported (yet) but asking for _all_
	 * subpages may be valid
	 */
	if (spg && (spg != ALL_SUB_MPAGES)) {
		fp = 3;
		goto invalid_fld;
	}

	switch(pg) {
	case RW_RECOVERY_MPAGE:
		p += ata_msense_rw_recovery(p, page_control == 1);
		break;

	case CACHE_MPAGE:
		p += ata_msense_caching(args->id, p, page_control == 1);
		break;

	case CONTROL_MPAGE:
		p += ata_msense_control(args->dev, p, page_control == 1);
		break;

	case ALL_MPAGES:
		p += ata_msense_rw_recovery(p, page_control == 1);
		p += ata_msense_caching(args->id, p, page_control == 1);
		p += ata_msense_control(args->dev, p, page_control == 1);
		break;

	default:		/* invalid page code */
		fp = 2;
		goto invalid_fld;
	}

	/* DPOFUA bit: advertise FUA only for LBA48 non-PIO capable setups */
	dpofua = 0;
	if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) &&
	    (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
		dpofua = 1 << 4;

	if (six_byte) {
		rbuf[0] = p - rbuf - 1;		/* mode data length */
		rbuf[2] |= dpofua;
		if (ebd) {
			rbuf[3] = sizeof(sat_blk_desc);
			memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc));
		}
	} else {
		unsigned int output_len = p - rbuf - 2;

		rbuf[0] = output_len >> 8;	/* mode data length, BE16 */
		rbuf[1] = output_len;
		rbuf[3] |= dpofua;
		if (ebd) {
			rbuf[7] = sizeof(sat_blk_desc);
			memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
		}
	}
	return 0;

invalid_fld:
	ata_scsi_set_invalid_field(dev, args->cmd, fp, bp);
	return 1;

saving_not_supp:
	/* "Saving parameters not supported" */
	ata_scsi_set_sense(dev, args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
	return 1;
}
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
/**
 *	ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
 *	@args: device IDENTIFY data / SCSI command of interest.
 *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 *	Simulate READ CAPACITY commands.
 *
 *	LOCKING:
 *	None.
 */
static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
{
	struct ata_device *dev = args->dev;
	u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */
	u32 sector_size; /* physical sector size in bytes */
	u8 log2_per_phys;
	u16 lowest_aligned;

	sector_size = ata_id_logical_sector_size(dev->id);
	log2_per_phys = ata_id_log2_per_physical_sector(dev->id);
	lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys);

	if (args->cmd->cmnd[0] == READ_CAPACITY) {
		/* 10-byte form caps the reported LBA at 32 bits */
		if (last_lba >= 0xffffffffULL)
			last_lba = 0xffffffff;

		/* sector count, 32-bit */
		rbuf[0] = last_lba >> (8 * 3);
		rbuf[1] = last_lba >> (8 * 2);
		rbuf[2] = last_lba >> (8 * 1);
		rbuf[3] = last_lba;

		/* sector size */
		rbuf[4] = sector_size >> (8 * 3);
		rbuf[5] = sector_size >> (8 * 2);
		rbuf[6] = sector_size >> (8 * 1);
		rbuf[7] = sector_size;
	} else {
		/* sector count, 64-bit */
		rbuf[0] = last_lba >> (8 * 7);
		rbuf[1] = last_lba >> (8 * 6);
		rbuf[2] = last_lba >> (8 * 5);
		rbuf[3] = last_lba >> (8 * 4);
		rbuf[4] = last_lba >> (8 * 3);
		rbuf[5] = last_lba >> (8 * 2);
		rbuf[6] = last_lba >> (8 * 1);
		rbuf[7] = last_lba;

		/* sector size */
		rbuf[ 8] = sector_size >> (8 * 3);
		rbuf[ 9] = sector_size >> (8 * 2);
		rbuf[10] = sector_size >> (8 * 1);
		rbuf[11] = sector_size;

		rbuf[12] = 0;
		rbuf[13] = log2_per_phys;
		rbuf[14] = (lowest_aligned >> 8) & 0x3f;
		rbuf[15] = lowest_aligned;

		if (ata_id_has_trim(args->id) &&
		    !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
			rbuf[14] |= 0x80; /* LBPME */

			if (ata_id_has_zero_after_trim(args->id) &&
			    dev->horkage & ATA_HORKAGE_ZERO_AFTER_TRIM) {
				ata_dev_info(dev, "Enabling discard_zeroes_data\n");
				rbuf[14] |= 0x40; /* LBPRZ */
			}
		}
		if (ata_id_zoned_cap(args->id) ||
		    args->dev->class == ATA_DEV_ZAC)
			rbuf[12] = (1 << 4); /* RC_BASIS */
	}
	return 0;
}
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460 static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
2461 {
2462 rbuf[3] = 8;
2463
2464 return 0;
2465 }
2466
/* Completion callback for the internal ATAPI REQUEST SENSE command. */
static void atapi_sense_complete(struct ata_queued_cmd *qc)
{
	if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
		/* FIXME: not quite right; we don't want the
		 * translation of taskfile registers into
		 * a sense descriptors, since that's only
		 * correct for ATA, not ATAPI
		 */
		ata_gen_passthru_sense(qc);
	}

	ata_qc_done(qc);
}
2480
2481
/* Does the port perform PIO via its DMA engine (ATA_FLAG_PIO_DMA)? */
static inline int ata_pio_use_silly(struct ata_port *ap)
{
	return (ap->flags & ATA_FLAG_PIO_DMA);
}
2486
/*
 * atapi_request_sense - Re-use a failed qc to issue REQUEST SENSE
 * @qc: command that failed with a device error
 *
 * The qc is re-initialized in place to fetch sense data into the SCSI
 * command's sense buffer; atapi_sense_complete() finishes the command.
 */
static void atapi_request_sense(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *cmd = qc->scsicmd;

	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

#ifdef CONFIG_ATA_SFF
	if (ap->ops->sff_tf_read)
		ap->ops->sff_tf_read(ap, &qc->tf);
#endif

	/* fill these in, for the case where they are -not- overwritten */
	cmd->sense_buffer[0] = 0x70;	/* fixed-format sense */
	cmd->sense_buffer[2] = qc->tf.error >> 4;	/* sense key from error reg */

	ata_qc_reinit(qc);

	/* setup sg table and init transfer direction */
	sg_init_one(&qc->sgent, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
	ata_sg_init(qc, &qc->sgent, 1);
	qc->dma_dir = DMA_FROM_DEVICE;

	memset(&qc->cdb, 0, qc->dev->cdb_len);
	qc->cdb[0] = REQUEST_SENSE;
	qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;

	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	qc->tf.command = ATA_CMD_PACKET;

	if (ata_pio_use_silly(ap)) {
		qc->tf.protocol = ATAPI_PROT_DMA;
		qc->tf.feature |= ATAPI_PKT_DMA;
	} else {
		qc->tf.protocol = ATAPI_PROT_PIO;
		/* byte-count limit for PIO transfers */
		qc->tf.lbam = SCSI_SENSE_BUFFERSIZE;
		qc->tf.lbah = 0;
	}
	qc->nbytes = SCSI_SENSE_BUFFERSIZE;

	qc->complete_fn = atapi_sense_complete;

	ata_qc_issue(qc);
}
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540 static void atapi_fixup_inquiry(struct scsi_cmnd *cmd)
2541 {
2542 u8 buf[4];
2543
2544 sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, 4);
2545 if (buf[2] == 0) {
2546 buf[2] = 0x5;
2547 buf[3] = 0x32;
2548 }
2549 sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, 4);
2550 }
2551
static void atapi_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *cmd = qc->scsicmd;
	unsigned int err_mask = qc->err_mask;

	/* handle completion from new EH */
	if (unlikely(qc->ap->ops->error_handler &&
		     (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {

		if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
			/* FIXME: not quite right; we don't want the
			 * translation of taskfile registers into a
			 * sense descriptors, since that's only
			 * correct for ATA, not ATAPI
			 */
			ata_gen_passthru_sense(qc);
		}

		/* SCSI EH automatically locks door if sdev->locked is
		 * set.  Sometimes door lock request continues to
		 * fail, for example, when no media is present.  This
		 * creates a loop - SCSI EH issues door lock which
		 * fails and gets invoked again to acquire sense data
		 * for the failed command.
		 *
		 * If door lock fails, always clear sdev->locked to
		 * avoid this infinite loop.
		 *
		 * This may happen before SCSI scan is complete.  Make
		 * sure qc->dev->sdev isn't NULL before dereferencing.
		 */
		if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev)
			qc->dev->sdev->locked = 0;

		qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
		ata_qc_done(qc);
		return;
	}

	/* successful completion or old EH failure path */
	if (unlikely(err_mask & AC_ERR_DEV)) {
		cmd->result = SAM_STAT_CHECK_CONDITION;
		atapi_request_sense(qc);
		return;
	} else if (unlikely(err_mask)) {
		/* FIXME: not quite right; we don't want the
		 * translation of taskfile registers into
		 * a sense descriptors, since that's only
		 * correct for ATA, not ATAPI
		 */
		ata_gen_passthru_sense(qc);
	} else {
		/* fix up zero ANSI version on plain INQUIRY responses */
		if (cmd->cmnd[0] == INQUIRY && (cmd->cmnd[1] & 0x03) == 0)
			atapi_fixup_inquiry(cmd);
		cmd->result = SAM_STAT_GOOD;
	}

	ata_qc_done(qc);
}
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
/**
 *	atapi_xlat - Initialize PACKET taskfile
 *	@qc: command structure to be initialized
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, non-zero on failure.
 */
static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct ata_device *dev = qc->dev;
	int nodata = (scmd->sc_data_direction == DMA_NONE);
	int using_pio = !nodata && (dev->flags & ATA_DFLAG_PIO);
	unsigned int nbytes;

	memset(qc->cdb, 0, dev->cdb_len);
	memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len);

	qc->complete_fn = atapi_qc_complete;

	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	if (scmd->sc_data_direction == DMA_TO_DEVICE) {
		qc->tf.flags |= ATA_TFLAG_WRITE;
	}

	qc->tf.command = ATA_CMD_PACKET;
	ata_qc_set_pc_nbytes(qc);

	/* check whether ATAPI DMA is safe */
	if (!nodata && !using_pio && atapi_check_dma(qc))
		using_pio = 1;

	/* Some controller variants snoop this value for Packet
	 * transfers to do state machine and FIFO management.  Thus we
	 * want to set it properly, and for DMA where it is
	 * effectively meaningless.
	 */
	nbytes = min(ata_qc_raw_nbytes(qc), (unsigned int)63 * 1024);

	/* Most ATAPI devices which honor transfer chunk size don't
	 * behave according to the spec when odd chunk size which
	 * matches the transfer length is specified.  In practice such
	 * devices first indicate and transfer 2n bytes and then
	 * indicate and transfer 2 bytes where the last byte isn't
	 * used.
	 *
	 * This inconsistency confuses several controllers which
	 * perform PIO using DMA such as Intel AHCIs and sil3124/32.
	 * These controllers use actual number of transferred bytes to
	 * update DMA pointer and transfer of 4n+2 bytes make those
	 * controllers push DMA pointer by 4n+4 bytes because SATA data
	 * FISes are aligned to 4 bytes.  This causes data corruption
	 * and buffer overrun.
	 *
	 * Always setting nbytes to an even number solves this problem
	 * because then ATAPI devices don't have to split data at 2n
	 * boundaries.
	 */
	if (nbytes & 0x1)
		nbytes++;

	qc->tf.lbam = (nbytes & 0xFF);	/* byte count limit, low/high */
	qc->tf.lbah = (nbytes >> 8);

	if (nodata)
		qc->tf.protocol = ATAPI_PROT_NODATA;
	else if (using_pio)
		qc->tf.protocol = ATAPI_PROT_PIO;
	else {
		/* DMA data xfer */
		qc->tf.protocol = ATAPI_PROT_DMA;
		qc->tf.feature |= ATAPI_PKT_DMA;

		if ((dev->flags & ATA_DFLAG_DMADIR) &&
		    (scmd->sc_data_direction != DMA_TO_DEVICE))
			/* some SATA bridges need us to indicate data xfer direction */
			qc->tf.feature |= ATAPI_DMADIR;
	}

	/* FIXME: We need to translate 0x05 READ_BLOCK_LIMITS to a MODE_SENSE
	   as ATAPI tape drives don't get this right otherwise */
	return 0;
}
2703
2704 static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
2705 {
2706 if (!sata_pmp_attached(ap)) {
2707 if (likely(devno >= 0 &&
2708 devno < ata_link_max_devices(&ap->link)))
2709 return &ap->link.device[devno];
2710 } else {
2711 if (likely(devno >= 0 &&
2712 devno < ap->nr_pmp_links))
2713 return &ap->pmp_link[devno].device[0];
2714 }
2715
2716 return NULL;
2717 }
2718
2719 static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
2720 const struct scsi_device *scsidev)
2721 {
2722 int devno;
2723
2724
2725 if (!sata_pmp_attached(ap)) {
2726 if (unlikely(scsidev->channel || scsidev->lun))
2727 return NULL;
2728 devno = scsidev->id;
2729 } else {
2730 if (unlikely(scsidev->id || scsidev->lun))
2731 return NULL;
2732 devno = scsidev->channel;
2733 }
2734
2735 return ata_find_dev(ap, devno);
2736 }
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754 struct ata_device *
2755 ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
2756 {
2757 struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev);
2758
2759 if (unlikely(!dev || !ata_dev_enabled(dev)))
2760 return NULL;
2761
2762 return dev;
2763 }
2764
2765
2766
2767
2768
2769
2770
2771
2772 static u8
2773 ata_scsi_map_proto(u8 byte1)
2774 {
2775 switch((byte1 & 0x1e) >> 1) {
2776 case 3:
2777 return ATA_PROT_NODATA;
2778
2779 case 6:
2780 case 10:
2781 case 11:
2782 return ATA_PROT_DMA;
2783
2784 case 4:
2785 case 5:
2786 return ATA_PROT_PIO;
2787
2788 case 12:
2789 return ATA_PROT_NCQ;
2790
2791 case 0:
2792 case 1:
2793 case 8:
2794 case 9:
2795 case 7:
2796 case 15:
2797 default:
2798 break;
2799 }
2800
2801 return ATA_PROT_UNKNOWN;
2802 }
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
/**
 *	ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile
 *	@qc: command structure to be initialized
 *
 *	Handles either 12, 16, or 32-byte versions of the CDB.
 *
 *	RETURNS:
 *	Zero on success, non-zero on failure.
 */
static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &(qc->tf);
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct ata_device *dev = qc->dev;
	const u8 *cdb = scmd->cmnd;
	u16 fp;
	u16 cdb_offset = 0;

	/* 7Fh variable length cmd means a 32-byte pass-thru */
	if (cdb[0] == VARIABLE_LENGTH_CMD)
		cdb_offset = 9;

	tf->protocol = ata_scsi_map_proto(cdb[1 + cdb_offset]);
	if (tf->protocol == ATA_PROT_UNKNOWN) {
		fp = 1;
		goto invalid_fld;
	}

	if ((cdb[2 + cdb_offset] & 0x3) == 0) {
		/*
		 * When T_LENGTH is zero (No data is transferred), dir should
		 * be DMA_NONE.
		 */
		if (scmd->sc_data_direction != DMA_NONE) {
			fp = 2 + cdb_offset;
			goto invalid_fld;
		}

		if (ata_is_ncq(tf->protocol))
			tf->protocol = ATA_PROT_NCQ_NODATA;
	}

	/* enable LBA */
	tf->flags |= ATA_TFLAG_LBA;

	/*
	 * 12 and 16 byte CDBs use different offsets to
	 * provide the various register values.
	 */
	switch (cdb[0]) {
	case ATA_16:
		/*
		 * 16-byte CDB - may contain extended commands.
		 *
		 * If that is the case, copy the upper byte register values.
		 */
		if (cdb[1] & 0x01) {
			tf->hob_feature = cdb[3];
			tf->hob_nsect = cdb[5];
			tf->hob_lbal = cdb[7];
			tf->hob_lbam = cdb[9];
			tf->hob_lbah = cdb[11];
			tf->flags |= ATA_TFLAG_LBA48;
		} else
			tf->flags &= ~ATA_TFLAG_LBA48;

		/*
		 * Always copy low byte, device and command registers.
		 */
		tf->feature = cdb[4];
		tf->nsect = cdb[6];
		tf->lbal = cdb[8];
		tf->lbam = cdb[10];
		tf->lbah = cdb[12];
		tf->device = cdb[13];
		tf->command = cdb[14];
		break;
	case ATA_12:
		/*
		 * 12-byte CDB - incapable of extended commands.
		 */
		tf->flags &= ~ATA_TFLAG_LBA48;

		tf->feature = cdb[3];
		tf->nsect = cdb[4];
		tf->lbal = cdb[5];
		tf->lbam = cdb[6];
		tf->lbah = cdb[7];
		tf->device = cdb[8];
		tf->command = cdb[9];
		break;
	default:
		/*
		 * 32-byte CDB - may contain extended command fields.
		 *
		 * If that is the case, copy the upper byte register values.
		 */
		if (cdb[10] & 0x01) {
			tf->hob_feature = cdb[20];
			tf->hob_nsect = cdb[22];
			tf->hob_lbal = cdb[16];
			tf->hob_lbam = cdb[15];
			tf->hob_lbah = cdb[14];
			tf->flags |= ATA_TFLAG_LBA48;
		} else
			tf->flags &= ~ATA_TFLAG_LBA48;

		tf->feature = cdb[21];
		tf->nsect = cdb[23];
		tf->lbal = cdb[19];
		tf->lbam = cdb[18];
		tf->lbah = cdb[17];
		tf->device = cdb[24];
		tf->command = cdb[25];
		tf->auxiliary = get_unaligned_be32(&cdb[28]);
		break;
	}

	/* For NCQ commands copy the tag value */
	if (ata_is_ncq(tf->protocol))
		tf->nsect = qc->hw_tag << 3;

	/* enforce correct master/slave bit */
	tf->device = dev->devno ?
		tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;

	switch (tf->command) {
	/* READ/WRITE LONG use a non-standard sect_size */
	case ATA_CMD_READ_LONG:
	case ATA_CMD_READ_LONG_ONCE:
	case ATA_CMD_WRITE_LONG:
	case ATA_CMD_WRITE_LONG_ONCE:
		if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) {
			fp = 1;
			goto invalid_fld;
		}
		qc->sect_size = scsi_bufflen(scmd);
		break;

	/* commands using reported Logical Block size (e.g. 512 or 4K) */
	case ATA_CMD_CFA_WRITE_NE:
	case ATA_CMD_CFA_TRANS_SECT:
	case ATA_CMD_CFA_WRITE_MULT_NE:
	/* XXX: case ATA_CMD_CFA_WRITE_SECTORS_WITHOUT_ERASE: */
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_QUEUED:
	/* XXX: case ATA_CMD_READ_QUEUED_EXT: */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_READ_MULTI:
	case ATA_CMD_READ_MULTI_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_READ_STREAM_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_QUEUED_FUA_EXT:
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_WRITE_MULTI:
	case ATA_CMD_WRITE_MULTI_EXT:
	case ATA_CMD_WRITE_MULTI_FUA_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_EXT:
		qc->sect_size = scmd->device->sector_size;
		break;

	/* Everything else uses 512 byte "sectors" */
	default:
		qc->sect_size = ATA_SECT_SIZE;
	}

	/*
	 * Set flags so that all registers will be written, pass on
	 * write indication (used for PIO/DMA setup), result TF is
	 * copied back and we don't whine too much about its failure.
	 */
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	if (scmd->sc_data_direction == DMA_TO_DEVICE)
		tf->flags |= ATA_TFLAG_WRITE;

	qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET;

	/*
	 * Set transfer length.
	 *
	 * TODO: find out if we need to do more here to
	 *       cover scatter/gather case.
	 */
	ata_qc_set_pc_nbytes(qc);

	/* We may not issue DMA commands if no DMA mode is set */
	if (tf->protocol == ATA_PROT_DMA && !ata_dma_enabled(dev)) {
		fp = 1;
		goto invalid_fld;
	}

	/* We may not issue NCQ commands to devices not supporting NCQ */
	if (ata_is_ncq(tf->protocol) && !ata_ncq_enabled(dev)) {
		fp = 1;
		goto invalid_fld;
	}

	/* sanity check for pio multi commands */
	if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) {
		fp = 1;
		goto invalid_fld;
	}

	if (is_multi_taskfile(tf)) {
		unsigned int multi_count = 1 << (cdb[1] >> 5);

		/* compare the passed through multi_count
		 * with the cached multi_count of libata
		 */
		if (multi_count != dev->multi_count)
			ata_dev_warn(dev, "invalid multi_count %u ignored\n",
				     multi_count);
	}

	/*
	 * Filter SET_FEATURES - XFER MODE command -- otherwise,
	 * SET_FEATURES - XFER MODE must be preceded/succeeded
	 * by an update to hardware-specific registers for each
	 * controller (i.e. the reason for ->set_piomode(),
	 * ->set_dmamode(), and ->post_set_mode() hooks).
	 */
	if (tf->command == ATA_CMD_SET_FEATURES &&
	    tf->feature == SETFEATURES_XFER) {
		fp = (cdb[0] == ATA_16) ? 4 : 3;
		goto invalid_fld;
	}

	/*
	 * Filter TPM commands by default. These provide an
	 * essentially uncontrolled encrypted "back door" between
	 * applications and the disk. Set libata.allow_tpm=1 if you
	 * have a real reason for wanting to use them.
	 */
	if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm) {
		fp = (cdb[0] == ATA_16) ? 14 : 9;
		goto invalid_fld;
	}

	return 0;

invalid_fld:
	ata_scsi_set_invalid_field(dev, scmd, fp, 0xff);
	return 1;
}
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
/**
 *	ata_format_dsm_trim_descr() - SATL Write Same to DSM Trim
 *	@cmd: SCSI command being translated
 *	@trmax: Maximum number of entries that will fit in sector_size bytes.
 *	@sector: Starting sector
 *	@count: Total Range of request in logical sectors
 *
 *	Rewrite the WRITE SAME descriptor to be a DSM TRIM little-endian
 *	formatted descriptor: entries of the form
 *		63:48 Range Length
 *		47:0  LBA
 *	A range of more than 0xffff blocks is split across entries.
 *
 *	LOCKING:
 *	spin_lock_irqsave(ata_scsi_rbuf_lock) — the shared scratch
 *	buffer ata_scsi_rbuf is used to build the descriptor.
 *
 *	Return: Number of bytes copied into sglist.
 */
static size_t ata_format_dsm_trim_descr(struct scsi_cmnd *cmd, u32 trmax,
					u64 sector, u32 count)
{
	struct scsi_device *sdp = cmd->device;
	size_t len = sdp->sector_size;
	size_t r;
	__le64 *buf;
	u32 i = 0;
	unsigned long flags;

	WARN_ON(len > ATA_SCSI_RBUF_SIZE);

	if (len > ATA_SCSI_RBUF_SIZE)
		len = ATA_SCSI_RBUF_SIZE;

	spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
	buf = ((void *)ata_scsi_rbuf);
	memset(buf, 0, len);
	while (i < trmax) {
		/* each entry covers at most 0xffff blocks */
		u64 entry = sector |
			((u64)(count > 0xffff ? 0xffff : count) << 48);
		buf[i++] = __cpu_to_le64(entry);
		if (count <= 0xffff)
			break;
		count -= 0xffff;
		sector += 0xffff;
	}
	r = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, len);
	spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);

	return r;
}
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
/**
 *	ata_scsi_write_same_xlat() - SATL Write Same to ATA DSM TRIM
 *	@qc: Command to be translated
 *
 *	Translate a SCSI WRITE SAME command (with the UNMAP bit set) to an
 *	ATA DATA SET MANAGEMENT TRIM command, queued (FPDMA SEND) when the
 *	device supports it.
 *
 *	RETURNS:
 *	Zero on success, non-zero on failure.
 */
static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct scsi_device *sdp = scmd->device;
	size_t len = sdp->sector_size;
	struct ata_device *dev = qc->dev;
	const u8 *cdb = scmd->cmnd;
	u64 block;
	u32 n_block;
	const u32 trmax = len >> 3;	/* 8-byte TRIM entries per sector */
	u32 size;
	u16 fp;
	u8 bp = 0xff;
	u8 unmap = cdb[1] & 0x8;

	/* we may not issue DMA commands if no DMA mode is set */
	if (unlikely(!ata_dma_enabled(dev)))
		goto invalid_opcode;

	/*
	 * We only allow sending this command through the block layer,
	 * as it modifies the DATA OUT buffer, which would corrupt user
	 * memory for SG_IO commands.
	 */
	if (unlikely(blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))))
		goto invalid_opcode;

	if (unlikely(scmd->cmd_len < 16)) {
		fp = 15;
		goto invalid_fld;
	}
	scsi_16_lba_len(cdb, &block, &n_block);

	/* only WRITE SAME with UNMAP on a TRIM-capable device is handled */
	if (!unmap ||
	    (dev->horkage & ATA_HORKAGE_NOTRIM) ||
	    !ata_id_has_trim(dev->id)) {
		fp = 1;
		bp = 3;
		goto invalid_fld;
	}
	/* If the request is too large the cmd is invalid */
	if (n_block > 0xffff * trmax) {
		fp = 2;
		goto invalid_fld;
	}

	/*
	 * WRITE SAME always has a sector sized buffer as payload, this
	 * should never be a multiple entry S/G list.
	 */
	if (!scsi_sg_count(scmd))
		goto invalid_param_len;

	/*
	 * size must match sector size in bytes.
	 * For DATA SET MANAGEMENT TRIM in ACS-2 nsect (aka count)
	 * is defined as number of 512 byte blocks to be transferred.
	 */
	size = ata_format_dsm_trim_descr(scmd, trmax, block, n_block);
	if (size != len)
		goto invalid_param_len;

	if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) {
		/* Newer devices support queued TRIM commands */
		tf->protocol = ATA_PROT_NCQ;
		tf->command = ATA_CMD_FPDMA_SEND;
		tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f;
		tf->nsect = qc->hw_tag << 3;
		tf->hob_feature = (size / 512) >> 8;
		tf->feature = size / 512;

		tf->auxiliary = 1;	/* AUX: TRIM bit */
	} else {
		tf->protocol = ATA_PROT_DMA;
		tf->hob_feature = 0;
		tf->feature = ATA_DSM_TRIM;
		tf->hob_nsect = (size / 512) >> 8;
		tf->nsect = size / 512;
		tf->command = ATA_CMD_DSM;
	}

	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 |
		     ATA_TFLAG_WRITE;

	ata_qc_set_pc_nbytes(qc);

	return 0;

invalid_fld:
	ata_scsi_set_invalid_field(dev, scmd, fp, bp);
	return 1;
invalid_param_len:
	/* "Parameter list length error" */
	ata_scsi_set_sense(dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
	return 1;
invalid_opcode:
	/* "Invalid command operation code" */
	ata_scsi_set_sense(dev, scmd, ILLEGAL_REQUEST, 0x20, 0x0);
	return 1;
}
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248 static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
3249 {
3250 struct ata_device *dev = args->dev;
3251 u8 *cdb = args->cmd->cmnd;
3252 u8 supported = 0;
3253 unsigned int err = 0;
3254
3255 if (cdb[2] != 1) {
3256 ata_dev_warn(dev, "invalid command format %d\n", cdb[2]);
3257 err = 2;
3258 goto out;
3259 }
3260 switch (cdb[3]) {
3261 case INQUIRY:
3262 case MODE_SENSE:
3263 case MODE_SENSE_10:
3264 case READ_CAPACITY:
3265 case SERVICE_ACTION_IN_16:
3266 case REPORT_LUNS:
3267 case REQUEST_SENSE:
3268 case SYNCHRONIZE_CACHE:
3269 case REZERO_UNIT:
3270 case SEEK_6:
3271 case SEEK_10:
3272 case TEST_UNIT_READY:
3273 case SEND_DIAGNOSTIC:
3274 case MAINTENANCE_IN:
3275 case READ_6:
3276 case READ_10:
3277 case READ_16:
3278 case WRITE_6:
3279 case WRITE_10:
3280 case WRITE_16:
3281 case ATA_12:
3282 case ATA_16:
3283 case VERIFY:
3284 case VERIFY_16:
3285 case MODE_SELECT:
3286 case MODE_SELECT_10:
3287 case START_STOP:
3288 supported = 3;
3289 break;
3290 case ZBC_IN:
3291 case ZBC_OUT:
3292 if (ata_id_zoned_cap(dev->id) ||
3293 dev->class == ATA_DEV_ZAC)
3294 supported = 3;
3295 break;
3296 case SECURITY_PROTOCOL_IN:
3297 case SECURITY_PROTOCOL_OUT:
3298 if (dev->flags & ATA_DFLAG_TRUSTED)
3299 supported = 3;
3300 break;
3301 default:
3302 break;
3303 }
3304 out:
3305 rbuf[1] = supported;
3306 return err;
3307 }
3308
3309
3310
3311
3312
3313
3314
3315
3316
/*
 * ata_scsi_report_zones_complete() - Fix up ZAC REPORT ZONES EXT output
 * @qc: Completed command
 *
 * The ZAC reply in the data buffer is little-endian while the SCSI
 * REPORT ZONES format expected by the caller is big-endian.  Walk the
 * scatter/gather reply in place, byte-swapping the 64-byte header and
 * each 64-byte zone descriptor, then complete the command.
 *
 * Runs with IRQs disabled because the sg iterator uses an atomic kmap.
 */
static void ata_scsi_report_zones_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int bytes = 0;

	sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);

	local_irq_save(flags);
	while (sg_miter_next(&miter)) {
		unsigned int offset = 0;

		if (bytes == 0) {
			char *hdr;
			u32 list_length;
			u64 max_lba, opt_lba;
			u16 same;

			/* Swap the 64-byte reply header in place */
			hdr = miter.addr;
			list_length = get_unaligned_le32(&hdr[0]);
			same = get_unaligned_le16(&hdr[4]);
			max_lba = get_unaligned_le64(&hdr[8]);
			opt_lba = get_unaligned_le64(&hdr[16]);
			put_unaligned_be32(list_length, &hdr[0]);
			hdr[4] = same & 0xf;
			put_unaligned_be64(max_lba, &hdr[8]);
			put_unaligned_be64(opt_lba, &hdr[16]);
			offset += 64;
			bytes += 64;
		}
		while (offset < miter.length) {
			char *rec;
			u8 cond, type, non_seq, reset;
			u64 size, start, wp;

			/* Swap one 64-byte zone descriptor in place */
			rec = miter.addr + offset;
			type = rec[0] & 0xf;
			cond = (rec[1] >> 4) & 0xf;
			non_seq = (rec[1] & 2);
			reset = (rec[1] & 1);
			size = get_unaligned_le64(&rec[8]);
			start = get_unaligned_le64(&rec[16]);
			wp = get_unaligned_le64(&rec[24]);
			rec[0] = type;
			rec[1] = (cond << 4) | non_seq | reset;
			put_unaligned_be64(size, &rec[8]);
			put_unaligned_be64(start, &rec[16]);
			put_unaligned_be64(wp, &rec[24]);
			WARN_ON(offset + 64 > miter.length);
			offset += 64;
			bytes += 64;
		}
	}
	sg_miter_stop(&miter);
	local_irq_restore(flags);

	ata_scsi_qc_complete(qc);
}
3379
/*
 * ata_scsi_zbc_in_xlat() - Translate SCSI ZBC IN (REPORT ZONES)
 * @qc: Command to be translated
 *
 * Translate a REPORT ZONES command into a ZAC MGMT IN / REPORT ZONES EXT
 * command, queued (RECEIVE FPDMA QUEUED) when the device supports NCQ
 * ZAC management, otherwise plain DMA.  Completion goes through
 * ata_scsi_report_zones_complete() which byte-swaps the returned data.
 *
 * Returns 0 on success, 1 if the command was rejected with sense set.
 */
static unsigned int ata_scsi_zbc_in_xlat(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	struct scsi_cmnd *scmd = qc->scsicmd;
	const u8 *cdb = scmd->cmnd;
	u16 sect, fp = (u16)-1;
	u8 sa, options, bp = 0xff;
	u64 block;
	u32 n_block;

	if (unlikely(scmd->cmd_len < 16)) {
		ata_dev_warn(qc->dev, "invalid cdb length %d\n",
			     scmd->cmd_len);
		fp = 15;
		goto invalid_fld;
	}
	scsi_16_lba_len(cdb, &block, &n_block);
	if (n_block != scsi_bufflen(scmd)) {
		ata_dev_warn(qc->dev, "non-matching transfer count (%d/%d)\n",
			     n_block, scsi_bufflen(scmd));
		goto invalid_param_len;
	}
	sa = cdb[1] & 0x1f;
	if (sa != ZI_REPORT_ZONES) {
		ata_dev_warn(qc->dev, "invalid service action %d\n", sa);
		fp = 1;
		goto invalid_fld;
	}
	/*
	 * ZAC allows only for transfers in 512 byte blocks,
	 * and uses a 16 bit value for the transfer count.
	 */
	if ((n_block / 512) > 0xffff || n_block < 512 || (n_block % 512)) {
		ata_dev_warn(qc->dev, "invalid transfer count %d\n", n_block);
		goto invalid_param_len;
	}
	sect = n_block / 512;
	options = cdb[14] & 0xbf;	/* reporting options + partial bit */

	if (ata_ncq_enabled(qc->dev) &&
	    ata_fpdma_zac_mgmt_in_supported(qc->dev)) {
		tf->protocol = ATA_PROT_NCQ;
		tf->command = ATA_CMD_FPDMA_RECV;
		tf->hob_nsect = ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN & 0x1f;
		tf->nsect = qc->hw_tag << 3;
		tf->feature = sect & 0xff;
		tf->hob_feature = (sect >> 8) & 0xff;
		tf->auxiliary = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES | (options << 8);
	} else {
		tf->command = ATA_CMD_ZAC_MGMT_IN;
		tf->feature = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES;
		tf->protocol = ATA_PROT_DMA;
		tf->hob_feature = options;
		tf->hob_nsect = (sect >> 8) & 0xff;
		tf->nsect = sect & 0xff;
	}
	/* starting zone LBA goes into the 48-bit LBA fields */
	tf->device = ATA_LBA;
	tf->lbah = (block >> 16) & 0xff;
	tf->lbam = (block >> 8) & 0xff;
	tf->lbal = block & 0xff;
	tf->hob_lbah = (block >> 40) & 0xff;
	tf->hob_lbam = (block >> 32) & 0xff;
	tf->hob_lbal = (block >> 24) & 0xff;

	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
	qc->flags |= ATA_QCFLAG_RESULT_TF;

	ata_qc_set_pc_nbytes(qc);

	qc->complete_fn = ata_scsi_report_zones_complete;

	return 0;

invalid_fld:
	ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp);
	return 1;

invalid_param_len:
	/* "Parameter list length error" */
	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
	return 1;
}
3462
/*
 * ata_scsi_zbc_out_xlat() - Translate SCSI ZBC OUT commands
 * @qc: Command to be translated
 *
 * Translate OPEN/CLOSE/FINISH ZONE and RESET WRITE POINTER into a
 * ZAC MGMT OUT command, queued (NCQ NON-DATA) when supported, otherwise
 * as a plain non-data command.
 *
 * Returns 0 on success, 1 if the command was rejected with sense set.
 */
static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct ata_device *dev = qc->dev;
	const u8 *cdb = scmd->cmnd;
	u8 all, sa;
	u64 block;
	u32 n_block;
	u16 fp = (u16)-1;

	if (unlikely(scmd->cmd_len < 16)) {
		fp = 15;
		goto invalid_fld;
	}

	sa = cdb[1] & 0x1f;
	if ((sa != ZO_CLOSE_ZONE) && (sa != ZO_FINISH_ZONE) &&
	    (sa != ZO_OPEN_ZONE) && (sa != ZO_RESET_WRITE_POINTER)) {
		fp = 1;
		goto invalid_fld;
	}

	scsi_16_lba_len(cdb, &block, &n_block);
	if (n_block) {
		/*
		 * ZAC MANAGEMENT OUT doesn't define any transfer length.
		 */
		goto invalid_param_len;
	}

	all = cdb[14] & 0x1;
	if (all) {
		/*
		 * The zone ID is ignored when the ALL bit is set; clear it
		 * so the taskfile LBA fields are deterministic.
		 */
		block = 0;
	} else if (block >= dev->n_sectors) {
		/*
		 * The zone start LBA must be within the device capacity.
		 */
		fp = 2;
		goto invalid_fld;
	}

	if (ata_ncq_enabled(qc->dev) &&
	    ata_fpdma_zac_mgmt_out_supported(qc->dev)) {
		tf->protocol = ATA_PROT_NCQ_NODATA;
		tf->command = ATA_CMD_NCQ_NON_DATA;
		tf->feature = ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT;
		tf->nsect = qc->hw_tag << 3;
		tf->auxiliary = sa | ((u16)all << 8);
	} else {
		tf->protocol = ATA_PROT_NODATA;
		tf->command = ATA_CMD_ZAC_MGMT_OUT;
		tf->feature = sa;
		tf->hob_feature = all;
	}
	tf->lbah = (block >> 16) & 0xff;
	tf->lbam = (block >> 8) & 0xff;
	tf->lbal = block & 0xff;
	tf->hob_lbah = (block >> 40) & 0xff;
	tf->hob_lbam = (block >> 32) & 0xff;
	tf->hob_lbal = (block >> 24) & 0xff;
	tf->device = ATA_LBA;
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;

	return 0;

invalid_fld:
	ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
	return 1;
invalid_param_len:
	/* "Parameter list length error" */
	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
	return 1;
}
3540
3541
3542
3543
3544
3545
3546
3547
3548
3549
3550
3551
3552
3553 static int ata_mselect_caching(struct ata_queued_cmd *qc,
3554 const u8 *buf, int len, u16 *fp)
3555 {
3556 struct ata_taskfile *tf = &qc->tf;
3557 struct ata_device *dev = qc->dev;
3558 u8 mpage[CACHE_MPAGE_LEN];
3559 u8 wce;
3560 int i;
3561
3562
3563
3564
3565
3566
3567 if (len != CACHE_MPAGE_LEN - 2) {
3568 *fp = min(len, CACHE_MPAGE_LEN - 2);
3569 return -EINVAL;
3570 }
3571
3572 wce = buf[0] & (1 << 2);
3573
3574
3575
3576
3577 ata_msense_caching(dev->id, mpage, false);
3578 for (i = 0; i < CACHE_MPAGE_LEN - 2; i++) {
3579 if (i == 0)
3580 continue;
3581 if (mpage[i + 2] != buf[i]) {
3582 *fp = i;
3583 return -EINVAL;
3584 }
3585 }
3586
3587 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
3588 tf->protocol = ATA_PROT_NODATA;
3589 tf->nsect = 0;
3590 tf->command = ATA_CMD_SET_FEATURES;
3591 tf->feature = wce ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF;
3592 return 0;
3593 }
3594
3595
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
/*
 * ata_mselect_control() - Simulate MODE SELECT for the control page
 * @qc: queued command
 * @buf: control mode page payload (page header already stripped)
 * @len: number of valid bytes in @buf
 * @fp: out parameter; failing field offset on error
 *
 * Only the D_SENSE bit (byte 0, bit 2) may be changed; every other byte
 * must match the current page as produced by ata_msense_control().  No
 * taskfile is built — the setting only toggles ATA_DFLAG_D_SENSE on the
 * device.
 *
 * Returns 0 on success, -EINVAL on failure (with *fp set).
 */
static int ata_mselect_control(struct ata_queued_cmd *qc,
			       const u8 *buf, int len, u16 *fp)
{
	struct ata_device *dev = qc->dev;
	u8 mpage[CONTROL_MPAGE_LEN];
	u8 d_sense;
	int i;

	/*
	 * The payload excludes the two-byte page header, so offsets into
	 * mpage[] are shifted by 2 relative to buf[].
	 */
	if (len != CONTROL_MPAGE_LEN - 2) {
		*fp = min(len, CONTROL_MPAGE_LEN - 2);
		return -EINVAL;
	}

	d_sense = buf[0] & (1 << 2);

	/*
	 * Check that no read-only bits are modified; byte 0 carries the
	 * changeable D_SENSE bit and is skipped.
	 */
	ata_msense_control(dev, mpage, false);
	for (i = 0; i < CONTROL_MPAGE_LEN - 2; i++) {
		if (i == 0)
			continue;
		if (mpage[2 + i] != buf[i]) {
			*fp = i;
			return -EINVAL;
		}
	}
	if (d_sense & (1 << 2))
		dev->flags |= ATA_DFLAG_D_SENSE;
	else
		dev->flags &= ~ATA_DFLAG_D_SENSE;
	return 0;
}
3645
3646
3647
3648
3649
3650
3651
3652
3653
3654
3655
3656
/*
 * ata_scsi_mode_select_xlat() - Simulate MODE SELECT 6, 10 commands
 * @qc: Storage for translated ATA taskfile
 *
 * Converts a MODE SELECT command to an ATA SET FEATURES taskfile (for
 * the caching page) or applies the setting directly (control page).
 * Assumes the device is fully SAT compliant: only PF=1 / SP=0 is
 * accepted and exactly one mode page may be changed per command.
 *
 * Returns 0 when a taskfile was built, 1 otherwise (either an error,
 * with sense data set, or a "skip" completion with GOOD status).
 */
static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	const u8 *cdb = scmd->cmnd;
	u8 pg, spg;
	unsigned six_byte, pg_len, hdr_len, bd_len;
	int len;
	u16 fp = (u16)-1;
	u8 bp = 0xff;
	u8 buffer[64];
	const u8 *p = buffer;

	six_byte = (cdb[0] == MODE_SELECT);
	if (six_byte) {
		if (scmd->cmd_len < 5) {
			fp = 4;
			goto invalid_fld;
		}

		len = cdb[4];
		hdr_len = 4;
	} else {
		if (scmd->cmd_len < 9) {
			fp = 8;
			goto invalid_fld;
		}

		len = get_unaligned_be16(&cdb[7]);
		hdr_len = 8;
	}

	/* We only support PF=1, SP=0.  */
	if ((cdb[1] & 0x11) != 0x10) {
		fp = 1;
		bp = (cdb[1] & 0x01) ? 1 : 5;
		goto invalid_fld;
	}

	/* Test early for possible overrun.  */
	if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len)
		goto invalid_param_len;

	/* Mode select data must at least include the mode parameter header */
	if (len < hdr_len)
		goto invalid_param_len;

	if (!sg_copy_to_buffer(scsi_sglist(scmd), scsi_sg_count(scmd),
			       buffer, sizeof(buffer)))
		goto invalid_param_len;

	if (six_byte)
		bd_len = p[3];
	else
		bd_len = get_unaligned_be16(&p[6]);

	/* Skip the mode parameter header and block descriptors */
	len -= hdr_len;
	p += hdr_len;
	if (len < bd_len)
		goto invalid_param_len;
	if (bd_len != 0 && bd_len != 8) {
		fp = (six_byte) ? 3 : 6;
		fp += bd_len + hdr_len;
		goto invalid_param;
	}

	len -= bd_len;
	p += bd_len;
	if (len == 0)
		goto skip;

	/* Parse both possible formats for the mode page headers.  */
	pg = p[0] & 0x3f;
	if (p[0] & 0x40) {
		/* SPF=1: sub-page format, 4-byte page header */
		if (len < 4)
			goto invalid_param_len;

		spg = p[1];
		pg_len = get_unaligned_be16(&p[2]);
		p += 4;
		len -= 4;
	} else {
		/* SPF=0: page_0 format, 2-byte page header */
		if (len < 2)
			goto invalid_param_len;

		spg = 0;
		pg_len = p[1];
		p += 2;
		len -= 2;
	}

	/*
	 * No mode subpages supported (yet) but asking for _all_
	 * subpages may be valid.
	 */
	if (spg && (spg != ALL_SUB_MPAGES)) {
		fp = (p[0] & 0x40) ? 1 : 0;
		fp += hdr_len + bd_len;
		goto invalid_param;
	}
	if (pg_len > len)
		goto invalid_param_len;

	switch (pg) {
	case CACHE_MPAGE:
		if (ata_mselect_caching(qc, p, pg_len, &fp) < 0) {
			fp += hdr_len + bd_len;
			goto invalid_param;
		}
		break;
	case CONTROL_MPAGE:
		if (ata_mselect_control(qc, p, pg_len, &fp) < 0) {
			fp += hdr_len + bd_len;
			goto invalid_param;
		} else {
			/* No taskfile to issue for the control page */
			goto skip;
		}
		break;
	default:
		/* Invalid page code */
		fp = bd_len + hdr_len;
		goto invalid_param;
	}

	/*
	 * Only one page has changeable data, so we only support setting one
	 * page at a time.
	 */
	if (len > pg_len)
		goto invalid_param;

	return 0;

invalid_fld:
	ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp);
	return 1;

invalid_param:
	ata_scsi_set_invalid_parameter(qc->dev, scmd, fp);
	return 1;

invalid_param_len:
	/* "Parameter list length error" */
	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
	return 1;

skip:
	scmd->result = SAM_STAT_GOOD;
	return 1;
}
3805
3806 static u8 ata_scsi_trusted_op(u32 len, bool send, bool dma)
3807 {
3808 if (len == 0)
3809 return ATA_CMD_TRUSTED_NONDATA;
3810 else if (send)
3811 return dma ? ATA_CMD_TRUSTED_SND_DMA : ATA_CMD_TRUSTED_SND;
3812 else
3813 return dma ? ATA_CMD_TRUSTED_RCV_DMA : ATA_CMD_TRUSTED_RCV;
3814 }
3815
/*
 * ata_scsi_security_inout_xlat() - Translate SECURITY PROTOCOL IN/OUT
 * @qc: Command to be translated
 *
 * Translate SECURITY PROTOCOL IN/OUT into ATA TRUSTED RECEIVE/SEND
 * (DMA variants when the device is not PIO-only).
 *
 * Returns 0 when the taskfile has been set up, 1 when the command was
 * rejected with sense data set.
 */
static unsigned int ata_scsi_security_inout_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	const u8 *cdb = scmd->cmnd;
	struct ata_taskfile *tf = &qc->tf;
	u8 secp = cdb[1];
	bool send = (cdb[0] == SECURITY_PROTOCOL_OUT);
	u16 spsp = get_unaligned_be16(&cdb[2]);
	u32 len = get_unaligned_be32(&cdb[6]);
	bool dma = !(qc->dev->flags & ATA_DFLAG_PIO);

	/*
	 * Security protocol 0xef is rejected here — presumably the
	 * SAT-reserved ATA device server protocol; TODO confirm against SAT.
	 */
	if (secp == 0xef) {
		ata_scsi_set_invalid_field(qc->dev, scmd, 1, 0);
		return 1;
	}

	if (cdb[4] & 7) { /* INC_512 */
		if (len > 0xffff) {
			ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0);
			return 1;
		}
	} else {
		if (len > 0x01fffe00) {
			ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0);
			return 1;
		}

		/* convert to the number of 512 byte blocks */
		len = (len + 511) / 512;
	}

	tf->protocol = dma ? ATA_PROT_DMA : ATA_PROT_PIO;
	tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR | ATA_TFLAG_LBA;
	if (send)
		tf->flags |= ATA_TFLAG_WRITE;
	tf->command = ata_scsi_trusted_op(len, send, dma);
	tf->feature = secp;
	tf->lbam = spsp & 0xff;
	tf->lbah = spsp >> 8;

	if (len) {
		tf->nsect = len & 0xff;
		tf->lbal = len >> 8;
	} else {
		/* zero-length TRUSTED RECEIVE sets bit 7 of LBA high */
		if (!send)
			tf->lbah = (1 << 7);
	}

	ata_qc_set_pc_nbytes(qc);
	return 0;
}
3870
3871
3872
3873
3874
3875
3876
3877
3878
3879
3880
3881
3882 static unsigned int ata_scsi_var_len_cdb_xlat(struct ata_queued_cmd *qc)
3883 {
3884 struct scsi_cmnd *scmd = qc->scsicmd;
3885 const u8 *cdb = scmd->cmnd;
3886 const u16 sa = get_unaligned_be16(&cdb[8]);
3887
3888
3889
3890
3891
3892 if (sa == ATA_32)
3893 return ata_scsi_pass_thru(qc);
3894
3895
3896 return 1;
3897 }
3898
3899
3900
3901
3902
3903
3904
3905
3906
3907
3908
3909
3910
/**
 * ata_get_xlat_func - check if SCSI to ATA translation is possible
 * @dev: ATA device
 * @cmd: SCSI command opcode to consider
 *
 * Look up the SCSI command given, and determine whether the
 * SCSI command is to be translated or simulated.
 *
 * RETURNS:
 * Pointer to translation function if possible, %NULL if not.
 */
static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
{
	switch (cmd) {
	case READ_6:
	case READ_10:
	case READ_16:

	case WRITE_6:
	case WRITE_10:
	case WRITE_16:
		return ata_scsi_rw_xlat;

	case WRITE_SAME_16:
		return ata_scsi_write_same_xlat;

	case SYNCHRONIZE_CACHE:
		/* Only translate when flushing the cache is supported */
		if (ata_try_flush_cache(dev))
			return ata_scsi_flush_xlat;
		break;

	case VERIFY:
	case VERIFY_16:
		return ata_scsi_verify_xlat;

	case ATA_12:
	case ATA_16:
		return ata_scsi_pass_thru;

	case VARIABLE_LENGTH_CMD:
		return ata_scsi_var_len_cdb_xlat;

	case MODE_SELECT:
	case MODE_SELECT_10:
		return ata_scsi_mode_select_xlat;

	case ZBC_IN:
		return ata_scsi_zbc_in_xlat;

	case ZBC_OUT:
		return ata_scsi_zbc_out_xlat;

	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		/* Only translate when trusted send/receive is enabled */
		if (!(dev->flags & ATA_DFLAG_TRUSTED))
			break;
		return ata_scsi_security_inout_xlat;

	case START_STOP:
		return ata_scsi_start_stop_xlat;
	}

	return NULL;
}
3964
/*
 * __ata_scsi_queuecmd() - issue a SCSI command to a known ATA device
 * @scmd: SCSI command to be sent
 * @dev: target ATA device
 *
 * For ATA/ZAC devices the command is either translated (when a
 * translation function exists for its opcode) or simulated.  For other
 * device classes the command is relayed to the ATAPI device, except
 * that ATA_16 may be treated as an ATA passthrough when
 * atapi_passthru16 is set.
 *
 * Returns 0 when the command has been queued or completed, nonzero on
 * translation failure (as returned by ata_scsi_translate()).
 */
int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
{
	u8 scsi_op = scmd->cmnd[0];
	ata_xlat_func_t xlat_func;

	if (unlikely(!scmd->cmd_len))
		goto bad_cdb_len;

	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
		if (unlikely(scmd->cmd_len > dev->cdb_len))
			goto bad_cdb_len;

		xlat_func = ata_get_xlat_func(dev, scsi_op);
	} else if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
		/* relay SCSI command to ATAPI device */
		int len = COMMAND_SIZE(scsi_op);

		if (unlikely(len > scmd->cmd_len ||
			     len > dev->cdb_len ||
			     scmd->cmd_len > ATAPI_CDB_LEN))
			goto bad_cdb_len;

		xlat_func = atapi_xlat;
	} else {
		/* ATA_16 passthru, treat as an ATA command */
		if (unlikely(scmd->cmd_len > 16))
			goto bad_cdb_len;

		xlat_func = ata_get_xlat_func(dev, scsi_op);
	}

	if (xlat_func)
		return ata_scsi_translate(dev, scmd, xlat_func);

	ata_scsi_simulate(dev, scmd);

	return 0;

bad_cdb_len:
	scmd->result = DID_ERROR << 16;
	scsi_done(scmd);
	return 0;
}
4008
4009
4010
4011
4012
4013
4014
4015
4016
4017
4018
4019
4020
4021
4022
4023
4024
4025
4026
4027
4028 int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
4029 {
4030 struct ata_port *ap;
4031 struct ata_device *dev;
4032 struct scsi_device *scsidev = cmd->device;
4033 int rc = 0;
4034 unsigned long irq_flags;
4035
4036 ap = ata_shost_to_port(shost);
4037
4038 spin_lock_irqsave(ap->lock, irq_flags);
4039
4040 dev = ata_scsi_find_dev(ap, scsidev);
4041 if (likely(dev))
4042 rc = __ata_scsi_queuecmd(cmd, dev);
4043 else {
4044 cmd->result = (DID_BAD_TARGET << 16);
4045 scsi_done(cmd);
4046 }
4047
4048 spin_unlock_irqrestore(ap->lock, irq_flags);
4049
4050 return rc;
4051 }
4052 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4053
4054
4055
4056
4057
4058
4059
4060
4061
4062
4063
4064
4065
/**
 * ata_scsi_simulate - simulate SCSI command on ATA device
 * @dev: the target device
 * @cmd: SCSI command being sent to device.
 *
 * Interprets and directly executes a select list of SCSI commands
 * that can be handled internally, without issuing anything to the
 * device.  Always completes @cmd via scsi_done().
 */
void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
{
	struct ata_scsi_args args;
	const u8 *scsicmd = cmd->cmnd;
	u8 tmp8;

	args.dev = dev;
	args.id = dev->id;
	args.cmd = cmd;

	switch(scsicmd[0]) {
	case INQUIRY:
		if (scsicmd[1] & 2)		   /* is CmdDt set?  */
			ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
		else if ((scsicmd[1] & 1) == 0)	   /* is EVPD clear? */
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
		else switch (scsicmd[2]) {	   /* VPD page code */
		case 0x00:
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
			break;
		case 0x80:
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
			break;
		case 0x83:
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
			break;
		case 0x89:
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89);
			break;
		case 0xb0:
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b0);
			break;
		case 0xb1:
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1);
			break;
		case 0xb2:
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2);
			break;
		case 0xb6:
			/* Zoned Block Device Characteristics: ZAC only */
			if (dev->flags & ATA_DFLAG_ZAC)
				ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b6);
			else
				ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
			break;
		case 0xb9:
			/* Concurrent Positioning Ranges: needs a CPR log */
			if (dev->cpr_log)
				ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b9);
			else
				ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
			break;
		default:
			ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
			break;
		}
		break;

	case MODE_SENSE:
	case MODE_SENSE_10:
		ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
		break;

	case READ_CAPACITY:
		ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
		break;

	case SERVICE_ACTION_IN_16:
		if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
			ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
		else
			ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
		break;

	case REPORT_LUNS:
		ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
		break;

	case REQUEST_SENSE:
		/* Report "no sense" — nothing pending */
		ata_scsi_set_sense(dev, cmd, 0, 0, 0);
		break;

	/*
	 * SYNCHRONIZE CACHE only reaches this point when it was not
	 * translated (see ata_get_xlat_func()); complete as a no-op.
	 */
	case SYNCHRONIZE_CACHE:
		fallthrough;

	/* no-op's, complete with success */
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
	case TEST_UNIT_READY:
		break;

	case SEND_DIAGNOSTIC:
		tmp8 = scsicmd[1] & ~(1 << 3);
		/* only self-test variant without parameters is accepted */
		if (tmp8 != 0x4 || scsicmd[3] || scsicmd[4])
			ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
		break;

	case MAINTENANCE_IN:
		if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES)
			ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
		else
			ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
		break;

	/* all other commands */
	default:
		/* "Invalid command operation code" */
		ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x20, 0x0);
		break;
	}

	scsi_done(cmd);
}
4181
/*
 * ata_scsi_add_hosts() - allocate and register one SCSI host per ATA port
 * @host: ATA host
 * @sht: SCSI host template to use
 *
 * On failure, already-registered hosts are removed again; the final
 * scsi_host_put() is expected to happen elsewhere (see comment on the
 * error path).
 *
 * Returns 0 on success, negative errno on failure.
 */
int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct Scsi_Host *shost;

		rc = -ENOMEM;
		shost = scsi_host_alloc(sht, sizeof(struct ata_port *));
		if (!shost)
			goto err_alloc;

		shost->eh_noresume = 1;
		/* stash the owning port in the host's private data */
		*(struct ata_port **)&shost->hostdata[0] = ap;
		ap->scsi_host = shost;

		shost->transportt = ata_scsi_transport_template;
		shost->unique_id = ap->print_id;
		shost->max_id = 16;
		shost->max_lun = 1;
		shost->max_channel = 1;
		shost->max_cmd_len = 32;

		/*
		 * Do not block anything for long: host_blocked of 1 keeps
		 * the queue moving as soon as one command completes.
		 */
		shost->max_host_blocked = 1;

		rc = scsi_add_host_with_dma(shost, &ap->tdev, ap->host->dev);
		if (rc)
			goto err_alloc;
	}

	return 0;

 err_alloc:
	while (--i >= 0) {
		struct Scsi_Host *shost = host->ports[i]->scsi_host;

		/* scsi_host_put() is done by the devres release */
		scsi_remove_host(shost);
	}
	return rc;
}
4229
4230 #ifdef CONFIG_OF
/*
 * ata_scsi_assign_ofnode() - attach a device-tree node to the SCSI device
 * @dev: ATA device for which the node is looked up
 * @ap: owning ATA port (provides the host's of_node)
 *
 * Scans the host's DT children for one whose "reg" property matches
 * dev->devno and stores it in the scsi_device's of_node.
 *
 * NOTE(review): the early return keeps the child reference taken by
 * for_each_available_child_of_node() — presumably intentional since the
 * node is stored in of_node; confirm the refcount ownership convention.
 */
static void ata_scsi_assign_ofnode(struct ata_device *dev, struct ata_port *ap)
{
	struct scsi_device *sdev = dev->sdev;
	struct device *d = ap->host->dev;
	struct device_node *np = d->of_node;
	struct device_node *child;

	for_each_available_child_of_node(np, child) {
		int ret;
		u32 val;

		ret = of_property_read_u32(child, "reg", &val);
		if (ret)
			continue;
		if (val == dev->devno) {
			dev_dbg(d, "found matching device node\n");
			sdev->sdev_gendev.of_node = child;
			return;
		}
	}
}
4252 #else
/* No device-tree support: nothing to associate with the SCSI device. */
static void ata_scsi_assign_ofnode(struct ata_device *dev, struct ata_port *ap)
{
}
4256 #endif
4257
/*
 * ata_scsi_scan_host() - attach SCSI devices for all enabled ATA devices
 * @ap: port to scan
 * @sync: if nonzero, retry failed attachments synchronously (up to 5
 *        times once progress stalls) before falling back to the async
 *        hotplug work
 *
 * Host-link devices map to SCSI target ids, PMP-link devices to SCSI
 * channels.
 */
void ata_scsi_scan_host(struct ata_port *ap, int sync)
{
	int tries = 5;
	struct ata_device *last_failed_dev = NULL;
	struct ata_link *link;
	struct ata_device *dev;

 repeat:
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ENABLED) {
			struct scsi_device *sdev;
			int channel = 0, id = 0;

			/* already attached */
			if (dev->sdev)
				continue;

			if (ata_is_host_link(link))
				id = dev->devno;
			else
				channel = link->pmp;

			sdev = __scsi_add_device(ap->scsi_host, channel, id, 0,
						 NULL);
			if (!IS_ERR(sdev)) {
				dev->sdev = sdev;
				ata_scsi_assign_ofnode(dev, ap);
				scsi_device_put(sdev);
			} else {
				dev->sdev = NULL;
			}
		}
	}

	/*
	 * If we scanned while EH was in progress or allocation failed,
	 * some devices may be missing their SCSI counterparts.  Look for
	 * the first enabled device without an sdev.
	 */
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ENABLED) {
			if (!dev->sdev)
				goto exit_loop;
		}
	}
 exit_loop:
	/* all devices attached */
	if (!link)
		return;

	if (sync) {
		/*
		 * As long as a different device fails each round we are
		 * making progress — keep retrying without counting down.
		 */
		if (dev != last_failed_dev) {
			msleep(100);
			last_failed_dev = dev;
			goto repeat;
		}

		/*
		 * The same device failed twice in a row: retry a bounded
		 * number of times before giving up on synchronous scan.
		 */
		if (--tries) {
			msleep(100);
			goto repeat;
		}

		ata_port_err(ap,
			     "WARNING: synchronous SCSI scan failed without making any progress, switching to async\n");
	}

	/* retry later via the hotplug work item */
	queue_delayed_work(system_long_wq, &ap->hotplug_task,
			   round_jiffies_relative(HZ));
}
4331
4332
4333
4334
4335
4336
4337
4338
4339
4340
4341
4342
4343
4344
4345
4346
4347 int ata_scsi_offline_dev(struct ata_device *dev)
4348 {
4349 if (dev->sdev) {
4350 scsi_device_set_state(dev->sdev, SDEV_OFFLINE);
4351 return 1;
4352 }
4353 return 0;
4354 }
4355
4356
4357
4358
4359
4360
4361
4362
4363
4364
4365
/*
 * ata_scsi_remove_dev() - detach the SCSI device attached to an ATA device
 * @dev: ATA device to detach the SCSI device from
 *
 * Clears dev->sdev under scan_mutex + port lock, pins the sdev with a
 * reference while holding the lock, then performs the actual (sleeping)
 * scsi_remove_device() after the locks are dropped.
 */
static void ata_scsi_remove_dev(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct scsi_device *sdev;
	unsigned long flags;

	/*
	 * Take scan_mutex and the port lock so that neither a concurrent
	 * scan nor command submission can race with clearing dev->sdev.
	 */
	mutex_lock(&ap->scsi_host->scan_mutex);
	spin_lock_irqsave(ap->lock, flags);

	/* clear the association first, under the lock */
	sdev = dev->sdev;
	dev->sdev = NULL;

	if (sdev) {
		/*
		 * Grab a reference so the sdev stays valid after we drop
		 * the locks; mark it offline so no new commands start.
		 */
		if (scsi_device_get(sdev) == 0) {
			/*
			 * Reference acquired; removal happens below after
			 * unlocking, since scsi_remove_device() may sleep.
			 */
			scsi_device_set_state(sdev, SDEV_OFFLINE);
		} else {
			/* sdev is already going away — nothing to remove */
			WARN_ON(1);
			sdev = NULL;
		}
	}

	spin_unlock_irqrestore(ap->lock, flags);
	mutex_unlock(&ap->scsi_host->scan_mutex);

	if (sdev) {
		ata_dev_info(dev, "detaching (SCSI %s)\n",
			     dev_name(&sdev->sdev_gendev));

		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}
}
4414
/*
 * ata_scsi_handle_link_detach() - detach SCSI devices of detached ATA devices
 * @link: link whose devices should be checked
 *
 * For each device flagged ATA_DFLAG_DETACHED, clear the flag under the
 * port lock, tear down ZPODD state if enabled, and remove the attached
 * SCSI device.
 */
static void ata_scsi_handle_link_detach(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;

	ata_for_each_dev(dev, link, ALL) {
		unsigned long flags;

		if (!(dev->flags & ATA_DFLAG_DETACHED))
			continue;

		/* clear the flag atomically w.r.t. other flag updates */
		spin_lock_irqsave(ap->lock, flags);
		dev->flags &= ~ATA_DFLAG_DETACHED;
		spin_unlock_irqrestore(ap->lock, flags);

		if (zpodd_dev_enabled(dev))
			zpodd_exit(dev);

		ata_scsi_remove_dev(dev);
	}
}
4436
4437
4438
4439
4440
4441
4442
4443
4444
4445
4446
4447 void ata_scsi_media_change_notify(struct ata_device *dev)
4448 {
4449 if (dev->sdev)
4450 sdev_evt_send_simple(dev->sdev, SDEV_EVT_MEDIA_CHANGE,
4451 GFP_ATOMIC);
4452 }
4453
4454
4455
4456
4457
4458
4459
4460
4461
4462
4463
4464
4465
/**
 * ata_scsi_hotplug - SCSI part of hotplug
 * @work: Pointer to ATA port to perform SCSI hotplug on
 *
 * Perform SCSI part of hotplug: first detach devices flagged as
 * detached on every link (host link and, if present, all PMP links),
 * then attach any newly enabled devices.  Serialized against rescans
 * via scsi_scan_mutex.
 */
void ata_scsi_hotplug(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, hotplug_task.work);
	int i;

	/* port is being unloaded — don't touch anything */
	if (ap->pflags & ATA_PFLAG_UNLOADING)
		return;

	mutex_lock(&ap->scsi_scan_mutex);

	/*
	 * Unplug detached devices first so their SCSI ids / channels
	 * become free before scanning for new ones.
	 */
	ata_scsi_handle_link_detach(&ap->link);
	if (ap->pmp_link)
		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_scsi_handle_link_detach(&ap->pmp_link[i]);

	/* scan for new ones (async mode) */
	ata_scsi_scan_host(ap, 0);

	mutex_unlock(&ap->scsi_scan_mutex);
}
4491
4492
4493
4494
4495
4496
4497
4498
4499
4500
4501
4502
4503
4504
4505
4506
4507
/**
 * ata_scsi_user_scan - indication for user-initiated bus scan
 * @shost: SCSI host to scan
 * @channel: Channel to scan
 * @id: ID to scan
 * @lun: LUN to scan
 *
 * Schedules EH with probing/reset requested for the selected device(s)
 * and, on success, waits for EH to finish.  Without a port multiplier
 * the SCSI id selects the device; with one, the channel does.  Only
 * LUN 0 (or a wildcard LUN) is valid.
 *
 * RETURNS:
 * Zero on success, negative errno on failure.
 */
int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
		       unsigned int id, u64 lun)
{
	struct ata_port *ap = ata_shost_to_port(shost);
	unsigned long flags;
	int devno, rc = 0;

	/* scanning requires EH support */
	if (!ap->ops->error_handler)
		return -EOPNOTSUPP;

	if (lun != SCAN_WILD_CARD && lun)
		return -EINVAL;

	if (!sata_pmp_attached(ap)) {
		if (channel != SCAN_WILD_CARD && channel)
			return -EINVAL;
		devno = id;
	} else {
		if (id != SCAN_WILD_CARD && id)
			return -EINVAL;
		devno = channel;
	}

	spin_lock_irqsave(ap->lock, flags);

	if (devno == SCAN_WILD_CARD) {
		/* probe everything on every link */
		struct ata_link *link;

		ata_for_each_link(link, ap, EDGE) {
			struct ata_eh_info *ehi = &link->eh_info;
			ehi->probe_mask |= ATA_ALL_DEVICES;
			ehi->action |= ATA_EH_RESET;
		}
	} else {
		struct ata_device *dev = ata_find_dev(ap, devno);

		if (dev) {
			struct ata_eh_info *ehi = &dev->link->eh_info;
			ehi->probe_mask |= 1 << dev->devno;
			ehi->action |= ATA_EH_RESET;
		} else
			rc = -EINVAL;
	}

	if (rc == 0) {
		ata_port_schedule_eh(ap);
		spin_unlock_irqrestore(ap->lock, flags);
		/* wait for the scheduled EH to complete */
		ata_port_wait_eh(ap);
	} else
		spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}
4561
4562
4563
4564
4565
4566
4567
4568
4569
4570
4571
/**
 * ata_scsi_dev_rescan - initiate scsi_rescan_device()
 * @work: Pointer to ATA port to perform scsi_rescan_device()
 *
 * Rescan every attached SCSI device on the port.  The port lock is
 * dropped around the (sleeping) scsi_rescan_device() call; a reference
 * taken via scsi_device_get() keeps the sdev valid across the unlocked
 * section.
 */
void ata_scsi_dev_rescan(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, scsi_rescan_task);
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	mutex_lock(&ap->scsi_scan_mutex);
	spin_lock_irqsave(ap->lock, flags);

	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ENABLED) {
			struct scsi_device *sdev = dev->sdev;

			if (!sdev)
				continue;
			/* sdev already being torn down — skip it */
			if (scsi_device_get(sdev))
				continue;

			/* rescan may sleep; drop the lock around it */
			spin_unlock_irqrestore(ap->lock, flags);
			scsi_rescan_device(&(sdev->sdev_gendev));
			scsi_device_put(sdev);
			spin_lock_irqsave(ap->lock, flags);
		}
	}

	spin_unlock_irqrestore(ap->lock, flags);
	mutex_unlock(&ap->scsi_scan_mutex);
}