// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * libata-sff.c - helper library for SFF (legacy taskfile / BMDMA) ATA controllers
 *
 * Part of the libata subsystem; see Documentation/driver-api/libata.rst
 * for libata documentation.
 */

0015 #include <linux/kernel.h>
0016 #include <linux/gfp.h>
0017 #include <linux/pci.h>
0018 #include <linux/module.h>
0019 #include <linux/libata.h>
0020 #include <linux/highmem.h>
0021 #include <trace/events/libata.h>
0022 #include "libata.h"
0023
0024 static struct workqueue_struct *ata_sff_wq;
0025
0026 const struct ata_port_operations ata_sff_port_ops = {
0027 .inherits = &ata_base_port_ops,
0028
0029 .qc_prep = ata_noop_qc_prep,
0030 .qc_issue = ata_sff_qc_issue,
0031 .qc_fill_rtf = ata_sff_qc_fill_rtf,
0032
0033 .freeze = ata_sff_freeze,
0034 .thaw = ata_sff_thaw,
0035 .prereset = ata_sff_prereset,
0036 .softreset = ata_sff_softreset,
0037 .hardreset = sata_sff_hardreset,
0038 .postreset = ata_sff_postreset,
0039 .error_handler = ata_sff_error_handler,
0040
0041 .sff_dev_select = ata_sff_dev_select,
0042 .sff_check_status = ata_sff_check_status,
0043 .sff_tf_load = ata_sff_tf_load,
0044 .sff_tf_read = ata_sff_tf_read,
0045 .sff_exec_command = ata_sff_exec_command,
0046 .sff_data_xfer = ata_sff_data_xfer,
0047 .sff_drain_fifo = ata_sff_drain_fifo,
0048
0049 .lost_interrupt = ata_sff_lost_interrupt,
0050 };
0051 EXPORT_SYMBOL_GPL(ata_sff_port_ops);

/**
 * ata_sff_check_status - Read device status register
 * @ap: port where the device is
 *
 * Reads the ATA taskfile status register of the currently selected
 * device and returns its value.  Reading the status register also
 * clears a pending device interrupt.
 *
 * LOCKING:
 * Inherited from caller.
 */
0064 u8 ata_sff_check_status(struct ata_port *ap)
0065 {
0066 return ioread8(ap->ioaddr.status_addr);
0067 }
0068 EXPORT_SYMBOL_GPL(ata_sff_check_status);

/**
 * ata_sff_altstatus - Read the alternate status register
 * @ap: port where the device is
 * @status: where to store the value read, may be NULL
 *
 * Reads the alternate status register of the currently selected device,
 * using either the ->sff_check_altstatus() method or the mapped
 * altstatus address.  Unlike the main status register, reading
 * altstatus does not clear a pending interrupt.
 *
 * RETURNS:
 * true if the alternate status register exists, false otherwise.
 */
0084 static bool ata_sff_altstatus(struct ata_port *ap, u8 *status)
0085 {
0086 u8 tmp;
0087
0088 if (ap->ops->sff_check_altstatus) {
0089 tmp = ap->ops->sff_check_altstatus(ap);
0090 goto read;
0091 }
0092 if (ap->ioaddr.altstatus_addr) {
0093 tmp = ioread8(ap->ioaddr.altstatus_addr);
0094 goto read;
0095 }
0096 return false;
0097
0098 read:
0099 if (status)
0100 *status = tmp;
0101 return true;
0102 }

/**
 * ata_sff_irq_status - read the status register for IRQ handling
 * @ap: port where the device is
 *
 * Check whether the port is busy, using the alternate status register
 * where available so that a pending interrupt is not cleared while the
 * device is still busy; otherwise read the main status register, which
 * acknowledges the interrupt.
 *
 * RETURNS:
 * The current device status.
 */
0116 static u8 ata_sff_irq_status(struct ata_port *ap)
0117 {
0118 u8 status;
0119
0120
0121 if (ata_sff_altstatus(ap, &status) && (status & ATA_BUSY))
0122 return status;
0123
0124 status = ap->ops->sff_check_status(ap);
0125 return status;
0126 }

/**
 * ata_sff_sync - Flush writes
 * @ap: Port to wait for.
 *
 * Reading the alternate status register forces posted writes to the
 * device to be flushed.  If the port has neither a ctl address nor an
 * altstatus method this is a no-op.
 *
 * LOCKING:
 * Inherited from caller.
 */
0140 static void ata_sff_sync(struct ata_port *ap)
0141 {
0142 ata_sff_altstatus(ap, NULL);
0143 }

/**
 * ata_sff_pause - Flush writes and wait 400ns
 * @ap: Port to pause for.
 *
 * Flush any posted writes and pause for the 400ns the ATA
 * specification requires between register accesses.
 *
 * LOCKING:
 * Inherited from caller.
 */
0157 void ata_sff_pause(struct ata_port *ap)
0158 {
0159 ata_sff_sync(ap);
0160 ndelay(400);
0161 }
0162 EXPORT_SYMBOL_GPL(ata_sff_pause);

/**
 * ata_sff_dma_pause - Pause before commencing DMA
 * @ap: Port to pause for.
 *
 * Perform I/O fencing and ensure sufficient cycle delays occur
 * for the HDMA1:0 transition.
 */
0172 void ata_sff_dma_pause(struct ata_port *ap)
0173 {
/*
 * An altstatus read serves as both the I/O fence and the required
 * delay, without disturbing the interrupt status.
 */
0178 if (ata_sff_altstatus(ap, NULL))
0179 return;

/*
 * There are no known SFF/BMDMA controllers without a ctl or altstatus
 * register; getting here would risk violating the HDMA1:0 transition
 * timing, so complain loudly.
 */
0183 BUG();
0184 }
0185 EXPORT_SYMBOL_GPL(ata_sff_dma_pause);

/**
 * ata_sff_busy_sleep - sleep until BSY clears, or timeout
 * @ap: port containing status register to be polled
 * @tmout_pat: impatience timeout in msecs
 * @tmout: overall timeout in msecs
 *
 * Sleep until the ATA Status register bit BSY clears, or a
 * timeout occurs.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
0202 int ata_sff_busy_sleep(struct ata_port *ap,
0203 unsigned long tmout_pat, unsigned long tmout)
0204 {
0205 unsigned long timer_start, timeout;
0206 u8 status;
0207
0208 status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
0209 timer_start = jiffies;
0210 timeout = ata_deadline(timer_start, tmout_pat);
0211 while (status != 0xff && (status & ATA_BUSY) &&
0212 time_before(jiffies, timeout)) {
0213 ata_msleep(ap, 50);
0214 status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
0215 }
0216
0217 if (status != 0xff && (status & ATA_BUSY))
0218 ata_port_warn(ap,
0219 "port is slow to respond, please be patient (Status 0x%x)\n",
0220 status);
0221
0222 timeout = ata_deadline(timer_start, tmout);
0223 while (status != 0xff && (status & ATA_BUSY) &&
0224 time_before(jiffies, timeout)) {
0225 ata_msleep(ap, 50);
0226 status = ap->ops->sff_check_status(ap);
0227 }
0228
0229 if (status == 0xff)
0230 return -ENODEV;
0231
0232 if (status & ATA_BUSY) {
0233 ata_port_err(ap,
0234 "port failed to respond (%lu secs, Status 0x%x)\n",
0235 DIV_ROUND_UP(tmout, 1000), status);
0236 return -EBUSY;
0237 }
0238
0239 return 0;
0240 }
0241 EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
0242
0243 static int ata_sff_check_ready(struct ata_link *link)
0244 {
0245 u8 status = link->ap->ops->sff_check_status(link->ap);
0246
0247 return ata_check_ready(status);
0248 }

/**
 * ata_sff_wait_ready - sleep until BSY clears, or timeout
 * @link: SFF link to wait ready status for
 * @deadline: deadline jiffies for the operation
 *
 * Sleep until BSY clears, or timeout.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
0264 int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
0265 {
0266 return ata_wait_ready(link, deadline, ata_sff_check_ready);
0267 }
0268 EXPORT_SYMBOL_GPL(ata_sff_wait_ready);

/**
 * ata_sff_set_devctl - Write device control register
 * @ap: port where the device is
 * @ctl: value to write
 *
 * Writes the device control register, using either the
 * ->sff_set_devctl() method or the mapped ctl address.
 *
 * RETURNS:
 * true if the device control register exists, false otherwise.
 */
0283 static bool ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
0284 {
0285 if (ap->ops->sff_set_devctl) {
0286 ap->ops->sff_set_devctl(ap, ctl);
0287 return true;
0288 }
0289 if (ap->ioaddr.ctl_addr) {
0290 iowrite8(ctl, ap->ioaddr.ctl_addr);
0291 return true;
0292 }
0293
0294 return false;
0295 }

/**
 * ata_sff_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.  Works with both PIO and MMIO.
 *
 * LOCKING:
 * caller.
 */
0311 void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
0312 {
0313 u8 tmp;
0314
0315 if (device == 0)
0316 tmp = ATA_DEVICE_OBS;
0317 else
0318 tmp = ATA_DEVICE_OBS | ATA_DEV1;
0319
0320 iowrite8(tmp, ap->ioaddr.device_addr);
0321 ata_sff_pause(ap);
0322 }
0323 EXPORT_SYMBOL_GPL(ata_sff_dev_select);

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * A high-level version of ata_sff_dev_select(), which additionally
 * inserts the proper pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */
0343 static void ata_dev_select(struct ata_port *ap, unsigned int device,
0344 unsigned int wait, unsigned int can_sleep)
0345 {
0346 if (wait)
0347 ata_wait_idle(ap);
0348
0349 ap->ops->sff_dev_select(ap, device);
0350
0351 if (wait) {
0352 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
0353 ata_msleep(ap, 150);
0354 ata_wait_idle(ap);
0355 }
0356 }

/**
 * ata_sff_irq_on - Enable interrupts on a port.
 * @ap: Port on which interrupts are enabled.
 *
 * Enable interrupts on a legacy IDE device using MMIO or PIO,
 * wait for idle, clear any pending interrupts.
 *
 * Note: may NOT be used as the sff_irq_on() entry in
 * ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
0371 void ata_sff_irq_on(struct ata_port *ap)
0372 {
0373 if (ap->ops->sff_irq_on) {
0374 ap->ops->sff_irq_on(ap);
0375 return;
0376 }
0377
0378 ap->ctl &= ~ATA_NIEN;
0379 ap->last_ctl = ap->ctl;
0380
0381 ata_sff_set_devctl(ap, ap->ctl);
0382 ata_wait_idle(ap);
0383
0384 if (ap->ops->sff_irq_clear)
0385 ap->ops->sff_irq_clear(ap);
0386 }
0387 EXPORT_SYMBOL_GPL(ata_sff_irq_on);

/**
 * ata_sff_tf_load - send taskfile registers to host controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Outputs ATA taskfile to standard ATA host controller.
 *
 * LOCKING:
 * Inherited from caller.
 */
0399 void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
0400 {
0401 struct ata_ioports *ioaddr = &ap->ioaddr;
0402 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
0403
0404 if (tf->ctl != ap->last_ctl) {
0405 if (ioaddr->ctl_addr)
0406 iowrite8(tf->ctl, ioaddr->ctl_addr);
0407 ap->last_ctl = tf->ctl;
0408 ata_wait_idle(ap);
0409 }
0410
0411 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
0412 WARN_ON_ONCE(!ioaddr->ctl_addr);
0413 iowrite8(tf->hob_feature, ioaddr->feature_addr);
0414 iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
0415 iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
0416 iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
0417 iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
0418 }
0419
0420 if (is_addr) {
0421 iowrite8(tf->feature, ioaddr->feature_addr);
0422 iowrite8(tf->nsect, ioaddr->nsect_addr);
0423 iowrite8(tf->lbal, ioaddr->lbal_addr);
0424 iowrite8(tf->lbam, ioaddr->lbam_addr);
0425 iowrite8(tf->lbah, ioaddr->lbah_addr);
0426 }
0427
0428 if (tf->flags & ATA_TFLAG_DEVICE)
0429 iowrite8(tf->device, ioaddr->device_addr);
0430
0431 ata_wait_idle(ap);
0432 }
0433 EXPORT_SYMBOL_GPL(ata_sff_tf_load);

/**
 * ata_sff_tf_read - input device's ATA taskfile shadow registers
 * @ap: Port from which input is read
 * @tf: ATA taskfile register set for storing input
 *
 * Reads ATA taskfile registers for the currently selected device
 * into @tf.  Assumes the device has a fully SFF compliant taskfile
 * layout; without a ctl register the HOB (48-bit) values cannot be
 * read back.
 *
 * LOCKING:
 * Inherited from caller.
 */
0448 void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
0449 {
0450 struct ata_ioports *ioaddr = &ap->ioaddr;
0451
0452 tf->status = ata_sff_check_status(ap);
0453 tf->error = ioread8(ioaddr->error_addr);
0454 tf->nsect = ioread8(ioaddr->nsect_addr);
0455 tf->lbal = ioread8(ioaddr->lbal_addr);
0456 tf->lbam = ioread8(ioaddr->lbam_addr);
0457 tf->lbah = ioread8(ioaddr->lbah_addr);
0458 tf->device = ioread8(ioaddr->device_addr);
0459
0460 if (tf->flags & ATA_TFLAG_LBA48) {
0461 if (likely(ioaddr->ctl_addr)) {
0462 iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
0463 tf->hob_feature = ioread8(ioaddr->error_addr);
0464 tf->hob_nsect = ioread8(ioaddr->nsect_addr);
0465 tf->hob_lbal = ioread8(ioaddr->lbal_addr);
0466 tf->hob_lbam = ioread8(ioaddr->lbam_addr);
0467 tf->hob_lbah = ioread8(ioaddr->lbah_addr);
0468 iowrite8(tf->ctl, ioaddr->ctl_addr);
0469 ap->last_ctl = tf->ctl;
0470 } else
0471 WARN_ON_ONCE(1);
0472 }
0473 }
0474 EXPORT_SYMBOL_GPL(ata_sff_tf_read);

/**
 * ata_sff_exec_command - issue ATA command to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues ATA command, with proper synchronization with interrupt
 * handler / other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
0487 void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
0488 {
0489 iowrite8(tf->command, ap->ioaddr.command_addr);
0490 ata_sff_pause(ap);
0491 }
0492 EXPORT_SYMBOL_GPL(ata_sff_exec_command);

/**
 * ata_tf_to_host - issue ATA taskfile to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 * @tag: tag of the associated command
 *
 * Issues ATA taskfile register set to ATA host controller,
 * with proper synchronization with interrupt handler and
 * other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
0507 static inline void ata_tf_to_host(struct ata_port *ap,
0508 const struct ata_taskfile *tf,
0509 unsigned int tag)
0510 {
0511 trace_ata_tf_load(ap, tf);
0512 ap->ops->sff_tf_load(ap, tf);
0513 trace_ata_exec_command(ap, tf, tag);
0514 ap->ops->sff_exec_command(ap, tf);
0515 }

/**
 * ata_sff_data_xfer - Transfer data by PIO
 * @qc: queued command
 * @buf: data buffer
 * @buflen: buffer length
 * @rw: read/write
 *
 * Transfer data from/to the device data register by PIO.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * Bytes consumed.
 */
0532 unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc, unsigned char *buf,
0533 unsigned int buflen, int rw)
0534 {
0535 struct ata_port *ap = qc->dev->link->ap;
0536 void __iomem *data_addr = ap->ioaddr.data_addr;
0537 unsigned int words = buflen >> 1;
0538
0539
0540 if (rw == READ)
0541 ioread16_rep(data_addr, buf, words);
0542 else
0543 iowrite16_rep(data_addr, buf, words);
0544
0545
0546 if (unlikely(buflen & 0x01)) {
0547 unsigned char pad[2] = { };
0548
0549
0550 buf += buflen - 1;

/*
 * Use io*16_rep() accessors here as well to avoid pointlessly
 * swapping bytes to and from on the big endian machines.
 */
0556 if (rw == READ) {
0557 ioread16_rep(data_addr, pad, 1);
0558 *buf = pad[0];
0559 } else {
0560 pad[0] = *buf;
0561 iowrite16_rep(data_addr, pad, 1);
0562 }
0563 words++;
0564 }
0565
0566 return words << 1;
0567 }
0568 EXPORT_SYMBOL_GPL(ata_sff_data_xfer);

/**
 * ata_sff_data_xfer32 - Transfer data by PIO using 32bit I/O
 * @qc: queued command
 * @buf: data buffer
 * @buflen: buffer length
 * @rw: read/write
 *
 * Transfer data from/to the device data register by PIO using 32bit
 * I/O operations.  Falls back to ata_sff_data_xfer() if the port does
 * not have 32bit PIO enabled.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * Bytes consumed.
 */
0587 unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf,
0588 unsigned int buflen, int rw)
0589 {
0590 struct ata_device *dev = qc->dev;
0591 struct ata_port *ap = dev->link->ap;
0592 void __iomem *data_addr = ap->ioaddr.data_addr;
0593 unsigned int words = buflen >> 2;
0594 int slop = buflen & 3;
0595
0596 if (!(ap->pflags & ATA_PFLAG_PIO32))
0597 return ata_sff_data_xfer(qc, buf, buflen, rw);
0598
0599
0600 if (rw == READ)
0601 ioread32_rep(data_addr, buf, words);
0602 else
0603 iowrite32_rep(data_addr, buf, words);
0604
0605
0606 if (unlikely(slop)) {
0607 unsigned char pad[4] = { };
0608
0609
0610 buf += buflen - slop;

/*
 * Use io*_rep() accessors here as well to avoid pointlessly
 * swapping bytes to and from on the big endian machines.
 */
0616 if (rw == READ) {
0617 if (slop < 3)
0618 ioread16_rep(data_addr, pad, 1);
0619 else
0620 ioread32_rep(data_addr, pad, 1);
0621 memcpy(buf, pad, slop);
0622 } else {
0623 memcpy(pad, buf, slop);
0624 if (slop < 3)
0625 iowrite16_rep(data_addr, pad, 1);
0626 else
0627 iowrite32_rep(data_addr, pad, 1);
0628 }
0629 }
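/* Report the number of buffer bytes consumed, rounded up to an even count. */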
0630 return (buflen + 1) & ~1;
0631 }
0632 EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
0633
0634 static void ata_pio_xfer(struct ata_queued_cmd *qc, struct page *page,
0635 unsigned int offset, size_t xfer_size)
0636 {
0637 bool do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
0638 unsigned char *buf;
0639
0640 buf = kmap_atomic(page);
0641 qc->ap->ops->sff_data_xfer(qc, buf + offset, xfer_size, do_write);
0642 kunmap_atomic(buf);
0643
0644 if (!do_write && !PageSlab(page))
0645 flush_dcache_page(page);
0646 }

/**
 * ata_pio_sector - Transfer a sector of data.
 * @qc: Command on going
 *
 * Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 * LOCKING:
 * Inherited from caller.
 */
0657 static void ata_pio_sector(struct ata_queued_cmd *qc)
0658 {
0659 struct ata_port *ap = qc->ap;
0660 struct page *page;
0661 unsigned int offset;
0662
0663 if (!qc->cursg) {
0664 qc->curbytes = qc->nbytes;
0665 return;
0666 }
0667 if (qc->curbytes == qc->nbytes - qc->sect_size)
0668 ap->hsm_task_state = HSM_ST_LAST;
0669
0670 page = sg_page(qc->cursg);
0671 offset = qc->cursg->offset + qc->cursg_ofs;
0672
0673
0674 page = nth_page(page, (offset >> PAGE_SHIFT));
0675 offset %= PAGE_SIZE;
0676
0677 trace_ata_sff_pio_transfer_data(qc, offset, qc->sect_size);

/*
 * Split the transfer when it splits a page boundary.  Note that the
 * split still has to be dword aligned like all ATA data transfers.
 */
0683 WARN_ON_ONCE(offset % 4);
0684 if (offset + qc->sect_size > PAGE_SIZE) {
0685 unsigned int split_len = PAGE_SIZE - offset;
0686
0687 ata_pio_xfer(qc, page, offset, split_len);
0688 ata_pio_xfer(qc, nth_page(page, 1), 0,
0689 qc->sect_size - split_len);
0690 } else {
0691 ata_pio_xfer(qc, page, offset, qc->sect_size);
0692 }
0693
0694 qc->curbytes += qc->sect_size;
0695 qc->cursg_ofs += qc->sect_size;
0696
0697 if (qc->cursg_ofs == qc->cursg->length) {
0698 qc->cursg = sg_next(qc->cursg);
0699 if (!qc->cursg)
0700 ap->hsm_task_state = HSM_ST_LAST;
0701 qc->cursg_ofs = 0;
0702 }
0703 }

/**
 * ata_pio_sectors - Transfer one or many sectors.
 * @qc: Command on going
 *
 * Transfer one or many sectors of data from/to the
 * ATA device for the DRQ request.
 *
 * LOCKING:
 * Inherited from caller.
 */
0715 static void ata_pio_sectors(struct ata_queued_cmd *qc)
0716 {
0717 if (is_multi_taskfile(&qc->tf)) {
0718
0719 unsigned int nsect;
0720
0721 WARN_ON_ONCE(qc->dev->multi_count == 0);
0722
0723 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
0724 qc->dev->multi_count);
0725 while (nsect--)
0726 ata_pio_sector(qc);
0727 } else
0728 ata_pio_sector(qc);
0729
0730 ata_sff_sync(qc->ap);
0731 }

/**
 * atapi_send_cdb - Write CDB bytes to hardware
 * @ap: Port to which ATAPI device is attached.
 * @qc: Taskfile currently active
 *
 * When the device has indicated its readiness to accept
 * the CDB, this function is called.  Send the CDB.
 *
 * LOCKING:
 * caller.
 */
0744 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
0745 {
0746
0747 trace_atapi_send_cdb(qc, 0, qc->dev->cdb_len);
0748 WARN_ON_ONCE(qc->dev->cdb_len < 12);
0749
0750 ap->ops->sff_data_xfer(qc, qc->cdb, qc->dev->cdb_len, 1);
0751 ata_sff_sync(ap);
0752
0753
0754 switch (qc->tf.protocol) {
0755 case ATAPI_PROT_PIO:
0756 ap->hsm_task_state = HSM_ST;
0757 break;
0758 case ATAPI_PROT_NODATA:
0759 ap->hsm_task_state = HSM_ST_LAST;
0760 break;
0761 #ifdef CONFIG_ATA_BMDMA
0762 case ATAPI_PROT_DMA:
0763 ap->hsm_task_state = HSM_ST_LAST;
0764
0765 trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
0766 ap->ops->bmdma_start(qc);
0767 break;
0768 #endif
0769 default:
0770 BUG();
0771 }
0772 }

/**
 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
 * @qc: Command on going
 * @bytes: number of bytes
 *
 * Transfer data from/to the ATAPI device.
 *
 * LOCKING:
 * Inherited from caller.
 */
0785 static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
0786 {
0787 int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
0788 struct ata_port *ap = qc->ap;
0789 struct ata_device *dev = qc->dev;
0790 struct ata_eh_info *ehi = &dev->link->eh_info;
0791 struct scatterlist *sg;
0792 struct page *page;
0793 unsigned char *buf;
0794 unsigned int offset, count, consumed;
0795
0796 next_sg:
0797 sg = qc->cursg;
0798 if (unlikely(!sg)) {
0799 ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
0800 "buf=%u cur=%u bytes=%u",
0801 qc->nbytes, qc->curbytes, bytes);
0802 return -1;
0803 }
0804
0805 page = sg_page(sg);
0806 offset = sg->offset + qc->cursg_ofs;
0807
0808
0809 page = nth_page(page, (offset >> PAGE_SHIFT));
0810 offset %= PAGE_SIZE;
0811
0812
0813 count = min(sg->length - qc->cursg_ofs, bytes);
0814
0815
0816 count = min(count, (unsigned int)PAGE_SIZE - offset);
0817
0818 trace_atapi_pio_transfer_data(qc, offset, count);
0819
0820
0821 buf = kmap_atomic(page);
0822 consumed = ap->ops->sff_data_xfer(qc, buf + offset, count, rw);
0823 kunmap_atomic(buf);
0824
0825 bytes -= min(bytes, consumed);
0826 qc->curbytes += count;
0827 qc->cursg_ofs += count;
0828
0829 if (qc->cursg_ofs == sg->length) {
0830 qc->cursg = sg_next(qc->cursg);
0831 qc->cursg_ofs = 0;
0832 }
0833
0834
0835
0836
0837
0838
0839
0840 if (bytes)
0841 goto next_sg;
0842 return 0;
0843 }

/**
 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
 * @qc: Command on going
 *
 * Transfer data from/to the ATAPI device.
 *
 * LOCKING:
 * Inherited from caller.
 */
0854 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
0855 {
0856 struct ata_port *ap = qc->ap;
0857 struct ata_device *dev = qc->dev;
0858 struct ata_eh_info *ehi = &dev->link->eh_info;
0859 unsigned int ireason, bc_lo, bc_hi, bytes;
0860 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

/*
 * Abuse qc->result_tf for temp storage of intermediate TF
 * here to save some kernel stack usage.
 * For normal completion, qc->result_tf is not relevant.  For
 * error, qc->result_tf is later overwritten by ata_qc_complete().
 * So, the correctness of qc->result_tf is not affected.
 */
0868 ap->ops->sff_tf_read(ap, &qc->result_tf);
0869 ireason = qc->result_tf.nsect;
0870 bc_lo = qc->result_tf.lbam;
0871 bc_hi = qc->result_tf.lbah;
0872 bytes = (bc_hi << 8) | bc_lo;
0873
0874
0875 if (unlikely(ireason & ATAPI_COD))
0876 goto atapi_check;
0877
0878
0879 i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0;
0880 if (unlikely(do_write != i_write))
0881 goto atapi_check;
0882
0883 if (unlikely(!bytes))
0884 goto atapi_check;
0885
0886 if (unlikely(__atapi_pio_bytes(qc, bytes)))
0887 goto err_out;
0888 ata_sff_sync(ap);
0889
0890 return;
0891
0892 atapi_check:
0893 ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
0894 ireason, bytes);
0895 err_out:
0896 qc->err_mask |= AC_ERR_HSM;
0897 ap->hsm_task_state = HSM_ST_ERR;
0898 }

/**
 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 * @ap: the target ata_port
 * @qc: qc on going
 *
 * RETURNS:
 * 1 if ok in workqueue, 0 otherwise.
 */
0908 static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
0909 struct ata_queued_cmd *qc)
0910 {
0911 if (qc->tf.flags & ATA_TFLAG_POLLING)
0912 return 1;
0913
0914 if (ap->hsm_task_state == HSM_ST_FIRST) {
0915 if (qc->tf.protocol == ATA_PROT_PIO &&
0916 (qc->tf.flags & ATA_TFLAG_WRITE))
0917 return 1;
0918
0919 if (ata_is_atapi(qc->tf.protocol) &&
0920 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
0921 return 1;
0922 }
0923
0924 return 0;
0925 }

/**
 * ata_hsm_qc_complete - finish a qc running on standard HSM
 * @qc: Command to complete
 * @in_wq: 1 if called from workqueue, 0 otherwise
 *
 * Finish @qc which is running on the standard HSM.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
0938 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
0939 {
0940 struct ata_port *ap = qc->ap;
0941
0942 if (ap->ops->error_handler) {
0943 if (in_wq) {
0944
0945
0946
0947 qc = ata_qc_from_tag(ap, qc->tag);
0948 if (qc) {
0949 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
0950 ata_sff_irq_on(ap);
0951 ata_qc_complete(qc);
0952 } else
0953 ata_port_freeze(ap);
0954 }
0955 } else {
0956 if (likely(!(qc->err_mask & AC_ERR_HSM)))
0957 ata_qc_complete(qc);
0958 else
0959 ata_port_freeze(ap);
0960 }
0961 } else {
0962 if (in_wq) {
0963 ata_sff_irq_on(ap);
0964 ata_qc_complete(qc);
0965 } else
0966 ata_qc_complete(qc);
0967 }
0968 }

/**
 * ata_sff_hsm_move - move the HSM to the next state.
 * @ap: the target ata_port
 * @qc: qc on going
 * @status: current device status
 * @in_wq: 1 if called from workqueue, 0 otherwise
 *
 * RETURNS:
 * 1 when poll next status needed, 0 otherwise.
 */
0980 int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
0981 u8 status, int in_wq)
0982 {
0983 struct ata_link *link = qc->dev->link;
0984 struct ata_eh_info *ehi = &link->eh_info;
0985 int poll_next;
0986
0987 lockdep_assert_held(ap->lock);
0988
0989 WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

/*
 * The qc must only be handled from the context (interrupt handler
 * vs. polling task) it was set up for.
 */
0995 WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
0996
0997 fsm_start:
0998 trace_ata_sff_hsm_state(qc, status);
0999
1000 switch (ap->hsm_task_state) {
1001 case HSM_ST_FIRST:
/* Send first data block or PACKET CDB */

/* If polling, we will stay in the work queue after
 * sending the data.  Otherwise, the interrupt handler
 * takes over after sending the data.
 */
1008 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
1009
1010
1011 if (unlikely((status & ATA_DRQ) == 0)) {
1012
1013 if (likely(status & (ATA_ERR | ATA_DF)))
1014
1015 qc->err_mask |= AC_ERR_DEV;
1016 else {
1017
1018 ata_ehi_push_desc(ehi,
1019 "ST_FIRST: !(DRQ|ERR|DF)");
1020 qc->err_mask |= AC_ERR_HSM;
1021 }
1022
1023 ap->hsm_task_state = HSM_ST_ERR;
1024 goto fsm_start;
1025 }

/* Device should not ask for data transfer (DRQ=1)
 * when it finds something wrong.
 * We ignore DRQ here and stop the HSM by
 * changing hsm_task_state to HSM_ST_ERR and
 * let the EH abort the command or reset the device.
 */
1033 if (unlikely(status & (ATA_ERR | ATA_DF))) {
1034
1035
1036
1037
1038
1039 if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
1040 ata_ehi_push_desc(ehi, "ST_FIRST: "
1041 "DRQ=1 with device error, "
1042 "dev_stat 0x%X", status);
1043 qc->err_mask |= AC_ERR_HSM;
1044 ap->hsm_task_state = HSM_ST_ERR;
1045 goto fsm_start;
1046 }
1047 }
1048
1049 if (qc->tf.protocol == ATA_PROT_PIO) {
/* PIO data out protocol: send first data block. */

/* ata_pio_sectors() might change the state
 * to HSM_ST_LAST, so the state is changed here
 * before ata_pio_sectors().
 */
1058 ap->hsm_task_state = HSM_ST;
1059 ata_pio_sectors(qc);
1060 } else
1061
1062 atapi_send_cdb(ap, qc);
1063
1064
1065
1066
1067 break;
1068
1069 case HSM_ST:
1070
1071 if (qc->tf.protocol == ATAPI_PROT_PIO) {
1072
1073 if ((status & ATA_DRQ) == 0) {
1074
1075
1076
1077 ap->hsm_task_state = HSM_ST_LAST;
1078 goto fsm_start;
1079 }
1080
1081
1082
1083
1084
1085
1086
1087 if (unlikely(status & (ATA_ERR | ATA_DF))) {
1088 ata_ehi_push_desc(ehi, "ST-ATAPI: "
1089 "DRQ=1 with device error, "
1090 "dev_stat 0x%X", status);
1091 qc->err_mask |= AC_ERR_HSM;
1092 ap->hsm_task_state = HSM_ST_ERR;
1093 goto fsm_start;
1094 }
1095
1096 atapi_pio_bytes(qc);
1097
1098 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
1099
1100 goto fsm_start;
1101
1102 } else {
1103
1104 if (unlikely((status & ATA_DRQ) == 0)) {
1105
1106 if (likely(status & (ATA_ERR | ATA_DF))) {
1107
1108 qc->err_mask |= AC_ERR_DEV;
1109
1110
1111
1112
1113
1114 if (qc->dev->horkage &
1115 ATA_HORKAGE_DIAGNOSTIC)
1116 qc->err_mask |=
1117 AC_ERR_NODEV_HINT;
1118 } else {
1119
1120
1121
1122
1123 ata_ehi_push_desc(ehi, "ST-ATA: "
1124 "DRQ=0 without device error, "
1125 "dev_stat 0x%X", status);
1126 qc->err_mask |= AC_ERR_HSM |
1127 AC_ERR_NODEV_HINT;
1128 }
1129
1130 ap->hsm_task_state = HSM_ST_ERR;
1131 goto fsm_start;
1132 }

/* For PIO reads, some devices may ask for
 * data transfer (DRQ=1) alone with ERR=1.
 * We respect DRQ here and transfer one
 * block of junk data before changing the
 * hsm_task_state to HSM_ST_ERR.
 *
 * For PIO writes, ERR=1 DRQ=1 doesn't make
 * sense since the data block has been
 * transferred before the device had a chance
 * to indicate error.
 */
1144 if (unlikely(status & (ATA_ERR | ATA_DF))) {
1145
1146 qc->err_mask |= AC_ERR_DEV;
1147
1148 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
1149 ata_pio_sectors(qc);
1150 status = ata_wait_idle(ap);
1151 }
1152
1153 if (status & (ATA_BUSY | ATA_DRQ)) {
1154 ata_ehi_push_desc(ehi, "ST-ATA: "
1155 "BUSY|DRQ persists on ERR|DF, "
1156 "dev_stat 0x%X", status);
1157 qc->err_mask |= AC_ERR_HSM;
1158 }
1159
1160
1161
1162
1163
1164
1165
1166
1167 if (status == 0x7f)
1168 qc->err_mask |= AC_ERR_NODEV_HINT;
1169
1170
1171
1172
1173
1174 ap->hsm_task_state = HSM_ST_ERR;
1175 goto fsm_start;
1176 }
1177
1178 ata_pio_sectors(qc);
1179
1180 if (ap->hsm_task_state == HSM_ST_LAST &&
1181 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
1182
1183 status = ata_wait_idle(ap);
1184 goto fsm_start;
1185 }
1186 }
1187
1188 poll_next = 1;
1189 break;
1190
1191 case HSM_ST_LAST:
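/* no more data to transfer: check final status and complete the command */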
1192 if (unlikely(!ata_ok(status))) {
1193 qc->err_mask |= __ac_err_mask(status);
1194 ap->hsm_task_state = HSM_ST_ERR;
1195 goto fsm_start;
1196 }
1197
1198
1199 trace_ata_sff_hsm_command_complete(qc, status);
1200
1201 WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
1202
1203 ap->hsm_task_state = HSM_ST_IDLE;
1204
1205
1206 ata_hsm_qc_complete(qc, in_wq);
1207
1208 poll_next = 0;
1209 break;
1210
1211 case HSM_ST_ERR:
1212 ap->hsm_task_state = HSM_ST_IDLE;
1213
1214
1215 ata_hsm_qc_complete(qc, in_wq);
1216
1217 poll_next = 0;
1218 break;
1219 default:
1220 poll_next = 0;
1221 WARN(true, "ata%d: SFF host state machine in invalid state %d",
1222 ap->print_id, ap->hsm_task_state);
1223 }
1224
1225 return poll_next;
1226 }
1227 EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
1228
1229 void ata_sff_queue_work(struct work_struct *work)
1230 {
1231 queue_work(ata_sff_wq, work);
1232 }
1233 EXPORT_SYMBOL_GPL(ata_sff_queue_work);
1234
1235 void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay)
1236 {
1237 queue_delayed_work(ata_sff_wq, dwork, delay);
1238 }
1239 EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work);
1240
1241 void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
1242 {
1243 struct ata_port *ap = link->ap;
1244
1245 WARN_ON((ap->sff_pio_task_link != NULL) &&
1246 (ap->sff_pio_task_link != link));
1247 ap->sff_pio_task_link = link;
1248
1249
1250 ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay));
1251 }
1252 EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
1253
1254 void ata_sff_flush_pio_task(struct ata_port *ap)
1255 {
1256 trace_ata_sff_flush_pio_task(ap);
1257
1258 cancel_delayed_work_sync(&ap->sff_pio_task);
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268 spin_lock_irq(ap->lock);
1269 ap->hsm_task_state = HSM_ST_IDLE;
1270 spin_unlock_irq(ap->lock);
1271
1272 ap->sff_pio_task_link = NULL;
1273 }
1274
1275 static void ata_sff_pio_task(struct work_struct *work)
1276 {
1277 struct ata_port *ap =
1278 container_of(work, struct ata_port, sff_pio_task.work);
1279 struct ata_link *link = ap->sff_pio_task_link;
1280 struct ata_queued_cmd *qc;
1281 u8 status;
1282 int poll_next;
1283
1284 spin_lock_irq(ap->lock);
1285
1286 BUG_ON(ap->sff_pio_task_link == NULL);
1287
1288 qc = ata_qc_from_tag(ap, link->active_tag);
1289 if (!qc) {
1290 ap->sff_pio_task_link = NULL;
1291 goto out_unlock;
1292 }
1293
1294 fsm_start:
1295 WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);

/*
 * This is purely heuristic.  This is a fast path.
 * Sometimes when we enter, BSY will be cleared in
 * a chk-status or two.  If not, the drive is probably seeking
 * or something.  Snooze for a couple msecs, then
 * chk-status again.  If still busy, queue delayed work.
 */
1304 status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
1305 if (status & ATA_BUSY) {
1306 spin_unlock_irq(ap->lock);
1307 ata_msleep(ap, 2);
1308 spin_lock_irq(ap->lock);
1309
1310 status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1311 if (status & ATA_BUSY) {
1312 ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
1313 goto out_unlock;
1314 }
1315 }
1316
1317
1318
1319
1320
1321 ap->sff_pio_task_link = NULL;
1322
1323 poll_next = ata_sff_hsm_move(ap, qc, status, 1);
1324
1325
1326
1327
1328 if (poll_next)
1329 goto fsm_start;
1330 out_unlock:
1331 spin_unlock_irq(ap->lock);
1332 }
1333

/**
 * ata_sff_qc_issue - issue taskfile to a SFF controller
 * @qc: command to issue to device
 *
 * This function issues a PIO or NODATA command to a SFF controller.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
1347 unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1348 {
1349 struct ata_port *ap = qc->ap;
1350 struct ata_link *link = qc->dev->link;
1351
1352
1353
1354
1355 if (ap->flags & ATA_FLAG_PIO_POLLING)
1356 qc->tf.flags |= ATA_TFLAG_POLLING;
1357
1358
1359 ata_dev_select(ap, qc->dev->devno, 1, 0);
1360
1361
1362 switch (qc->tf.protocol) {
1363 case ATA_PROT_NODATA:
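/* non-data command: write the taskfile and wait for the completion interrupt (or poll) */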
1364 if (qc->tf.flags & ATA_TFLAG_POLLING)
1365 ata_qc_set_polling(qc);
1366
1367 ata_tf_to_host(ap, &qc->tf, qc->tag);
1368 ap->hsm_task_state = HSM_ST_LAST;
1369
1370 if (qc->tf.flags & ATA_TFLAG_POLLING)
1371 ata_sff_queue_pio_task(link, 0);
1372
1373 break;
1374
1375 case ATA_PROT_PIO:
1376 if (qc->tf.flags & ATA_TFLAG_POLLING)
1377 ata_qc_set_polling(qc);
1378
1379 ata_tf_to_host(ap, &qc->tf, qc->tag);
1380
1381 if (qc->tf.flags & ATA_TFLAG_WRITE) {
1382
1383 ap->hsm_task_state = HSM_ST_FIRST;
1384 ata_sff_queue_pio_task(link, 0);
1385
1386
1387
1388
1389 } else {
1390
1391 ap->hsm_task_state = HSM_ST;
1392
1393 if (qc->tf.flags & ATA_TFLAG_POLLING)
1394 ata_sff_queue_pio_task(link, 0);
1395
1396
1397
1398
1399
1400 }
1401
1402 break;
1403
1404 case ATAPI_PROT_PIO:
1405 case ATAPI_PROT_NODATA:
1406 if (qc->tf.flags & ATA_TFLAG_POLLING)
1407 ata_qc_set_polling(qc);
1408
1409 ata_tf_to_host(ap, &qc->tf, qc->tag);
1410
1411 ap->hsm_task_state = HSM_ST_FIRST;
1412
1413
1414 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
1415 (qc->tf.flags & ATA_TFLAG_POLLING))
1416 ata_sff_queue_pio_task(link, 0);
1417 break;
1418
1419 default:
1420 return AC_ERR_SYSTEM;
1421 }
1422
1423 return 0;
1424 }
1425 EXPORT_SYMBOL_GPL(ata_sff_qc_issue);

/**
 * ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
 * @qc: qc to fill result TF for
 *
 * @qc is finished and the result TF needs to be filled.  Fill it
 * using ->sff_tf_read.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
1440 bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
1441 {
1442 qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
1443 return true;
1444 }
1445 EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
1446
1447 static unsigned int ata_sff_idle_irq(struct ata_port *ap)
1448 {
1449 ap->stats.idle_irq++;
1450
1451 #ifdef ATA_IRQ_TRAP
1452 if ((ap->stats.idle_irq % 1000) == 0) {
1453 ap->ops->sff_check_status(ap);
1454 if (ap->ops->sff_irq_clear)
1455 ap->ops->sff_irq_clear(ap);
1456 ata_port_warn(ap, "irq trap\n");
1457 return 1;
1458 }
1459 #endif
1460 return 0;
1461 }
1462
1463 static unsigned int __ata_sff_port_intr(struct ata_port *ap,
1464 struct ata_queued_cmd *qc,
1465 bool hsmv_on_idle)
1466 {
1467 u8 status;
1468
1469 trace_ata_sff_port_intr(qc, hsmv_on_idle);
1470
1471
1472 switch (ap->hsm_task_state) {
1473 case HSM_ST_FIRST:
1474
1475
1476
1477
1478
1479
1480
1481
1482 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1483 return ata_sff_idle_irq(ap);
1484 break;
1485 case HSM_ST_IDLE:
1486 return ata_sff_idle_irq(ap);
1487 default:
1488 break;
1489 }
1490
1491
1492 status = ata_sff_irq_status(ap);
1493 if (status & ATA_BUSY) {
1494 if (hsmv_on_idle) {
1495
1496 qc->err_mask |= AC_ERR_HSM;
1497 ap->hsm_task_state = HSM_ST_ERR;
1498 } else
1499 return ata_sff_idle_irq(ap);
1500 }
1501
1502
1503 if (ap->ops->sff_irq_clear)
1504 ap->ops->sff_irq_clear(ap);
1505
1506 ata_sff_hsm_move(ap, qc, status, 0);
1507
1508 return 1;
1509 }

/**
 * ata_sff_port_intr - Handle SFF port interrupt
 * @ap: Port on which interrupt arrived
 * @qc: Taskfile currently active in engine
 *
 * Handle port interrupt for given queued command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * One if interrupt was handled, zero if not (shared irq).
 */
1524 unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1525 {
1526 return __ata_sff_port_intr(ap, qc, false);
1527 }
1528 EXPORT_SYMBOL_GPL(ata_sff_port_intr);
1529
1530 static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
1531 unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
1532 {
1533 struct ata_host *host = dev_instance;
1534 bool retried = false;
1535 unsigned int i;
1536 unsigned int handled, idle, polling;
1537 unsigned long flags;
1538
1539
1540 spin_lock_irqsave(&host->lock, flags);
1541
1542 retry:
1543 handled = idle = polling = 0;
1544 for (i = 0; i < host->n_ports; i++) {
1545 struct ata_port *ap = host->ports[i];
1546 struct ata_queued_cmd *qc;
1547
1548 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1549 if (qc) {
1550 if (!(qc->tf.flags & ATA_TFLAG_POLLING))
1551 handled |= port_intr(ap, qc);
1552 else
1553 polling |= 1 << i;
1554 } else
1555 idle |= 1 << i;
1556 }

/*
 * If the interrupt was not handled but the controller is still
 * asserting the IRQ line, check each port's IRQ pending status
 * (where available), clear spurious interrupts, and retry once for
 * ports that look like they still have work pending.
 */
1563 if (!handled && !retried) {
1564 bool retry = false;
1565
1566 for (i = 0; i < host->n_ports; i++) {
1567 struct ata_port *ap = host->ports[i];
1568
1569 if (polling & (1 << i))
1570 continue;
1571
1572 if (!ap->ops->sff_irq_check ||
1573 !ap->ops->sff_irq_check(ap))
1574 continue;
1575
1576 if (idle & (1 << i)) {
1577 ap->ops->sff_check_status(ap);
1578 if (ap->ops->sff_irq_clear)
1579 ap->ops->sff_irq_clear(ap);
1580 } else {
1581
1582 if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
1583 retry |= true;
1584
1585
1586
1587
1588 }
1589 }
1590
1591 if (retry) {
1592 retried = true;
1593 goto retry;
1594 }
1595 }
1596
1597 spin_unlock_irqrestore(&host->lock, flags);
1598
1599 return IRQ_RETVAL(handled);
1600 }

/**
 * ata_sff_interrupt - Default SFF ATA host interrupt handler
 * @irq: irq line (unused)
 * @dev_instance: pointer to our ata_host information structure
 *
 * Default interrupt handler for PCI IDE devices.  Calls
 * ata_sff_port_intr() for each port that is not disabled.
 *
 * LOCKING:
 * Obtains host lock during operation.
 *
 * RETURNS:
 * IRQ_NONE or IRQ_HANDLED.
 */
1616 irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
1617 {
1618 return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
1619 }
1620 EXPORT_SYMBOL_GPL(ata_sff_interrupt);

/**
 * ata_sff_lost_interrupt - Check for an apparent lost interrupt
 * @ap: port that appears to have timed out
 *
 * Called from the libata error handlers when the core code suspects
 * an interrupt has been lost.  If the device is no longer busy, run
 * the normal port interrupt handling to complete the command and
 * report the condition.
 *
 * LOCKING:
 * Caller holds host lock
 */
1635 void ata_sff_lost_interrupt(struct ata_port *ap)
1636 {
1637 u8 status = 0;
1638 struct ata_queued_cmd *qc;
1639
1640
1641 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1642
1643 if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
1644 return;
1645
1646
1647 if (WARN_ON_ONCE(!ata_sff_altstatus(ap, &status)))
1648 return;
1649 if (status & ATA_BUSY)
1650 return;
1651
1652
1653
1654 ata_port_warn(ap, "lost interrupt (Status 0x%x)\n", status);
1655
1656
1657 ata_sff_port_intr(ap, qc);
1658 }
1659 EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);

/**
 * ata_sff_freeze - Freeze SFF controller port
 * @ap: port to freeze
 *
 * Freeze SFF controller port by masking device interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
1670 void ata_sff_freeze(struct ata_port *ap)
1671 {
1672 ap->ctl |= ATA_NIEN;
1673 ap->last_ctl = ap->ctl;
1674
1675 ata_sff_set_devctl(ap, ap->ctl);

/* Under certain circumstances, some controllers raise IRQ on
 * ATA_NIEN manipulation.  Also, many controllers fail to mask
 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
 */
1681 ap->ops->sff_check_status(ap);
1682
1683 if (ap->ops->sff_irq_clear)
1684 ap->ops->sff_irq_clear(ap);
1685 }
1686 EXPORT_SYMBOL_GPL(ata_sff_freeze);

/**
 * ata_sff_thaw - Thaw SFF controller port
 * @ap: port to thaw
 *
 * Clear any pending interrupt condition and re-enable interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
1697 void ata_sff_thaw(struct ata_port *ap)
1698 {
1699
1700 ap->ops->sff_check_status(ap);
1701 if (ap->ops->sff_irq_clear)
1702 ap->ops->sff_irq_clear(ap);
1703 ata_sff_irq_on(ap);
1704 }
1705 EXPORT_SYMBOL_GPL(ata_sff_thaw);

/**
 * ata_sff_prereset - prepare SFF link for reset
 * @link: SFF link to be reset
 * @deadline: deadline jiffies for the operation
 *
 * SFF link @link is about to be reset.  Initialize it.  Unless a
 * hardreset was requested, wait for the device to become ready and,
 * if it does not, schedule a hardreset.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * Always 0.
 */
1722 int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
1723 {
1724 struct ata_eh_context *ehc = &link->eh_context;
1725 int rc;
1726
1727
1728 ata_std_prereset(link, deadline);
1729
1730
1731 if (ehc->i.action & ATA_EH_HARDRESET)
1732 return 0;
1733
1734
1735 if (!ata_link_offline(link)) {
1736 rc = ata_sff_wait_ready(link, deadline);
1737 if (rc && rc != -ENODEV) {
1738 ata_link_warn(link,
1739 "device not ready (errno=%d), forcing hardreset\n",
1740 rc);
1741 ehc->i.action |= ATA_EH_HARDRESET;
1742 }
1743 }
1744
1745 return 0;
1746 }
1747 EXPORT_SYMBOL_GPL(ata_sff_prereset);

/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * RETURNS:
 * true if device is present, false if not.
 *
 * LOCKING:
 * caller.
 */
1769 static bool ata_devchk(struct ata_port *ap, unsigned int device)
1770 {
1771 struct ata_ioports *ioaddr = &ap->ioaddr;
1772 u8 nsect, lbal;
1773
1774 ap->ops->sff_dev_select(ap, device);
1775
1776 iowrite8(0x55, ioaddr->nsect_addr);
1777 iowrite8(0xaa, ioaddr->lbal_addr);
1778
1779 iowrite8(0xaa, ioaddr->nsect_addr);
1780 iowrite8(0x55, ioaddr->lbal_addr);
1781
1782 iowrite8(0x55, ioaddr->nsect_addr);
1783 iowrite8(0xaa, ioaddr->lbal_addr);
1784
1785 nsect = ioread8(ioaddr->nsect_addr);
1786 lbal = ioread8(ioaddr->lbal_addr);
1787
1788 if ((nsect == 0x55) && (lbal == 0xaa))
1789 return true;
1790
1791 return false;
1792 }

/**
 * ata_sff_dev_classify - Parse returned ATA device signature
 * @dev: ATA device to classify (starting at zero)
 * @present: device seems present
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_port_classify().
 *
 * LOCKING:
 * caller.
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
1815 unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
1816 u8 *r_err)
1817 {
1818 struct ata_port *ap = dev->link->ap;
1819 struct ata_taskfile tf;
1820 unsigned int class;
1821 u8 err;
1822
1823 ap->ops->sff_dev_select(ap, dev->devno);
1824
1825 memset(&tf, 0, sizeof(tf));
1826
1827 ap->ops->sff_tf_read(ap, &tf);
1828 err = tf.error;
1829 if (r_err)
1830 *r_err = err;
1831
1832
1833 if (err == 0)
1834
1835 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
1836 else if (err == 1)
1837 ;
1838 else if ((dev->devno == 0) && (err == 0x81))
1839 ;
1840 else
1841 return ATA_DEV_NONE;
1842
1843
1844 class = ata_port_classify(ap, &tf);
1845 switch (class) {
1846 case ATA_DEV_UNKNOWN:
1847
1848
1849
1850
1851
1852
1853
1854 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
1855 class = ATA_DEV_ATA;
1856 else
1857 class = ATA_DEV_NONE;
1858 break;
1859 case ATA_DEV_ATA:
1860 if (ap->ops->sff_check_status(ap) == 0)
1861 class = ATA_DEV_NONE;
1862 break;
1863 }
1864 return class;
1865 }
1866 EXPORT_SYMBOL_GPL(ata_sff_dev_classify);

/**
 * ata_sff_wait_after_reset - wait for devices to become ready after reset
 * @link: SFF link which is just reset
 * @devmask: mask of present devices
 * @deadline: deadline jiffies for the operation
 *
 * Wait for devices attached to SFF @link to become ready after
 * reset.  A preceding wait avoids accessing the taskfile status
 * register too early.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -ENODEV if some or all of the devices in @devmask
 * don't seem to exist, -errno on other errors.
 */
1885 int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
1886 unsigned long deadline)
1887 {
1888 struct ata_port *ap = link->ap;
1889 struct ata_ioports *ioaddr = &ap->ioaddr;
1890 unsigned int dev0 = devmask & (1 << 0);
1891 unsigned int dev1 = devmask & (1 << 1);
1892 int rc, ret = 0;
1893
1894 ata_msleep(ap, ATA_WAIT_AFTER_RESET);
1895
1896
1897 rc = ata_sff_wait_ready(link, deadline);
1898
1899
1900
1901 if (rc)
1902 return rc;
1903
1904
1905
1906
1907 if (dev1) {
1908 int i;
1909
1910 ap->ops->sff_dev_select(ap, 1);
1911
1912
1913
1914
1915
1916 for (i = 0; i < 2; i++) {
1917 u8 nsect, lbal;
1918
1919 nsect = ioread8(ioaddr->nsect_addr);
1920 lbal = ioread8(ioaddr->lbal_addr);
1921 if ((nsect == 1) && (lbal == 1))
1922 break;
1923 ata_msleep(ap, 50);
1924 }
1925
1926 rc = ata_sff_wait_ready(link, deadline);
1927 if (rc) {
1928 if (rc != -ENODEV)
1929 return rc;
1930 ret = rc;
1931 }
1932 }
1933
1934
1935 ap->ops->sff_dev_select(ap, 0);
1936 if (dev1)
1937 ap->ops->sff_dev_select(ap, 1);
1938 if (dev0)
1939 ap->ops->sff_dev_select(ap, 0);
1940
1941 return ret;
1942 }
1943 EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
1944
1945 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
1946 unsigned long deadline)
1947 {
1948 struct ata_ioports *ioaddr = &ap->ioaddr;
1949
1950 if (ap->ioaddr.ctl_addr) {
1951
1952 iowrite8(ap->ctl, ioaddr->ctl_addr);
1953 udelay(20);
1954 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
1955 udelay(20);
1956 iowrite8(ap->ctl, ioaddr->ctl_addr);
1957 ap->last_ctl = ap->ctl;
1958 }
1959
1960
1961 return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
1962 }

/**
 * ata_sff_softreset - reset host port via ATA SRST
 * @link: ATA link to reset
 * @classes: resulting classes of attached devices
 * @deadline: deadline jiffies for the operation
 *
 * Reset host port using ATA SRST.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
1978 int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
1979 unsigned long deadline)
1980 {
1981 struct ata_port *ap = link->ap;
1982 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
1983 unsigned int devmask = 0;
1984 int rc;
1985 u8 err;
1986
1987
1988 if (ata_devchk(ap, 0))
1989 devmask |= (1 << 0);
1990 if (slave_possible && ata_devchk(ap, 1))
1991 devmask |= (1 << 1);
1992
1993
1994 ap->ops->sff_dev_select(ap, 0);
1995
1996
1997 rc = ata_bus_softreset(ap, devmask, deadline);
1998
1999 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
2000 ata_link_err(link, "SRST failed (errno=%d)\n", rc);
2001 return rc;
2002 }
2003
2004
2005 classes[0] = ata_sff_dev_classify(&link->device[0],
2006 devmask & (1 << 0), &err);
2007 if (slave_possible && err != 0x81)
2008 classes[1] = ata_sff_dev_classify(&link->device[1],
2009 devmask & (1 << 1), &err);
2010
2011 return 0;
2012 }
2013 EXPORT_SYMBOL_GPL(ata_sff_softreset);

/**
 * sata_sff_hardreset - reset host port via SATA phy reset
 * @link: link to reset
 * @class: resulting class of attached device
 * @deadline: deadline jiffies for the operation
 *
 * SATA phy-reset host port using DET bits of SControl register,
 * wait for !BSY and classify the attached device.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
2030 int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
2031 unsigned long deadline)
2032 {
2033 struct ata_eh_context *ehc = &link->eh_context;
2034 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2035 bool online;
2036 int rc;
2037
2038 rc = sata_link_hardreset(link, timing, deadline, &online,
2039 ata_sff_check_ready);
2040 if (online)
2041 *class = ata_sff_dev_classify(link->device, 1, NULL);
2042
2043 return rc;
2044 }
2045 EXPORT_SYMBOL_GPL(sata_sff_hardreset);

/**
 * ata_sff_postreset - SFF postreset callback
 * @link: the target SFF ata_link
 * @classes: classes of attached devices
 *
 * This function is invoked after a successful reset.  It first
 * calls ata_std_postreset() and then performs SFF specific
 * postreset processing.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
2059 void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
2060 {
2061 struct ata_port *ap = link->ap;
2062
2063 ata_std_postreset(link, classes);
2064
2065
2066 if (classes[0] != ATA_DEV_NONE)
2067 ap->ops->sff_dev_select(ap, 1);
2068 if (classes[1] != ATA_DEV_NONE)
2069 ap->ops->sff_dev_select(ap, 0);
2070
2071
2072 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE)
2073 return;
2074
2075
2076 if (ata_sff_set_devctl(ap, ap->ctl))
2077 ap->last_ctl = ap->ctl;
2078 }
2079 EXPORT_SYMBOL_GPL(ata_sff_postreset);

/**
 * ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers
 * @qc: command
 *
 * Drain the FIFO and device of any stuck data following a command
 * failing to complete.  In some cases this is necessary before a
 * reset will recover the device.
 */
2091 void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
2092 {
2093 int count;
2094 struct ata_port *ap;
2095
2096
2097 if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
2098 return;
2099
2100 ap = qc->ap;
2101
2102 for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
2103 && count < 65536; count += 2)
2104 ioread16(ap->ioaddr.data_addr);
2105
2106 if (count)
2107 ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
2108
2109 }
2110 EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);

/**
 * ata_sff_error_handler - Stock error handler for SFF controller
 * @ap: port to handle error for
 *
 * Stock error handler for SFF controller.  It can handle both
 * PATA and SATA controllers.  Many controllers should be able to
 * use this EH as-is or with some added handling before and after.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
2124 void ata_sff_error_handler(struct ata_port *ap)
2125 {
2126 ata_reset_fn_t softreset = ap->ops->softreset;
2127 ata_reset_fn_t hardreset = ap->ops->hardreset;
2128 struct ata_queued_cmd *qc;
2129 unsigned long flags;
2130
2131 qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2132 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2133 qc = NULL;
2134
2135 spin_lock_irqsave(ap->lock, flags);

/*
 * We *MUST* do FIFO draining before we issue a reset as
 * several devices helpfully clear their internal state and
 * will lock solid if we touch the data port post reset.  Pass
 * qc in case anyone wants to do different PIO/DMA recovery or
 * has per command fixups.
 */
2144 if (ap->ops->sff_drain_fifo)
2145 ap->ops->sff_drain_fifo(qc);
2146
2147 spin_unlock_irqrestore(ap->lock, flags);
2148
2149
2150 if ((hardreset == sata_std_hardreset ||
2151 hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
2152 hardreset = NULL;
2153
2154 ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
2155 ap->ops->postreset);
2156 }
2157 EXPORT_SYMBOL_GPL(ata_sff_error_handler);

/**
 * ata_sff_std_ports - initialize ioaddr with standard port offsets.
 * @ioaddr: IO address structure to be initialized
 *
 * Utility function which initializes data_addr, error_addr,
 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 * device_addr, status_addr and command_addr to standard offsets
 * relative to cmd_addr.
 *
 * Does not set ctl_addr, altstatus_addr, bmdma_addr or scr_addr.
 */
2170 void ata_sff_std_ports(struct ata_ioports *ioaddr)
2171 {
2172 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
2173 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
2174 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
2175 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
2176 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
2177 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
2178 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
2179 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
2180 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
2181 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
2182 }
2183 EXPORT_SYMBOL_GPL(ata_sff_std_ports);
2184
2185 #ifdef CONFIG_PCI
2186
2187 static bool ata_resources_present(struct pci_dev *pdev, int port)
2188 {
2189 int i;
2190
2191
2192 port *= 2;
2193 for (i = 0; i < 2; i++) {
2194 if (pci_resource_start(pdev, port + i) == 0 ||
2195 pci_resource_len(pdev, port + i) == 0)
2196 return false;
2197 }
2198 return true;
2199 }

/**
 * ata_pci_sff_init_host - acquire native PCI ATA resources and init host
 * @host: target ATA host
 *
 * Acquire native PCI ATA resources for @host and initialize the
 * first two ports of @host accordingly.  Ports marked dummy are
 * skipped and allocation failure makes the port dummy.
 *
 * Note that native PCI resources are valid even for legacy hosts
 * as we fix up pdev resources array early in boot, so this
 * function can be used for both native and legacy SFF hosts.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 if at least one port is initialized, -ENODEV if no port is
 * available.
 */
2220 int ata_pci_sff_init_host(struct ata_host *host)
2221 {
2222 struct device *gdev = host->dev;
2223 struct pci_dev *pdev = to_pci_dev(gdev);
2224 unsigned int mask = 0;
2225 int i, rc;
2226
2227
2228 for (i = 0; i < 2; i++) {
2229 struct ata_port *ap = host->ports[i];
2230 int base = i * 2;
2231 void __iomem * const *iomap;
2232
2233 if (ata_port_is_dummy(ap))
2234 continue;

/* Discard disabled ports.  Some controllers show
 * their unused channels this way.  Disabled ports are
 * made dummy.
 */
2240 if (!ata_resources_present(pdev, i)) {
2241 ap->ops = &ata_dummy_port_ops;
2242 continue;
2243 }
2244
2245 rc = pcim_iomap_regions(pdev, 0x3 << base,
2246 dev_driver_string(gdev));
2247 if (rc) {
2248 dev_warn(gdev,
2249 "failed to request/iomap BARs for port %d (errno=%d)\n",
2250 i, rc);
2251 if (rc == -EBUSY)
2252 pcim_pin_device(pdev);
2253 ap->ops = &ata_dummy_port_ops;
2254 continue;
2255 }
2256 host->iomap = iomap = pcim_iomap_table(pdev);
2257
2258 ap->ioaddr.cmd_addr = iomap[base];
2259 ap->ioaddr.altstatus_addr =
2260 ap->ioaddr.ctl_addr = (void __iomem *)
2261 ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
2262 ata_sff_std_ports(&ap->ioaddr);
2263
2264 ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
2265 (unsigned long long)pci_resource_start(pdev, base),
2266 (unsigned long long)pci_resource_start(pdev, base + 1));
2267
2268 mask |= 1 << i;
2269 }
2270
2271 if (!mask) {
2272 dev_err(gdev, "no available native port\n");
2273 return -ENODEV;
2274 }
2275
2276 return 0;
2277 }
2278 EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);

/**
 * ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
 * @pdev: target PCI device
 * @ppi: array of port_info, must be enough for two ports
 * @r_host: out argument for the initialized ATA host
 *
 * Helper to allocate a PIO-only SFF ATA host for @pdev, acquire
 * all PCI resources and initialize it accordingly in one go.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
2295 int ata_pci_sff_prepare_host(struct pci_dev *pdev,
2296 const struct ata_port_info * const *ppi,
2297 struct ata_host **r_host)
2298 {
2299 struct ata_host *host;
2300 int rc;
2301
2302 if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
2303 return -ENOMEM;
2304
2305 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
2306 if (!host) {
2307 dev_err(&pdev->dev, "failed to allocate ATA host\n");
2308 rc = -ENOMEM;
2309 goto err_out;
2310 }
2311
2312 rc = ata_pci_sff_init_host(host);
2313 if (rc)
2314 goto err_out;
2315
2316 devres_remove_group(&pdev->dev, NULL);
2317 *r_host = host;
2318 return 0;
2319
2320 err_out:
2321 devres_release_group(&pdev->dev, NULL);
2322 return rc;
2323 }
2324 EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);

/**
 * ata_pci_sff_activate_host - start SFF host, request IRQ and register it
 * @host: target SFF ATA host
 * @irq_handler: irq_handler used when requesting IRQ(s)
 * @sht: scsi_host_template to use when registering the host
 *
 * This is the counterpart of ata_host_activate() for SFF ATA
 * hosts.  This separate helper is necessary because SFF hosts
 * use two separate interrupts in legacy mode.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
2342 int ata_pci_sff_activate_host(struct ata_host *host,
2343 irq_handler_t irq_handler,
2344 struct scsi_host_template *sht)
2345 {
2346 struct device *dev = host->dev;
2347 struct pci_dev *pdev = to_pci_dev(dev);
2348 const char *drv_name = dev_driver_string(host->dev);
2349 int legacy_mode = 0, rc;
2350
2351 rc = ata_host_start(host);
2352 if (rc)
2353 return rc;
2354
2355 if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
2356 u8 tmp8, mask = 0;

/*
 * ATA spec says we should use legacy mode when one
 * port is in legacy mode, but disabled ports on some
 * PCI hosts appear as fixed legacy ports, e.g. SB600/700
 * on which the secondary port is not wired, so
 * ignore ports that are marked as 'dummy' during
 * this check.
 */
2366 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
2367 if (!ata_port_is_dummy(host->ports[0]))
2368 mask |= (1 << 0);
2369 if (!ata_port_is_dummy(host->ports[1]))
2370 mask |= (1 << 2);
2371 if ((tmp8 & mask) != mask)
2372 legacy_mode = 1;
2373 }
2374
2375 if (!devres_open_group(dev, NULL, GFP_KERNEL))
2376 return -ENOMEM;
2377
2378 if (!legacy_mode && pdev->irq) {
2379 int i;
2380
2381 rc = devm_request_irq(dev, pdev->irq, irq_handler,
2382 IRQF_SHARED, drv_name, host);
2383 if (rc)
2384 goto out;
2385
2386 for (i = 0; i < 2; i++) {
2387 if (ata_port_is_dummy(host->ports[i]))
2388 continue;
2389 ata_port_desc(host->ports[i], "irq %d", pdev->irq);
2390 }
2391 } else if (legacy_mode) {
2392 if (!ata_port_is_dummy(host->ports[0])) {
2393 rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
2394 irq_handler, IRQF_SHARED,
2395 drv_name, host);
2396 if (rc)
2397 goto out;
2398
2399 ata_port_desc(host->ports[0], "irq %d",
2400 ATA_PRIMARY_IRQ(pdev));
2401 }
2402
2403 if (!ata_port_is_dummy(host->ports[1])) {
2404 rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
2405 irq_handler, IRQF_SHARED,
2406 drv_name, host);
2407 if (rc)
2408 goto out;
2409
2410 ata_port_desc(host->ports[1], "irq %d",
2411 ATA_SECONDARY_IRQ(pdev));
2412 }
2413 }
2414
2415 rc = ata_host_register(host, sht);
2416 out:
2417 if (rc == 0)
2418 devres_remove_group(dev, NULL);
2419 else
2420 devres_release_group(dev, NULL);
2421
2422 return rc;
2423 }
2424 EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
2425
2426 static const struct ata_port_info *ata_sff_find_valid_pi(
2427 const struct ata_port_info * const *ppi)
2428 {
2429 int i;
2430
2431
2432 for (i = 0; i < 2 && ppi[i]; i++)
2433 if (ppi[i]->port_ops != &ata_dummy_port_ops)
2434 return ppi[i];
2435
2436 return NULL;
2437 }
2438
2439 static int ata_pci_init_one(struct pci_dev *pdev,
2440 const struct ata_port_info * const *ppi,
2441 struct scsi_host_template *sht, void *host_priv,
2442 int hflags, bool bmdma)
2443 {
2444 struct device *dev = &pdev->dev;
2445 const struct ata_port_info *pi;
2446 struct ata_host *host = NULL;
2447 int rc;
2448
2449 pi = ata_sff_find_valid_pi(ppi);
2450 if (!pi) {
2451 dev_err(&pdev->dev, "no valid port_info specified\n");
2452 return -EINVAL;
2453 }
2454
2455 if (!devres_open_group(dev, NULL, GFP_KERNEL))
2456 return -ENOMEM;
2457
2458 rc = pcim_enable_device(pdev);
2459 if (rc)
2460 goto out;
2461
2462 #ifdef CONFIG_ATA_BMDMA
2463 if (bmdma)
2464
2465 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2466 else
2467 #endif
2468
2469 rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2470 if (rc)
2471 goto out;
2472 host->private_data = host_priv;
2473 host->flags |= hflags;
2474
2475 #ifdef CONFIG_ATA_BMDMA
2476 if (bmdma) {
2477 pci_set_master(pdev);
2478 rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
2479 } else
2480 #endif
2481 rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
2482 out:
2483 if (rc == 0)
2484 devres_remove_group(&pdev->dev, NULL);
2485 else
2486 devres_release_group(&pdev->dev, NULL);
2487
2488 return rc;
2489 }

/**
 * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
 * @pdev: Controller to be initialized
 * @ppi: array of port_info, must be enough for two ports
 * @sht: scsi_host_template to use when registering the host
 * @host_priv: host private_data
 * @hflag: host flags
 *
 * This is a helper function which can be called from a driver's
 * xxx_init_one() probe function if the hardware uses traditional
 * IDE taskfile registers and is PIO only.
 *
 * ASSUMPTION:
 * Nobody makes a single channel controller that appears solely as
 * the secondary legacy port on PCI.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 *
 * RETURNS:
 * Zero on success, negative on errno-based value on error.
 */
2513 int ata_pci_sff_init_one(struct pci_dev *pdev,
2514 const struct ata_port_info * const *ppi,
2515 struct scsi_host_template *sht, void *host_priv, int hflag)
2516 {
2517 return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, 0);
2518 }
2519 EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
2520
2521 #endif

/*
 * BMDMA support
 */

2527 #ifdef CONFIG_ATA_BMDMA
2528
2529 const struct ata_port_operations ata_bmdma_port_ops = {
2530 .inherits = &ata_sff_port_ops,
2531
2532 .error_handler = ata_bmdma_error_handler,
2533 .post_internal_cmd = ata_bmdma_post_internal_cmd,
2534
2535 .qc_prep = ata_bmdma_qc_prep,
2536 .qc_issue = ata_bmdma_qc_issue,
2537
2538 .sff_irq_clear = ata_bmdma_irq_clear,
2539 .bmdma_setup = ata_bmdma_setup,
2540 .bmdma_start = ata_bmdma_start,
2541 .bmdma_stop = ata_bmdma_stop,
2542 .bmdma_status = ata_bmdma_status,
2543
2544 .port_start = ata_bmdma_port_start,
2545 };
2546 EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
2547
2548 const struct ata_port_operations ata_bmdma32_port_ops = {
2549 .inherits = &ata_bmdma_port_ops,
2550
2551 .sff_data_xfer = ata_sff_data_xfer32,
2552 .port_start = ata_bmdma_port_start32,
2553 };
2554 EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);

/**
 * ata_bmdma_fill_sg - Fill PCI IDE PRD table
 * @qc: Metadata associated with taskfile to be transferred
 *
 * Fill PCI IDE PRD (scatter-gather) table with segments
 * associated with the current disk command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
2567 static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
2568 {
2569 struct ata_port *ap = qc->ap;
2570 struct ata_bmdma_prd *prd = ap->bmdma_prd;
2571 struct scatterlist *sg;
2572 unsigned int si, pi;
2573
2574 pi = 0;
2575 for_each_sg(qc->sg, sg, qc->n_elem, si) {
2576 u32 addr, offset;
2577 u32 sg_len, len;

/* determine if physical DMA addr spans 64K boundary.
 * Note h/w doesn't support 64-bit, so we unconditionally
 * truncate dma_addr_t to u32.
 */
2583 addr = (u32) sg_dma_address(sg);
2584 sg_len = sg_dma_len(sg);
2585
2586 while (sg_len) {
2587 offset = addr & 0xffff;
2588 len = sg_len;
2589 if ((offset + sg_len) > 0x10000)
2590 len = 0x10000 - offset;
2591
2592 prd[pi].addr = cpu_to_le32(addr);
2593 prd[pi].flags_len = cpu_to_le32(len & 0xffff);
2594
2595 pi++;
2596 sg_len -= len;
2597 addr += len;
2598 }
2599 }
2600
2601 prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2602 }

/**
 * ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
 * @qc: Metadata associated with taskfile to be transferred
 *
 * Fill PCI IDE PRD (scatter-gather) table with segments
 * associated with the current disk command.  Perform the fill
 * so that we avoid writing any length 64K records for
 * controllers that don't follow the spec.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
2617 static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
2618 {
2619 struct ata_port *ap = qc->ap;
2620 struct ata_bmdma_prd *prd = ap->bmdma_prd;
2621 struct scatterlist *sg;
2622 unsigned int si, pi;
2623
2624 pi = 0;
2625 for_each_sg(qc->sg, sg, qc->n_elem, si) {
2626 u32 addr, offset;
2627 u32 sg_len, len, blen;
2628
2629
2630
2631
2632
2633 addr = (u32) sg_dma_address(sg);
2634 sg_len = sg_dma_len(sg);
2635
2636 while (sg_len) {
2637 offset = addr & 0xffff;
2638 len = sg_len;
2639 if ((offset + sg_len) > 0x10000)
2640 len = 0x10000 - offset;
2641
2642 blen = len & 0xffff;
2643 prd[pi].addr = cpu_to_le32(addr);
2644 if (blen == 0) {
/* Some PATA chipsets like the CS5530 can't
 * cope with 0x0000 meaning 64K as the transfer size.
 */
2648 prd[pi].flags_len = cpu_to_le32(0x8000);
2649 blen = 0x8000;
2650 prd[++pi].addr = cpu_to_le32(addr + 0x8000);
2651 }
2652 prd[pi].flags_len = cpu_to_le32(blen);
2653
2654 pi++;
2655 sg_len -= len;
2656 addr += len;
2657 }
2658 }
2659
2660 prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2661 }

/**
 * ata_bmdma_qc_prep - Prepare taskfile for submission
 * @qc: Metadata associated with taskfile to be prepared
 *
 * Prepare ATA taskfile for submission.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
2672 enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
2673 {
2674 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2675 return AC_ERR_OK;
2676
2677 ata_bmdma_fill_sg(qc);
2678
2679 return AC_ERR_OK;
2680 }
2681 EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);

/**
 * ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
 * @qc: Metadata associated with taskfile to be prepared
 *
 * Prepare ATA taskfile for submission, using the "dumb" PRD fill
 * that never emits a 64K-length entry.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
2692 enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
2693 {
2694 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2695 return AC_ERR_OK;
2696
2697 ata_bmdma_fill_sg_dumb(qc);
2698
2699 return AC_ERR_OK;
2700 }
2701 EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);

/**
 * ata_bmdma_qc_issue - issue taskfile to a BMDMA controller
 * @qc: command to issue to device
 *
 * This function issues a command to a BMDMA controller.  Non-DMA
 * protocols are handed off to ata_sff_qc_issue().
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
2717 unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2718 {
2719 struct ata_port *ap = qc->ap;
2720 struct ata_link *link = qc->dev->link;
2721
2722
2723 if (!ata_is_dma(qc->tf.protocol))
2724 return ata_sff_qc_issue(qc);
2725
2726
2727 ata_dev_select(ap, qc->dev->devno, 1, 0);
2728
2729
2730 switch (qc->tf.protocol) {
2731 case ATA_PROT_DMA:
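/* ATA DMA: load the taskfile, program and start the BMDMA engine, then wait for the completion interrupt */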
2732 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2733
2734 trace_ata_tf_load(ap, &qc->tf);
2735 ap->ops->sff_tf_load(ap, &qc->tf);
2736 trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
2737 ap->ops->bmdma_setup(qc);
2738 trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
2739 ap->ops->bmdma_start(qc);
2740 ap->hsm_task_state = HSM_ST_LAST;
2741 break;
2742
2743 case ATAPI_PROT_DMA:
2744 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2745
2746 trace_ata_tf_load(ap, &qc->tf);
2747 ap->ops->sff_tf_load(ap, &qc->tf);
2748 trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
2749 ap->ops->bmdma_setup(qc);
2750 ap->hsm_task_state = HSM_ST_FIRST;
2751
2752
2753 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
2754 ata_sff_queue_pio_task(link, 0);
2755 break;
2756
2757 default:
2758 WARN_ON(1);
2759 return AC_ERR_SYSTEM;
2760 }
2761
2762 return 0;
2763 }
2764 EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);

/**
 * ata_bmdma_port_intr - Handle BMDMA port interrupt
 * @ap: Port on which interrupt arrived
 * @qc: Taskfile currently active in engine
 *
 * Handle port interrupt for given queued command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * One if interrupt was handled, zero if not (shared irq).
 */
2779 unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
2780 {
2781 struct ata_eh_info *ehi = &ap->link.eh_info;
2782 u8 host_stat = 0;
2783 bool bmdma_stopped = false;
2784 unsigned int handled;
2785
2786 if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
2787
2788 host_stat = ap->ops->bmdma_status(ap);
2789 trace_ata_bmdma_status(ap, host_stat);
2790
2791
2792 if (!(host_stat & ATA_DMA_INTR))
2793 return ata_sff_idle_irq(ap);
2794
2795
2796 trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
2797 ap->ops->bmdma_stop(qc);
2798 bmdma_stopped = true;
2799
2800 if (unlikely(host_stat & ATA_DMA_ERR)) {
2801
2802 qc->err_mask |= AC_ERR_HOST_BUS;
2803 ap->hsm_task_state = HSM_ST_ERR;
2804 }
2805 }
2806
2807 handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);
2808
2809 if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
2810 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2811
2812 return handled;
2813 }
2814 EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);

/**
 * ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
 * @irq: irq line (unused)
 * @dev_instance: pointer to our ata_host information structure
 *
 * Default interrupt handler for PCI IDE devices.  Calls
 * ata_bmdma_port_intr() for each port that is not disabled.
 *
 * LOCKING:
 * Obtains host lock during operation.
 *
 * RETURNS:
 * IRQ_NONE or IRQ_HANDLED.
 */
2830 irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
2831 {
2832 return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
2833 }
2834 EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);

/**
 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
 * @ap: port to handle error for
 *
 * Stock error handler for BMDMA controller.  It can handle both
 * PATA and SATA controllers.  Most BMDMA controllers should be
 * able to use this EH as-is or with some added handling before
 * and after.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
2848 void ata_bmdma_error_handler(struct ata_port *ap)
2849 {
2850 struct ata_queued_cmd *qc;
2851 unsigned long flags;
2852 bool thaw = false;
2853
2854 qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2855 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2856 qc = NULL;
2857
2858
2859 spin_lock_irqsave(ap->lock, flags);
2860
2861 if (qc && ata_is_dma(qc->tf.protocol)) {
2862 u8 host_stat;
2863
2864 host_stat = ap->ops->bmdma_status(ap);
2865 trace_ata_bmdma_status(ap, host_stat);

/* BMDMA controllers indicate host bus error by
 * setting DMA_ERR bit and timing out.  As it wasn't
 * really a timeout event, adjust error mask and
 * cancel frozen state.
 */
2872 if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
2873 qc->err_mask = AC_ERR_HOST_BUS;
2874 thaw = true;
2875 }
2876
2877 trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
2878 ap->ops->bmdma_stop(qc);
2879
2880
2881 if (thaw) {
2882 ap->ops->sff_check_status(ap);
2883 if (ap->ops->sff_irq_clear)
2884 ap->ops->sff_irq_clear(ap);
2885 }
2886 }
2887
2888 spin_unlock_irqrestore(ap->lock, flags);
2889
2890 if (thaw)
2891 ata_eh_thaw_port(ap);
2892
2893 ata_sff_error_handler(ap);
2894 }
2895 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);

/**
 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
 * @qc: internal command to clean up
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
2904 void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
2905 {
2906 struct ata_port *ap = qc->ap;
2907 unsigned long flags;
2908
2909 if (ata_is_dma(qc->tf.protocol)) {
2910 spin_lock_irqsave(ap->lock, flags);
2911 trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
2912 ap->ops->bmdma_stop(qc);
2913 spin_unlock_irqrestore(ap->lock, flags);
2914 }
2915 }
2916 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);

/**
 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
 * @ap: Port associated with this ATA transaction.
 *
 * Clear interrupt and error flags in the DMA status register.
 *
 * May be used as the irq_clear() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
2929 void ata_bmdma_irq_clear(struct ata_port *ap)
2930 {
2931 void __iomem *mmio = ap->ioaddr.bmdma_addr;
2932
2933 if (!mmio)
2934 return;
2935
2936 iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
2937 }
2938 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);

/**
 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
2947 void ata_bmdma_setup(struct ata_queued_cmd *qc)
2948 {
2949 struct ata_port *ap = qc->ap;
2950 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
2951 u8 dmactl;
2952
2953
2954 mb();
2955 iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2956
2957
2958 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2959 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
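/* ATA_DMA_WR makes the BMDMA engine write to memory, i.e. a device-to-host (READ) transfer */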
2960 if (!rw)
2961 dmactl |= ATA_DMA_WR;
2962 iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2963
2964
2965 ap->ops->sff_exec_command(ap, &qc->tf);
2966 }
2967 EXPORT_SYMBOL_GPL(ata_bmdma_setup);

/**
 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
2976 void ata_bmdma_start(struct ata_queued_cmd *qc)
2977 {
2978 struct ata_port *ap = qc->ap;
2979 u8 dmactl;
2980
2981
2982 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2983 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

/* Strictly, one may wish to issue an ioread8() here, to
 * flush the mmio write.  However, control also passes
 * to the hardware at this point, and it will interrupt
 * us when we are to resume control.  So, in effect,
 * we don't care when the mmio write flushes.
 * Further, a read of the DMA status register _immediately_
 * following the write may not be what certain flaky hardware
 * is expecting, so it is best not to add a readb() here
 * without first auditing all the MMIO ATA cards/mobos.
 */
2999 }
3000 EXPORT_SYMBOL_GPL(ata_bmdma_start);

/**
 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 * @qc: Command we are ending DMA for
 *
 * Clears the ATA_DMA_START flag in the DMA control register.
 *
 * May be used as the bmdma_stop() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
3013 void ata_bmdma_stop(struct ata_queued_cmd *qc)
3014 {
3015 struct ata_port *ap = qc->ap;
3016 void __iomem *mmio = ap->ioaddr.bmdma_addr;
3017
3018
3019 iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
3020 mmio + ATA_DMA_CMD);

/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
3023 ata_sff_dma_pause(ap);
3024 }
3025 EXPORT_SYMBOL_GPL(ata_bmdma_stop);

/**
 * ata_bmdma_status - Read PCI IDE BMDMA status
 * @ap: Port associated with this ATA transaction.
 *
 * Read and return BMDMA status register.
 *
 * May be used as the bmdma_status() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
3038 u8 ata_bmdma_status(struct ata_port *ap)
3039 {
3040 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3041 }
3042 EXPORT_SYMBOL_GPL(ata_bmdma_status);

/**
 * ata_bmdma_port_start - Set port up for BMDMA.
 * @ap: Port to initialize
 *
 * Called just after data structures for each port are
 * initialized.  Allocates space for the PRD table if the device
 * is DMA capable.
 *
 * May be used as the port_start() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
3057 int ata_bmdma_port_start(struct ata_port *ap)
3058 {
3059 if (ap->mwdma_mask || ap->udma_mask) {
3060 ap->bmdma_prd =
3061 dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
3062 &ap->bmdma_prd_dma, GFP_KERNEL);
3063 if (!ap->bmdma_prd)
3064 return -ENOMEM;
3065 }
3066
3067 return 0;
3068 }
3069 EXPORT_SYMBOL_GPL(ata_bmdma_port_start);

/**
 * ata_bmdma_port_start32 - Set port up for BMDMA with 32bit PIO.
 * @ap: Port to initialize
 *
 * Called just after data structures for each port are
 * initialized.  Enables 32bit PIO and allocates space for the PRD
 * table if the device is DMA capable.
 *
 * May be used as the port_start() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
3085 int ata_bmdma_port_start32(struct ata_port *ap)
3086 {
3087 ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
3088 return ata_bmdma_port_start(ap);
3089 }
3090 EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);
3091
3092 #ifdef CONFIG_PCI

/**
 * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
 * @pdev: PCI device
 *
 * Some PCI ATA devices report simplex mode but in fact can be told
 * to enter non-simplex mode.  This implements the necessary logic to
 * perform the task on such devices.  Calling it on other devices
 * will have -undefined- behaviour.
 */
3103 int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
3104 {
3105 unsigned long bmdma = pci_resource_start(pdev, 4);
3106 u8 simplex;
3107
3108 if (bmdma == 0)
3109 return -ENOENT;
3110
3111 simplex = inb(bmdma + 0x02);
3112 outb(simplex & 0x60, bmdma + 0x02);
3113 simplex = inb(bmdma + 0x02);
3114 if (simplex & 0x80)
3115 return -EOPNOTSUPP;
3116 return 0;
3117 }
3118 EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
3119
3120 static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
3121 {
3122 int i;
3123
3124 dev_err(host->dev, "BMDMA: %s, falling back to PIO\n", reason);
3125
3126 for (i = 0; i < 2; i++) {
3127 host->ports[i]->mwdma_mask = 0;
3128 host->ports[i]->udma_mask = 0;
3129 }
3130 }

/**
 * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
 * @host: target ATA host
 *
 * Acquire PCI BMDMA resources and initialize @host accordingly.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 */
3141 void ata_pci_bmdma_init(struct ata_host *host)
3142 {
3143 struct device *gdev = host->dev;
3144 struct pci_dev *pdev = to_pci_dev(gdev);
3145 int i, rc;
3146
3147
3148 if (pci_resource_start(pdev, 4) == 0) {
3149 ata_bmdma_nodma(host, "BAR4 is zero");
3150 return;
3151 }

/*
 * Some controllers require the BMDMA region to be initialized
 * even if DMA is not in use, to clear IRQ status via the
 * ->sff_irq_clear method.  Try to initialize bmdma_addr
 * regardless of the dma masks.
 */
3159 rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
3160 if (rc)
3161 ata_bmdma_nodma(host, "failed to set dma mask");
3162
3163
3164 rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
3165 if (rc) {
3166 ata_bmdma_nodma(host, "failed to request/iomap BAR4");
3167 return;
3168 }
3169 host->iomap = pcim_iomap_table(pdev);
3170
3171 for (i = 0; i < 2; i++) {
3172 struct ata_port *ap = host->ports[i];
3173 void __iomem *bmdma = host->iomap[4] + 8 * i;
3174
3175 if (ata_port_is_dummy(ap))
3176 continue;
3177
3178 ap->ioaddr.bmdma_addr = bmdma;
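/* a set simplex bit (bit 7 of the BMDMA status register) means the two channels cannot do DMA simultaneously */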
3179 if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
3180 (ioread8(bmdma + 2) & 0x80))
3181 host->flags |= ATA_HOST_SIMPLEX;
3182
3183 ata_port_desc(ap, "bmdma 0x%llx",
3184 (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
3185 }
3186 }
3187 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);

/**
 * ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
 * @pdev: target PCI device
 * @ppi: array of port_info, must be enough for two ports
 * @r_host: out argument for the initialized ATA host
 *
 * Helper to allocate a BMDMA ATA host for @pdev, acquire all PCI
 * resources and initialize it accordingly in one go.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
3204 int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
3205 const struct ata_port_info * const * ppi,
3206 struct ata_host **r_host)
3207 {
3208 int rc;
3209
3210 rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
3211 if (rc)
3212 return rc;
3213
3214 ata_pci_bmdma_init(*r_host);
3215 return 0;
3216 }
3217 EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);

/**
 * ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
 * @pdev: Controller to be initialized
 * @ppi: array of port_info, must be enough for two ports
 * @sht: scsi_host_template to use when registering the host
 * @host_priv: host private_data
 * @hflags: host flags
 *
 * This function is similar to ata_pci_sff_init_one() but also
 * takes care of BMDMA initialization.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 *
 * RETURNS:
 * Zero on success, negative on errno-based value on error.
 */
3236 int ata_pci_bmdma_init_one(struct pci_dev *pdev,
3237 const struct ata_port_info * const * ppi,
3238 struct scsi_host_template *sht, void *host_priv,
3239 int hflags)
3240 {
3241 return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, 1);
3242 }
3243 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
3244
3245 #endif
3246 #endif

/**
 * ata_sff_port_init - Initialize SFF/BMDMA ATA port
 * @ap: Port to initialize
 *
 * Called on port allocation to initialize SFF/BMDMA specific
 * fields.
 *
 * LOCKING:
 * None.
 */
3258 void ata_sff_port_init(struct ata_port *ap)
3259 {
3260 INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
3261 ap->ctl = ATA_DEVCTL_OBS;
3262 ap->last_ctl = 0xFF;
3263 }
3264
3265 int __init ata_sff_init(void)
3266 {
3267 ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
3268 if (!ata_sff_wq)
3269 return -ENOMEM;
3270
3271 return 0;
3272 }
3273
3274 void ata_sff_exit(void)
3275 {
3276 destroy_workqueue(ata_sff_wq);
3277 }