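/*
 * sata_inic162x.c - Low-level driver for Initio 162x SATA controllers
 *
 * Author: Tejun Heo
 *
 * This file is released under GPL v2.
 *
 * **** WARNING ****
 *
 * inic162x support is broken with common data corruption issues and is
 * disabled by default; see the probe-time alert in inic_init_one().
 */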
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

#define DRV_NAME	"sata_inic162x"
#define DRV_VERSION	"0.4"

enum {
	MMIO_BAR_PCI		= 5,
	MMIO_BAR_CARDBUS	= 1,

	NR_PORTS		= 2,

	IDMA_CPB_TBL_SIZE	= 4 * 32,

	INIC_DMA_BOUNDARY	= 0xffffff,

	HOST_ACTRL		= 0x08,
	HOST_CTL		= 0x7c,
	HOST_STAT		= 0x7e,
	HOST_IRQ_STAT		= 0xbc,
	HOST_IRQ_MASK		= 0xbe,

	PORT_SIZE		= 0x40,

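	/* registers for ATA TF operation */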
	PORT_TF_DATA		= 0x00,
	PORT_TF_FEATURE		= 0x01,
	PORT_TF_NSECT		= 0x02,
	PORT_TF_LBAL		= 0x03,
	PORT_TF_LBAM		= 0x04,
	PORT_TF_LBAH		= 0x05,
	PORT_TF_DEVICE		= 0x06,
	PORT_TF_COMMAND		= 0x07,
	PORT_TF_ALT_STAT	= 0x08,
	PORT_IRQ_STAT		= 0x09,
	PORT_IRQ_MASK		= 0x0a,
	PORT_PRD_CTL		= 0x0b,
	PORT_PRD_ADDR		= 0x0c,
	PORT_PRD_XFERLEN	= 0x10,
	PORT_CPB_CPBLAR		= 0x18,
	PORT_CPB_PTQFIFO	= 0x1c,

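	/* IDMA registers */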
	PORT_IDMA_CTL		= 0x14,
	PORT_IDMA_STAT		= 0x16,

	PORT_RPQ_FIFO		= 0x1e,
	PORT_RPQ_CNT		= 0x1f,

	PORT_SCR		= 0x20,

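	/* HOST_CTL bits */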
	HCTL_LEDEN		= (1 << 3),	/* enable LED operation */
	HCTL_IRQOFF		= (1 << 8),	/* global IRQ off */
	HCTL_FTHD0		= (1 << 10),	/* FIFO threshold 0 */
	HCTL_FTHD1		= (1 << 11),	/* FIFO threshold 1 */
	HCTL_PWRDWN		= (1 << 12),	/* power down PHYs */
	HCTL_SOFTRST		= (1 << 13),	/* global reset (no PHY reset) */
	HCTL_RPGSEL		= (1 << 15),	/* register page select */

	HCTL_KNOWN_BITS		= HCTL_IRQOFF | HCTL_PWRDWN | HCTL_SOFTRST |
				  HCTL_RPGSEL,

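	/* HOST_IRQ_(STAT|MASK) bits */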
	HIRQ_PORT0		= (1 << 0),
	HIRQ_PORT1		= (1 << 1),
	HIRQ_SOFT		= (1 << 14),
	HIRQ_GLOBAL		= (1 << 15),	/* STAT only */

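	/* PORT_IRQ_(STAT|MASK) bits */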
	PIRQ_OFFLINE		= (1 << 0),	/* device unplugged */
	PIRQ_ONLINE		= (1 << 1),	/* device plugged */
	PIRQ_COMPLETE		= (1 << 2),	/* completion interrupt */
	PIRQ_FATAL		= (1 << 3),	/* fatal error */
	PIRQ_ATA		= (1 << 4),	/* ATA interrupt */
	PIRQ_REPLY		= (1 << 5),	/* reply FIFO not empty */
	PIRQ_PENDING		= (1 << 7),	/* port IRQ pending (STAT only) */

	PIRQ_ERR		= PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,
	PIRQ_MASK_DEFAULT	= PIRQ_REPLY | PIRQ_ATA,
	PIRQ_MASK_FREEZE	= 0xff,

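	/* PORT_PRD_CTL bits */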
	PRD_CTL_START		= (1 << 0),
	PRD_CTL_WR		= (1 << 3),
	PRD_CTL_DMAEN		= (1 << 7),	/* DMA enable */

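	/* PORT_IDMA_CTL bits */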
	IDMA_CTL_RST_ATA	= (1 << 2),	/* hardreset ATA bus */
	IDMA_CTL_RST_IDMA	= (1 << 5),	/* reset IDMA machinery */
	IDMA_CTL_GO		= (1 << 7),	/* IDMA mode go */
	IDMA_CTL_ATA_NIEN	= (1 << 8),	/* ATA IRQ disable */

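	/* PORT_IDMA_STAT bits */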
	IDMA_STAT_PERR		= (1 << 0),	/* PCI error */
	IDMA_STAT_CPBERR	= (1 << 1),	/* ADMA CPB error */
	IDMA_STAT_LGCY		= (1 << 3),	/* ADMA legacy */
	IDMA_STAT_UIRQ		= (1 << 4),	/* ADMA unsolicited irq */
	IDMA_STAT_STPD		= (1 << 5),	/* ADMA stopped */
	IDMA_STAT_PSD		= (1 << 6),	/* ADMA paused */
	IDMA_STAT_DONE		= (1 << 7),	/* ADMA done */

	IDMA_STAT_ERR		= IDMA_STAT_PERR | IDMA_STAT_CPBERR,

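	/* CPB Control Flags */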
	CPB_CTL_VALID		= (1 << 0),	/* CPB valid */
	CPB_CTL_QUEUED		= (1 << 1),	/* queued command */
	CPB_CTL_DATA		= (1 << 2),	/* data, rsvd in datasheet */
	CPB_CTL_IEN		= (1 << 3),	/* PCI interrupt enable */
	CPB_CTL_DEVDIR		= (1 << 4),	/* device direction control */

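	/* CPB Response Flags */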
	CPB_RESP_DONE		= (1 << 0),	/* ATA command complete */
	CPB_RESP_REL		= (1 << 1),	/* ATA release */
	CPB_RESP_IGNORED	= (1 << 2),	/* CPB ignored */
	CPB_RESP_ATA_ERR	= (1 << 3),	/* ATA command error */
	CPB_RESP_SPURIOUS	= (1 << 4),	/* ATA spurious interrupt error */
	CPB_RESP_UNDERFLOW	= (1 << 5),	/* APRD deficiency length error */
	CPB_RESP_OVERFLOW	= (1 << 6),	/* APRD excess length error */
	CPB_RESP_CPB_ERR	= (1 << 7),	/* CPB error flag */

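	/* PRD Control Flags */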
	PRD_DRAIN		= (1 << 1),	/* ignore data excess */
	PRD_CDB			= (1 << 2),	/* ATAPI packet command pointer */
	PRD_DIRECT_INTR		= (1 << 3),	/* direct interrupt */
	PRD_DMA			= (1 << 4),	/* data transfer method */
	PRD_WRITE		= (1 << 5),	/* data direction */
	PRD_IOM			= (1 << 6),	/* io/memory transfer */
	PRD_END			= (1 << 7),	/* APRD chain end */
};

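/* Command Parameter Block */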
struct inic_cpb {
	u8		resp_flags;	/* Response Flags */
	u8		error;		/* ATA Error */
	u8		status;		/* ATA Status */
	u8		ctl_flags;	/* Control Flags */
	__le32		len;		/* Total Transfer Length */
	__le32		prd;		/* First PRD pointer */
	u8		rsvd[4];
	/* 16 bytes */
	u8		feature;	/* ATA Feature */
	u8		hob_feature;	/* ATA Ex. Feature */
	u8		device;		/* ATA Device/Head */
	u8		mirctl;		/* Mirror Control */
	u8		nsect;		/* ATA Sector Count */
	u8		hob_nsect;	/* ATA Ex. Sector Count */
	u8		lbal;		/* ATA Sector Number */
	u8		hob_lbal;	/* ATA Ex. Sector Number */
	u8		lbam;		/* ATA Cylinder Low */
	u8		hob_lbam;	/* ATA Ex. Cylinder Low */
	u8		lbah;		/* ATA Cylinder High */
	u8		hob_lbah;	/* ATA Ex. Cylinder High */
	u8		command;	/* ATA Command */
	u8		ctl;		/* ATA Control */
	u8		slave_error;	/* Slave ATA Error */
	u8		slave_status;	/* Slave ATA Status */
	/* 32 bytes */
} __packed;

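/* Physical Region Descriptor */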
struct inic_prd {
	__le32		mad;		/* Physical Memory Address */
	__le16		len;		/* Transfer Length */
	u8		rsvd;
	u8		flags;		/* Control Flags */
} __packed;

struct inic_pkt {
	struct inic_cpb	cpb;
	struct inic_prd	prd[LIBATA_MAX_PRD + 1];	/* + 1 for CDB */
	u8		cdb[ATAPI_CDB_LEN];
} __packed;

struct inic_host_priv {
	void __iomem	*mmio_base;
	u16		cached_hctl;
};

struct inic_port_priv {
	struct inic_pkt	*pkt;
	dma_addr_t	pkt_dma;
	u32		*cpb_tbl;
	dma_addr_t	cpb_tbl_dma;
};

static struct scsi_host_template inic_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= LIBATA_MAX_PRD, /* maybe it can be larger? */

	/*
	 * This controller is braindamaged.  dma_boundary is 0xffff like other
	 * controllers, but it will lock up the whole machine HARD if a
	 * 65536-byte PRD entry is fed.  Limit max_segment_size to
	 * 65536 - 512 instead.
	 */
	.dma_boundary		= INIC_DMA_BOUNDARY,
	.max_segment_size	= 65536 - 512,
};

static const int scr_map[] = {
	[SCR_STATUS]	= 0,
	[SCR_ERROR]	= 1,
	[SCR_CONTROL]	= 2,
};

static void __iomem *inic_port_base(struct ata_port *ap)
{
	struct inic_host_priv *hpriv = ap->host->private_data;

	return hpriv->mmio_base + ap->port_no * PORT_SIZE;
}

static void inic_reset_port(void __iomem *port_base)
{
	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;

	/* stop IDMA engine */
	readw(idma_ctl); /* flush */
	msleep(1);

	/* mask IRQ and assert reset */
	writew(IDMA_CTL_RST_IDMA, idma_ctl);
	readw(idma_ctl); /* flush */
	msleep(1);

	/* release reset */
	writew(0, idma_ctl);

	/* clear irq */
	writeb(0xff, port_base + PORT_IRQ_STAT);
}

static int inic_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
{
	void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR;

	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
		return -EINVAL;

	*val = readl(scr_addr + scr_map[sc_reg] * 4);

	/* this controller has stuck DIAG.N, ignore it */
	if (sc_reg == SCR_ERROR)
		*val &= ~SERR_PHYRDY_CHG;
	return 0;
}

static int inic_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
{
	void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR;

	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
		return -EINVAL;

	writel(val, scr_addr + scr_map[sc_reg] * 4);
	return 0;
}

static void inic_stop_idma(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	/* drain the reply queue, then stop the IDMA engine */
	readb(port_base + PORT_RPQ_FIFO);
	readb(port_base + PORT_RPQ_CNT);
	writew(0, port_base + PORT_IDMA_CTL);
}

static void inic_host_err_intr(struct ata_port *ap, u8 irq_stat, u16 idma_stat)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct inic_port_priv *pp = ap->private_data;
	struct inic_cpb *cpb = &pp->pkt->cpb;
	bool freeze = false;

	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "irq_stat=0x%x idma_stat=0x%x",
			  irq_stat, idma_stat);

	inic_stop_idma(ap);

	if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
		ata_ehi_push_desc(ehi, "hotplug");
		ata_ehi_hotplugged(ehi);
		freeze = true;
	}

	if (idma_stat & IDMA_STAT_PERR) {
		ata_ehi_push_desc(ehi, "PCI error");
		freeze = true;
	}

	if (idma_stat & IDMA_STAT_CPBERR) {
		ata_ehi_push_desc(ehi, "CPB error");

		if (cpb->resp_flags & CPB_RESP_IGNORED) {
			__ata_ehi_push_desc(ehi, " ignored");
			ehi->err_mask |= AC_ERR_INVALID;
			freeze = true;
		}

		if (cpb->resp_flags & CPB_RESP_ATA_ERR)
			ehi->err_mask |= AC_ERR_DEV;

		if (cpb->resp_flags & CPB_RESP_SPURIOUS) {
			__ata_ehi_push_desc(ehi, " spurious-intr");
			ehi->err_mask |= AC_ERR_HSM;
			freeze = true;
		}

		if (cpb->resp_flags &
		    (CPB_RESP_UNDERFLOW | CPB_RESP_OVERFLOW)) {
			__ata_ehi_push_desc(ehi, " data-over/underflow");
			ehi->err_mask |= AC_ERR_HSM;
			freeze = true;
		}
	}

	if (freeze)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void inic_host_intr(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
	u8 irq_stat;
	u16 idma_stat;

	/* read and clear IRQ status */
	irq_stat = readb(port_base + PORT_IRQ_STAT);
	writeb(irq_stat, port_base + PORT_IRQ_STAT);
	idma_stat = readw(port_base + PORT_IDMA_STAT);

	if (unlikely((irq_stat & PIRQ_ERR) || (idma_stat & IDMA_STAT_ERR)))
		inic_host_err_intr(ap, irq_stat, idma_stat);

	if (unlikely(!qc))
		goto spurious;

	if (likely(idma_stat & IDMA_STAT_DONE)) {
		inic_stop_idma(ap);

		/* Depending on circumstances, device error
		 * isn't reported by IDMA, check it explicitly.
		 */
		if (unlikely(readb(port_base + PORT_TF_COMMAND) &
			     (ATA_DF | ATA_ERR)))
			qc->err_mask |= AC_ERR_DEV;

		ata_qc_complete(qc);
		return;
	}

 spurious:
	ata_port_warn(ap, "unhandled interrupt: cmd=0x%x irq_stat=0x%x idma_stat=0x%x\n",
		      qc ? qc->tf.command : 0xff, irq_stat, idma_stat);
}

static irqreturn_t inic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct inic_host_priv *hpriv = host->private_data;
	u16 host_irq_stat;
	int i, handled = 0;

	host_irq_stat = readw(hpriv->mmio_base + HOST_IRQ_STAT);

	if (unlikely(!(host_irq_stat & HIRQ_GLOBAL)))
		goto out;

	spin_lock(&host->lock);

	for (i = 0; i < NR_PORTS; i++)
		if (host_irq_stat & (HIRQ_PORT0 << i)) {
			inic_host_intr(host->ports[i]);
			handled++;
		}

	spin_unlock(&host->lock);

 out:
	return IRQ_RETVAL(handled);
}

static int inic_check_atapi_dma(struct ata_queued_cmd *qc)
{
	/* For some reason ATAPI_PROT_DMA doesn't work for some
	 * commands including writes and other misc ops.  Use PIO
	 * protocol instead, which BTW is driven by the DMA engine
	 * anyway, so it shouldn't make much difference for native
	 * SATA devices.
	 */
	if (atapi_cmd_type(qc->cdb[0]) == READ)
		return 0;
	return 1;
}

static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	unsigned int si;
	u8 flags = 0;

	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= PRD_WRITE;

	if (ata_is_dma(qc->tf.protocol))
		flags |= PRD_DMA;

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		prd->mad = cpu_to_le32(sg_dma_address(sg));
		prd->len = cpu_to_le16(sg_dma_len(sg));
		prd->flags = flags;
		prd++;
	}

	WARN_ON(!si);
	prd[-1].flags |= PRD_END;
}

static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc)
{
	struct inic_port_priv *pp = qc->ap->private_data;
	struct inic_pkt *pkt = pp->pkt;
	struct inic_cpb *cpb = &pkt->cpb;
	struct inic_prd *prd = pkt->prd;
	bool is_atapi = ata_is_atapi(qc->tf.protocol);
	bool is_data = ata_is_data(qc->tf.protocol);
	unsigned int cdb_len = 0;

	if (is_atapi)
		cdb_len = qc->dev->cdb_len;

	/* prepare packet, based on initio driver */
	memset(pkt, 0, sizeof(struct inic_pkt));

	cpb->ctl_flags = CPB_CTL_VALID | CPB_CTL_IEN;
	if (is_atapi || is_data)
		cpb->ctl_flags |= CPB_CTL_DATA;

	cpb->len = cpu_to_le32(qc->nbytes + cdb_len);
	cpb->prd = cpu_to_le32(pp->pkt_dma + offsetof(struct inic_pkt, prd));

	cpb->device = qc->tf.device;
	cpb->feature = qc->tf.feature;
	cpb->nsect = qc->tf.nsect;
	cpb->lbal = qc->tf.lbal;
	cpb->lbam = qc->tf.lbam;
	cpb->lbah = qc->tf.lbah;

	if (qc->tf.flags & ATA_TFLAG_LBA48) {
		cpb->hob_feature = qc->tf.hob_feature;
		cpb->hob_nsect = qc->tf.hob_nsect;
		cpb->hob_lbal = qc->tf.hob_lbal;
		cpb->hob_lbam = qc->tf.hob_lbam;
		cpb->hob_lbah = qc->tf.hob_lbah;
	}

	cpb->command = qc->tf.command;
	/* don't load ctl - dunno why.  it's like that in the initio driver */

	/* setup PRD for CDB */
	if (is_atapi) {
		memcpy(pkt->cdb, qc->cdb, ATAPI_CDB_LEN);
		prd->mad = cpu_to_le32(pp->pkt_dma +
				       offsetof(struct inic_pkt, cdb));
		prd->len = cpu_to_le16(cdb_len);
		prd->flags = PRD_CDB | PRD_WRITE;
		if (!is_data)
			prd->flags |= PRD_END;
		prd++;
	}

	/* setup PRD for data */
	if (is_data)
		inic_fill_sg(prd, qc);

	pp->cpb_tbl[0] = pp->pkt_dma;

	return AC_ERR_OK;
}

static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_base = inic_port_base(ap);

	/* fire up the ADMA engine */
	writew(HCTL_FTHD0 | HCTL_LEDEN, port_base + HOST_CTL);
	writew(IDMA_CTL_GO, port_base + PORT_IDMA_CTL);
	writeb(0, port_base + PORT_CPB_PTQFIFO);

	return 0;
}

static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	void __iomem *port_base = inic_port_base(ap);

	tf->error = readb(port_base + PORT_TF_FEATURE);
	tf->nsect = readb(port_base + PORT_TF_NSECT);
	tf->lbal = readb(port_base + PORT_TF_LBAL);
	tf->lbam = readb(port_base + PORT_TF_LBAM);
	tf->lbah = readb(port_base + PORT_TF_LBAH);
	tf->device = readb(port_base + PORT_TF_DEVICE);
	tf->status = readb(port_base + PORT_TF_COMMAND);
}

static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *rtf = &qc->result_tf;
	struct ata_taskfile tf;

	/* FIXME: Except for status and error, result TF access
	 * doesn't work.  Assume the result TF is valid iff ATA_ERR
	 * is set.
	 */
	inic_tf_read(qc->ap, &tf);

	if (!(tf.status & ATA_ERR))
		return false;

	rtf->status = tf.status;
	rtf->error = tf.error;
	return true;
}

static void inic_freeze(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	writeb(PIRQ_MASK_FREEZE, port_base + PORT_IRQ_MASK);
	writeb(0xff, port_base + PORT_IRQ_STAT);
}

static void inic_thaw(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	writeb(0xff, port_base + PORT_IRQ_STAT);
	writeb(PIRQ_MASK_DEFAULT, port_base + PORT_IRQ_MASK);
}

static int inic_check_ready(struct ata_link *link)
{
	void __iomem *port_base = inic_port_base(link->ap);

	return ata_check_ready(readb(port_base + PORT_TF_COMMAND));
}

/*
 * SRST and SControl hardreset don't give valid signature on this
 * controller.  Only controller specific hardreset mechanism works.
 */
static int inic_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *port_base = inic_port_base(ap);
	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	int rc;

	/* hammer it into sane state */
	inic_reset_port(port_base);

	writew(IDMA_CTL_RST_ATA, idma_ctl);
	readw(idma_ctl); /* flush */
	ata_msleep(ap, 1);
	writew(0, idma_ctl);

	rc = sata_link_resume(link, timing, deadline);
	if (rc) {
		ata_link_warn(link,
			      "failed to resume link after reset (errno=%d)\n",
			      rc);
		return rc;
	}

	*class = ATA_DEV_NONE;
	if (ata_link_online(link)) {
		struct ata_taskfile tf;

		/* wait for link to become ready */
		rc = ata_wait_after_reset(link, deadline, inic_check_ready);
		/* link occupied, -ENODEV too is an error */
		if (rc) {
			ata_link_warn(link,
				      "device not ready after hardreset (errno=%d)\n",
				      rc);
			return rc;
		}

		inic_tf_read(ap, &tf);
		*class = ata_port_classify(ap, &tf);
	}

	return 0;
}

static void inic_error_handler(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	inic_reset_port(port_base);
	ata_std_error_handler(ap);
}

static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
{
	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		inic_reset_port(inic_port_base(qc->ap));
}

static void init_port(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct inic_port_priv *pp = ap->private_data;

	/* clear packet and CPB table */
	memset(pp->pkt, 0, sizeof(struct inic_pkt));
	memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE);

	/* setup CPB lookup table addresses */
	writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR);
}

static int inic_port_resume(struct ata_port *ap)
{
	init_port(ap);
	return 0;
}

static int inic_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct inic_port_priv *pp;

	/* alloc and initialize private data */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	/* alloc DMA resources */
	pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt),
				      &pp->pkt_dma, GFP_KERNEL);
	if (!pp->pkt)
		return -ENOMEM;

	pp->cpb_tbl = dmam_alloc_coherent(dev, IDMA_CPB_TBL_SIZE,
					  &pp->cpb_tbl_dma, GFP_KERNEL);
	if (!pp->cpb_tbl)
		return -ENOMEM;

	init_port(ap);

	return 0;
}

static struct ata_port_operations inic_port_ops = {
	.inherits		= &sata_port_ops,

	.check_atapi_dma	= inic_check_atapi_dma,
	.qc_prep		= inic_qc_prep,
	.qc_issue		= inic_qc_issue,
	.qc_fill_rtf		= inic_qc_fill_rtf,

	.freeze			= inic_freeze,
	.thaw			= inic_thaw,
	.hardreset		= inic_hardreset,
	.error_handler		= inic_error_handler,
	.post_internal_cmd	= inic_post_internal_cmd,

	.scr_read		= inic_scr_read,
	.scr_write		= inic_scr_write,

	.port_resume		= inic_port_resume,
	.port_start		= inic_port_start,
};

static const struct ata_port_info inic_port_info = {
	.flags			= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
	.pio_mask		= ATA_PIO4,
	.mwdma_mask		= ATA_MWDMA2,
	.udma_mask		= ATA_UDMA6,
	.port_ops		= &inic_port_ops
};

static int init_controller(void __iomem *mmio_base, u16 hctl)
{
	int i;
	u16 val;

	hctl &= ~HCTL_KNOWN_BITS;

	/* Soft reset whole controller.  Spec says reset duration is 3
	 * PCI clocks, be generous and give it 10ms.
	 */
	writew(hctl | HCTL_SOFTRST, mmio_base + HOST_CTL);
	readw(mmio_base + HOST_CTL); /* flush */

	for (i = 0; i < 10; i++) {
		msleep(1);
		val = readw(mmio_base + HOST_CTL);
		if (!(val & HCTL_SOFTRST))
			break;
	}

	if (val & HCTL_SOFTRST)
		return -EIO;

	/* mask all interrupts and reset ports */
	for (i = 0; i < NR_PORTS; i++) {
		void __iomem *port_base = mmio_base + i * PORT_SIZE;

		writeb(0xff, port_base + PORT_IRQ_MASK);
		inic_reset_port(port_base);
	}

	/* re-enable interrupts: clear global IRQ-off, unmask both ports */
	writew(hctl & ~HCTL_IRQOFF, mmio_base + HOST_CTL);
	val = readw(mmio_base + HOST_IRQ_MASK);
	val &= ~(HIRQ_PORT0 | HIRQ_PORT1);
	writew(val, mmio_base + HOST_IRQ_MASK);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int inic_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	struct inic_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
		if (rc)
			return rc;
	}

	ata_host_resume(host);

	return 0;
}
#endif

static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct ata_port_info *ppi[] = { &inic_port_info, NULL };
	struct ata_host *host;
	struct inic_host_priv *hpriv;
	void __iomem * const *iomap;
	int mmio_bar;
	int i, rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	dev_alert(&pdev->dev, "inic162x support is broken with common data corruption issues and will be disabled by default, contact linux-ide@vger.kernel.org if in production use\n");

	/* alloc host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;

	host->private_data = hpriv;

	/* Acquire resources and fill host.  Note that PCI and cardbus
	 * use different BARs.
	 */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if (pci_resource_flags(pdev, MMIO_BAR_PCI) & IORESOURCE_MEM)
		mmio_bar = MMIO_BAR_PCI;
	else
		mmio_bar = MMIO_BAR_CARDBUS;

	rc = pcim_iomap_regions(pdev, 1 << mmio_bar, DRV_NAME);
	if (rc)
		return rc;
	host->iomap = iomap = pcim_iomap_table(pdev);
	hpriv->mmio_base = iomap[mmio_bar];
	hpriv->cached_hctl = readw(hpriv->mmio_base + HOST_CTL);

	for (i = 0; i < NR_PORTS; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_pbar_desc(ap, mmio_bar, -1, "mmio");
		ata_port_pbar_desc(ap, mmio_bar, i * PORT_SIZE, "port");
	}

	/* Set dma_mask.  This device doesn't support 64-bit addressing. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(&pdev->dev, "32-bit DMA enable failed\n");
		return rc;
	}

	rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
	if (rc) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		return rc;
	}

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, inic_interrupt, IRQF_SHARED,
				 &inic_sht);
}

static const struct pci_device_id inic_pci_tbl[] = {
	{ PCI_VDEVICE(INIT, 0x1622), },
	{ },
};

static struct pci_driver inic_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= inic_pci_tbl,
#ifdef CONFIG_PM_SLEEP
	.suspend	= ata_pci_device_suspend,
	.resume		= inic_pci_device_resume,
#endif
	.probe		= inic_init_one,
	.remove		= ata_pci_remove_one,
};

module_pci_driver(inic_pci_driver);

MODULE_AUTHOR("Tejun Heo");
MODULE_DESCRIPTION("low-level driver for Initio 162x SATA");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, inic_pci_tbl);
MODULE_VERSION(DRV_VERSION);