Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * sata_inic162x.c - Driver for Initio 162x SATA controllers
0004  *
0005  * Copyright 2006  SUSE Linux Products GmbH
0006  * Copyright 2006  Tejun Heo <teheo@novell.com>
0007  *
0008  * **** WARNING ****
0009  *
0010  * This driver never worked properly and unfortunately data corruption is
0011  * relatively common.  There isn't anyone working on the driver and there's
0012  * no support from the vendor.  Do not use this driver in any production
0013  * environment.
0014  *
0015  * http://thread.gmane.org/gmane.linux.debian.devel.bugs.rc/378525/focus=54491
0016  * https://bugzilla.kernel.org/show_bug.cgi?id=60565
0017  *
0018  * *****************
0019  *
0020  * This controller is eccentric and easily locks up if something isn't
0021  * right.  Documentation is available at initio's website but it only
0022  * documents registers (not programming model).
0023  *
0024  * This driver has interesting history.  The first version was written
0025  * from the documentation and a 2.4 IDE driver posted on a Taiwan
0026  * company, which didn't use any IDMA features and couldn't handle
0027  * LBA48.  The resulting driver couldn't handle LBA48 devices either
0028  * making it pretty useless.
0029  *
0030  * After a while, initio picked the driver up, renamed it to
0031  * sata_initio162x, updated it to use IDMA for ATA DMA commands and
0032  * posted it on their website.  It only used ATA_PROT_DMA for IDMA and
0033  * attaching both devices and issuing IDMA and !IDMA commands
0034  * simultaneously broke it due to PIRQ masking interaction but it did
0035  * show how to use the IDMA (ADMA + some initio specific twists)
0036  * engine.
0037  *
0038  * Then, I picked up their changes again and here's the usable driver
0039  * which uses IDMA for everything.  Everything works now including
0040  * LBA48, CD/DVD burning, suspend/resume and hotplug.  There are some
0041  * issues tho.  Result TF is not reported properly, NCQ isn't
0042  * supported yet and CD/DVD writing works with DMA assisted PIO
0043  * protocol (which, for native SATA devices, shouldn't cause any
0044  * noticeable difference).
0045  *
0046  * Anyways, so, here's finally a working driver for inic162x.  Enjoy!
0047  *
0048  * initio: If you guys wanna improve the driver regarding result TF
0049  * access and other stuff, please feel free to contact me.  I'll be
0050  * happy to assist.
0051  */
0052 
0053 #include <linux/gfp.h>
0054 #include <linux/kernel.h>
0055 #include <linux/module.h>
0056 #include <linux/pci.h>
0057 #include <scsi/scsi_host.h>
0058 #include <linux/libata.h>
0059 #include <linux/blkdev.h>
0060 #include <scsi/scsi_device.h>
0061 
/* driver identification used for resource naming and version reporting */
#define DRV_NAME	"sata_inic162x"
#define DRV_VERSION	"0.4"
0064 
/*
 * Driver constants: BAR numbers, host/port register offsets and bit
 * definitions.  Per-port offsets are relative to the port base
 * returned by inic_port_base().
 */
enum {
	MMIO_BAR_PCI		= 5,
	MMIO_BAR_CARDBUS	= 1,

	NR_PORTS		= 2,

	IDMA_CPB_TBL_SIZE	= 4 * 32,

	INIC_DMA_BOUNDARY	= 0xffffff,

	HOST_ACTRL		= 0x08,
	HOST_CTL		= 0x7c,
	HOST_STAT		= 0x7e,
	HOST_IRQ_STAT		= 0xbc,
	HOST_IRQ_MASK		= 0xbe,

	PORT_SIZE		= 0x40,

	/* registers for ATA TF operation */
	PORT_TF_DATA		= 0x00,
	PORT_TF_FEATURE		= 0x01,
	PORT_TF_NSECT		= 0x02,
	PORT_TF_LBAL		= 0x03,
	PORT_TF_LBAM		= 0x04,
	PORT_TF_LBAH		= 0x05,
	PORT_TF_DEVICE		= 0x06,
	PORT_TF_COMMAND		= 0x07,
	PORT_TF_ALT_STAT	= 0x08,
	PORT_IRQ_STAT		= 0x09,
	PORT_IRQ_MASK		= 0x0a,
	PORT_PRD_CTL		= 0x0b,
	PORT_PRD_ADDR		= 0x0c,
	PORT_PRD_XFERLEN	= 0x10,
	PORT_CPB_CPBLAR		= 0x18,
	PORT_CPB_PTQFIFO	= 0x1c,

	/* IDMA register */
	PORT_IDMA_CTL		= 0x14,
	PORT_IDMA_STAT		= 0x16,

	PORT_RPQ_FIFO		= 0x1e,
	PORT_RPQ_CNT		= 0x1f,

	PORT_SCR		= 0x20,

	/* HOST_CTL bits */
	HCTL_LEDEN		= (1 << 3),  /* enable LED operation */
	HCTL_IRQOFF		= (1 << 8),  /* global IRQ off */
	HCTL_FTHD0		= (1 << 10), /* fifo threshold 0 */
	HCTL_FTHD1		= (1 << 11), /* fifo threshold 1 */
	HCTL_PWRDWN		= (1 << 12), /* power down PHYs */
	HCTL_SOFTRST		= (1 << 13), /* global reset (no phy reset) */
	HCTL_RPGSEL		= (1 << 15), /* register page select */

	HCTL_KNOWN_BITS		= HCTL_IRQOFF | HCTL_PWRDWN | HCTL_SOFTRST |
				  HCTL_RPGSEL,

	/* HOST_IRQ_(STAT|MASK) bits */
	HIRQ_PORT0		= (1 << 0),
	HIRQ_PORT1		= (1 << 1),
	HIRQ_SOFT		= (1 << 14),
	HIRQ_GLOBAL		= (1 << 15), /* STAT only */

	/* PORT_IRQ_(STAT|MASK) bits */
	PIRQ_OFFLINE		= (1 << 0),  /* device unplugged */
	PIRQ_ONLINE		= (1 << 1),  /* device plugged */
	PIRQ_COMPLETE		= (1 << 2),  /* completion interrupt */
	PIRQ_FATAL		= (1 << 3),  /* fatal error */
	PIRQ_ATA		= (1 << 4),  /* ATA interrupt */
	PIRQ_REPLY		= (1 << 5),  /* reply FIFO not empty */
	PIRQ_PENDING		= (1 << 7),  /* port IRQ pending (STAT only) */

	PIRQ_ERR		= PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,
	PIRQ_MASK_DEFAULT	= PIRQ_REPLY | PIRQ_ATA,
	PIRQ_MASK_FREEZE	= 0xff,

	/* PORT_PRD_CTL bits */
	PRD_CTL_START		= (1 << 0),
	PRD_CTL_WR		= (1 << 3),
	PRD_CTL_DMAEN		= (1 << 7),  /* DMA enable */

	/* PORT_IDMA_CTL bits */
	IDMA_CTL_RST_ATA	= (1 << 2),  /* hardreset ATA bus */
	IDMA_CTL_RST_IDMA	= (1 << 5),  /* reset IDMA machinery */
	IDMA_CTL_GO		= (1 << 7),  /* IDMA mode go */
	IDMA_CTL_ATA_NIEN	= (1 << 8),  /* ATA IRQ disable */

	/* PORT_IDMA_STAT bits */
	IDMA_STAT_PERR		= (1 << 0),  /* PCI ERROR MODE */
	IDMA_STAT_CPBERR	= (1 << 1),  /* ADMA CPB error */
	IDMA_STAT_LGCY		= (1 << 3),  /* ADMA legacy */
	IDMA_STAT_UIRQ		= (1 << 4),  /* ADMA unsolicited irq */
	IDMA_STAT_STPD		= (1 << 5),  /* ADMA stopped */
	IDMA_STAT_PSD		= (1 << 6),  /* ADMA pause */
	IDMA_STAT_DONE		= (1 << 7),  /* ADMA done */

	IDMA_STAT_ERR		= IDMA_STAT_PERR | IDMA_STAT_CPBERR,

	/* CPB Control Flags */
	CPB_CTL_VALID		= (1 << 0),  /* CPB valid */
	CPB_CTL_QUEUED		= (1 << 1),  /* queued command */
	CPB_CTL_DATA		= (1 << 2),  /* data, rsvd in datasheet */
	CPB_CTL_IEN		= (1 << 3),  /* PCI interrupt enable */
	CPB_CTL_DEVDIR		= (1 << 4),  /* device direction control */

	/* CPB Response Flags */
	CPB_RESP_DONE		= (1 << 0),  /* ATA command complete */
	CPB_RESP_REL		= (1 << 1),  /* ATA release */
	CPB_RESP_IGNORED	= (1 << 2),  /* CPB ignored */
	CPB_RESP_ATA_ERR	= (1 << 3),  /* ATA command error */
	CPB_RESP_SPURIOUS	= (1 << 4),  /* ATA spurious interrupt error */
	CPB_RESP_UNDERFLOW	= (1 << 5),  /* APRD deficiency length error */
	CPB_RESP_OVERFLOW	= (1 << 6),  /* APRD excess length error */
	CPB_RESP_CPB_ERR	= (1 << 7),  /* CPB error flag */

	/* PRD Control Flags */
	PRD_DRAIN		= (1 << 1),  /* ignore data excess */
	PRD_CDB			= (1 << 2),  /* atapi packet command pointer */
	PRD_DIRECT_INTR		= (1 << 3),  /* direct interrupt */
	PRD_DMA			= (1 << 4),  /* data transfer method */
	PRD_WRITE		= (1 << 5),  /* data dir, rsvd in datasheet */
	PRD_IOM			= (1 << 6),  /* io/memory transfer */
	PRD_END			= (1 << 7),  /* APRD chain end */
};
0189 
/*
 * Command Parameter Block - the per-command descriptor handed to the
 * IDMA engine.  The first 16 bytes are control/response fields, the
 * second 16 bytes mirror the ATA taskfile.  Must stay exactly 32
 * bytes, hence __packed.
 */
struct inic_cpb {
	u8		resp_flags;	/* Response Flags */
	u8		error;		/* ATA Error */
	u8		status;		/* ATA Status */
	u8		ctl_flags;	/* Control Flags */
	__le32		len;		/* Total Transfer Length */
	__le32		prd;		/* First PRD pointer */
	u8		rsvd[4];
	/* 16 bytes */
	u8		feature;	/* ATA Feature */
	u8		hob_feature;	/* ATA Ex. Feature */
	u8		device;		/* ATA Device/Head */
	u8		mirctl;		/* Mirror Control */
	u8		nsect;		/* ATA Sector Count */
	u8		hob_nsect;	/* ATA Ex. Sector Count */
	u8		lbal;		/* ATA Sector Number */
	u8		hob_lbal;	/* ATA Ex. Sector Number */
	u8		lbam;		/* ATA Cylinder Low */
	u8		hob_lbam;	/* ATA Ex. Cylinder Low */
	u8		lbah;		/* ATA Cylinder High */
	u8		hob_lbah;	/* ATA Ex. Cylinder High */
	u8		command;	/* ATA Command */
	u8		ctl;		/* ATA Control */
	u8		slave_error;	/* Slave ATA Error */
	u8		slave_status;	/* Slave ATA Status */
	/* 32 bytes */
} __packed;
0218 
/* Physical Region Descriptor - one scatter/gather segment for the IDMA engine */
struct inic_prd {
	__le32		mad;		/* Physical Memory Address */
	__le16		len;		/* Transfer Length */
	u8		rsvd;
	u8		flags;		/* Control Flags (PRD_* above) */
} __packed;
0226 
/*
 * DMA-coherent command packet: CPB followed by its PRD table and a
 * scratch area for the ATAPI CDB.  One packet per port; this driver
 * never queues more than one command at a time.
 */
struct inic_pkt {
	struct inic_cpb	cpb;
	struct inic_prd	prd[LIBATA_MAX_PRD + 1];	/* + 1 for cdb */
	u8		cdb[ATAPI_CDB_LEN];
} __packed;
0232 
/* per-host private data */
struct inic_host_priv {
	void __iomem	*mmio_base;	/* controller register window */
	u16		cached_hctl;	/* HOST_CTL value saved at probe, for resume */
};
0237 
/* per-port private data: DMA-coherent packet and CPB lookup table */
struct inic_port_priv {
	struct inic_pkt	*pkt;		/* command packet (CPU address) */
	dma_addr_t	pkt_dma;	/* command packet (bus address) */
	u32		*cpb_tbl;	/* CPB lookup table (CPU address) */
	dma_addr_t	cpb_tbl_dma;	/* CPB lookup table (bus address) */
};
0244 
/* SCSI host template - mostly libata defaults with tightened DMA limits */
static struct scsi_host_template inic_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= LIBATA_MAX_PRD, /* maybe it can be larger? */

	/*
	 * This controller is braindamaged.  dma_boundary is 0xffff like others
	 * but it will lock up the whole machine HARD if 65536 byte PRD entry
	 * is fed.  Reduce maximum segment size.
	 */
	.dma_boundary		= INIC_DMA_BOUNDARY,
	.max_segment_size	= 65536 - 512,
};
0257 
/* maps libata SCR register indices to this controller's SCR layout */
static const int scr_map[] = {
	[SCR_STATUS]	= 0,
	[SCR_ERROR]	= 1,
	[SCR_CONTROL]	= 2,
};
0263 
0264 static void __iomem *inic_port_base(struct ata_port *ap)
0265 {
0266     struct inic_host_priv *hpriv = ap->host->private_data;
0267 
0268     return hpriv->mmio_base + ap->port_no * PORT_SIZE;
0269 }
0270 
/*
 * Reset the IDMA engine of one port and clear its pending IRQs.
 * Brings the port back to a sane state after errors.
 */
static void inic_reset_port(void __iomem *port_base)
{
	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;

	/* stop IDMA engine */
	readw(idma_ctl); /* flush */
	msleep(1);

	/* mask IRQ and assert reset */
	writew(IDMA_CTL_RST_IDMA, idma_ctl);
	readw(idma_ctl); /* flush */
	msleep(1);

	/* release reset */
	writew(0, idma_ctl);

	/* clear irq */
	writeb(0xff, port_base + PORT_IRQ_STAT);
}
0290 
0291 static int inic_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
0292 {
0293     void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR;
0294 
0295     if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
0296         return -EINVAL;
0297 
0298     *val = readl(scr_addr + scr_map[sc_reg] * 4);
0299 
0300     /* this controller has stuck DIAG.N, ignore it */
0301     if (sc_reg == SCR_ERROR)
0302         *val &= ~SERR_PHYRDY_CHG;
0303     return 0;
0304 }
0305 
0306 static int inic_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
0307 {
0308     void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR;
0309 
0310     if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
0311         return -EINVAL;
0312 
0313     writel(val, scr_addr + scr_map[sc_reg] * 4);
0314     return 0;
0315 }
0316 
/*
 * Stop the IDMA engine.  The two reads drain the reply FIFO (reads
 * have side effects on this hardware) before the engine is disabled.
 */
static void inic_stop_idma(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	readb(port_base + PORT_RPQ_FIFO);
	readb(port_base + PORT_RPQ_CNT);
	writew(0, port_base + PORT_IDMA_CTL);
}
0325 
/*
 * Handle an error interrupt on @ap.  Decodes @irq_stat / @idma_stat
 * plus the CPB response flags into EH descriptors and error masks,
 * then either freezes the port (for conditions needing a reset) or
 * aborts the active command.
 */
static void inic_host_err_intr(struct ata_port *ap, u8 irq_stat, u16 idma_stat)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct inic_port_priv *pp = ap->private_data;
	struct inic_cpb *cpb = &pp->pkt->cpb;
	bool freeze = false;

	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "irq_stat=0x%x idma_stat=0x%x",
			  irq_stat, idma_stat);

	/* the engine is wedged in some way; stop it before sorting out why */
	inic_stop_idma(ap);

	if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
		ata_ehi_push_desc(ehi, "hotplug");
		ata_ehi_hotplugged(ehi);
		freeze = true;
	}

	if (idma_stat & IDMA_STAT_PERR) {
		ata_ehi_push_desc(ehi, "PCI error");
		freeze = true;
	}

	if (idma_stat & IDMA_STAT_CPBERR) {
		ata_ehi_push_desc(ehi, "CPB error");

		if (cpb->resp_flags & CPB_RESP_IGNORED) {
			__ata_ehi_push_desc(ehi, " ignored");
			ehi->err_mask |= AC_ERR_INVALID;
			freeze = true;
		}

		if (cpb->resp_flags & CPB_RESP_ATA_ERR)
			ehi->err_mask |= AC_ERR_DEV;

		if (cpb->resp_flags & CPB_RESP_SPURIOUS) {
			__ata_ehi_push_desc(ehi, " spurious-intr");
			ehi->err_mask |= AC_ERR_HSM;
			freeze = true;
		}

		if (cpb->resp_flags &
		    (CPB_RESP_UNDERFLOW | CPB_RESP_OVERFLOW)) {
			__ata_ehi_push_desc(ehi, " data-over/underflow");
			ehi->err_mask |= AC_ERR_HSM;
			freeze = true;
		}
	}

	if (freeze)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
0381 
/*
 * Per-port interrupt handler.  Reads and acks the port IRQ status,
 * dispatches errors to inic_host_err_intr(), and completes the active
 * qc when the IDMA engine reports done.
 */
static void inic_host_intr(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
	u8 irq_stat;
	u16 idma_stat;

	/* read and clear IRQ status */
	irq_stat = readb(port_base + PORT_IRQ_STAT);
	writeb(irq_stat, port_base + PORT_IRQ_STAT);
	idma_stat = readw(port_base + PORT_IDMA_STAT);

	if (unlikely((irq_stat & PIRQ_ERR) || (idma_stat & IDMA_STAT_ERR)))
		inic_host_err_intr(ap, irq_stat, idma_stat);

	if (unlikely(!qc))
		goto spurious;

	if (likely(idma_stat & IDMA_STAT_DONE)) {
		inic_stop_idma(ap);

		/* Depending on circumstances, device error
		 * isn't reported by IDMA, check it explicitly.
		 */
		if (unlikely(readb(port_base + PORT_TF_COMMAND) &
			     (ATA_DF | ATA_ERR)))
			qc->err_mask |= AC_ERR_DEV;

		ata_qc_complete(qc);
		return;
	}

 spurious:
	ata_port_warn(ap, "unhandled interrupt: cmd=0x%x irq_stat=0x%x idma_stat=0x%x\n",
		      qc ? qc->tf.command : 0xff, irq_stat, idma_stat);
}
0418 
/*
 * Shared IRQ handler for the whole host.  Checks the global IRQ bit,
 * then fans out to each port whose bit is set in HOST_IRQ_STAT.
 * Returns IRQ_HANDLED iff at least one port interrupt was serviced.
 */
static irqreturn_t inic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct inic_host_priv *hpriv = host->private_data;
	u16 host_irq_stat;
	int i, handled = 0;

	host_irq_stat = readw(hpriv->mmio_base + HOST_IRQ_STAT);

	if (unlikely(!(host_irq_stat & HIRQ_GLOBAL)))
		goto out;

	spin_lock(&host->lock);

	for (i = 0; i < NR_PORTS; i++)
		if (host_irq_stat & (HIRQ_PORT0 << i)) {
			inic_host_intr(host->ports[i]);
			handled++;
		}

	spin_unlock(&host->lock);

 out:
	return IRQ_RETVAL(handled);
}
0444 
0445 static int inic_check_atapi_dma(struct ata_queued_cmd *qc)
0446 {
0447     /* For some reason ATAPI_PROT_DMA doesn't work for some
0448      * commands including writes and other misc ops.  Use PIO
0449      * protocol instead, which BTW is driven by the DMA engine
0450      * anyway, so it shouldn't make much difference for native
0451      * SATA devices.
0452      */
0453     if (atapi_cmd_type(qc->cdb[0]) == READ)
0454         return 0;
0455     return 1;
0456 }
0457 
/*
 * Fill the PRD table at @prd from @qc's scatter/gather list.  Each
 * entry carries the direction/method flags; the last one is marked
 * with PRD_END to terminate the APRD chain.
 */
static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	unsigned int si;
	u8 flags = 0;

	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= PRD_WRITE;

	if (ata_is_dma(qc->tf.protocol))
		flags |= PRD_DMA;

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		prd->mad = cpu_to_le32(sg_dma_address(sg));
		prd->len = cpu_to_le16(sg_dma_len(sg));
		prd->flags = flags;
		prd++;
	}

	/* caller guarantees a non-empty sg list for data commands */
	WARN_ON(!si);
	prd[-1].flags |= PRD_END;
}
0480 
/*
 * Build the CPB and PRD table for @qc in the port's DMA-coherent
 * packet.  ATAPI commands get an extra leading PRD entry pointing at
 * the CDB copy inside the packet.  Always returns AC_ERR_OK.
 */
static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc)
{
	struct inic_port_priv *pp = qc->ap->private_data;
	struct inic_pkt *pkt = pp->pkt;
	struct inic_cpb *cpb = &pkt->cpb;
	struct inic_prd *prd = pkt->prd;
	bool is_atapi = ata_is_atapi(qc->tf.protocol);
	bool is_data = ata_is_data(qc->tf.protocol);
	unsigned int cdb_len = 0;

	if (is_atapi)
		cdb_len = qc->dev->cdb_len;

	/* prepare packet, based on initio driver */
	memset(pkt, 0, sizeof(struct inic_pkt));

	cpb->ctl_flags = CPB_CTL_VALID | CPB_CTL_IEN;
	if (is_atapi || is_data)
		cpb->ctl_flags |= CPB_CTL_DATA;

	/* CDB bytes are transferred too, so they count toward the total */
	cpb->len = cpu_to_le32(qc->nbytes + cdb_len);
	cpb->prd = cpu_to_le32(pp->pkt_dma + offsetof(struct inic_pkt, prd));

	cpb->device = qc->tf.device;
	cpb->feature = qc->tf.feature;
	cpb->nsect = qc->tf.nsect;
	cpb->lbal = qc->tf.lbal;
	cpb->lbam = qc->tf.lbam;
	cpb->lbah = qc->tf.lbah;

	if (qc->tf.flags & ATA_TFLAG_LBA48) {
		cpb->hob_feature = qc->tf.hob_feature;
		cpb->hob_nsect = qc->tf.hob_nsect;
		cpb->hob_lbal = qc->tf.hob_lbal;
		cpb->hob_lbam = qc->tf.hob_lbam;
		cpb->hob_lbah = qc->tf.hob_lbah;
	}

	cpb->command = qc->tf.command;
	/* don't load ctl - dunno why.  it's like that in the initio driver */

	/* setup PRD for CDB */
	if (is_atapi) {
		memcpy(pkt->cdb, qc->cdb, ATAPI_CDB_LEN);
		prd->mad = cpu_to_le32(pp->pkt_dma +
				       offsetof(struct inic_pkt, cdb));
		prd->len = cpu_to_le16(cdb_len);
		prd->flags = PRD_CDB | PRD_WRITE;
		if (!is_data)
			prd->flags |= PRD_END;
		prd++;
	}

	/* setup sg table */
	if (is_data)
		inic_fill_sg(prd, qc);

	/* slot 0 of the CPB lookup table points at our single packet */
	pp->cpb_tbl[0] = pp->pkt_dma;

	return AC_ERR_OK;
}
0542 
/*
 * Issue the prepared command: enable the engine, set GO and push CPB
 * slot 0 into the PTQ FIFO.  Always returns 0 (command accepted).
 */
static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_base = inic_port_base(ap);

	/* fire up the ADMA engine */
	writew(HCTL_FTHD0 | HCTL_LEDEN, port_base + HOST_CTL);
	writew(IDMA_CTL_GO, port_base + PORT_IDMA_CTL);
	writeb(0, port_base + PORT_CPB_PTQFIFO);

	return 0;
}
0555 
/*
 * Read the current taskfile from the port's TF registers into @tf.
 * Note that only status/error are trustworthy on this controller
 * (see inic_qc_fill_rtf()).
 */
static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	void __iomem *port_base = inic_port_base(ap);

	tf->error	= readb(port_base + PORT_TF_FEATURE);
	tf->nsect	= readb(port_base + PORT_TF_NSECT);
	tf->lbal	= readb(port_base + PORT_TF_LBAL);
	tf->lbam	= readb(port_base + PORT_TF_LBAM);
	tf->lbah	= readb(port_base + PORT_TF_LBAH);
	tf->device	= readb(port_base + PORT_TF_DEVICE);
	tf->status	= readb(port_base + PORT_TF_COMMAND);
}
0568 
0569 static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc)
0570 {
0571     struct ata_taskfile *rtf = &qc->result_tf;
0572     struct ata_taskfile tf;
0573 
0574     /* FIXME: Except for status and error, result TF access
0575      * doesn't work.  I tried reading from BAR0/2, CPB and BAR5.
0576      * None works regardless of which command interface is used.
0577      * For now return true iff status indicates device error.
0578      * This means that we're reporting bogus sector for RW
0579      * failures.  Eeekk....
0580      */
0581     inic_tf_read(qc->ap, &tf);
0582 
0583     if (!(tf.status & ATA_ERR))
0584         return false;
0585 
0586     rtf->status = tf.status;
0587     rtf->error = tf.error;
0588     return true;
0589 }
0590 
/* EH freeze: mask all port IRQs, then ack anything already pending */
static void inic_freeze(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	writeb(PIRQ_MASK_FREEZE, port_base + PORT_IRQ_MASK);
	writeb(0xff, port_base + PORT_IRQ_STAT);
}
0598 
/* EH thaw: ack stale IRQ status, then restore the default IRQ mask */
static void inic_thaw(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	writeb(0xff, port_base + PORT_IRQ_STAT);
	writeb(PIRQ_MASK_DEFAULT, port_base + PORT_IRQ_MASK);
}
0606 
0607 static int inic_check_ready(struct ata_link *link)
0608 {
0609     void __iomem *port_base = inic_port_base(link->ap);
0610 
0611     return ata_check_ready(readb(port_base + PORT_TF_COMMAND));
0612 }
0613 
/*
 * SRST and SControl hardreset don't give valid signature on this
 * controller.  Only controller specific hardreset mechanism works.
 */
static int inic_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *port_base = inic_port_base(ap);
	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	int rc;

	/* hammer it into sane state */
	inic_reset_port(port_base);

	/* pulse the controller specific ATA hardreset bit */
	writew(IDMA_CTL_RST_ATA, idma_ctl);
	readw(idma_ctl);	/* flush */
	ata_msleep(ap, 1);
	writew(0, idma_ctl);

	rc = sata_link_resume(link, timing, deadline);
	if (rc) {
		ata_link_warn(link,
			      "failed to resume link after reset (errno=%d)\n",
			      rc);
		return rc;
	}

	*class = ATA_DEV_NONE;
	if (ata_link_online(link)) {
		struct ata_taskfile tf;

		/* wait for link to become ready */
		rc = ata_wait_after_reset(link, deadline, inic_check_ready);
		/* link occupied, -ENODEV too is an error */
		if (rc) {
			ata_link_warn(link,
				      "device not ready after hardreset (errno=%d)\n",
				      rc);
			return rc;
		}

		/* classify the attached device from its signature */
		inic_tf_read(ap, &tf);
		*class = ata_port_classify(ap, &tf);
	}

	return 0;
}
0663 
/* EH entry point: reset the port hardware, then run standard libata EH */
static void inic_error_handler(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	inic_reset_port(port_base);
	ata_std_error_handler(ap);
}
0671 
/* clean up after a failed internal (EH-issued) command */
static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
{
	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		inic_reset_port(inic_port_base(qc->ap));
}
0678 
/*
 * (Re)initialize a port's DMA structures and point the hardware at
 * the CPB lookup table.  Used at port start and on resume.
 */
static void init_port(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct inic_port_priv *pp = ap->private_data;

	/* clear packet and CPB table */
	memset(pp->pkt, 0, sizeof(struct inic_pkt));
	memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE);

	/* setup CPB lookup table addresses */
	writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR);
}
0691 
/* port resume hook - re-program the hardware pointers lost on suspend */
static int inic_port_resume(struct ata_port *ap)
{
	init_port(ap);
	return 0;
}
0697 
/*
 * Allocate per-port private data and the DMA-coherent packet and CPB
 * table, then initialize the port.  All allocations are devres
 * managed, so no explicit teardown is needed.  Returns 0 or -ENOMEM.
 */
static int inic_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct inic_port_priv *pp;

	/* alloc and initialize private data */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	/* Alloc resources */
	pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt),
				      &pp->pkt_dma, GFP_KERNEL);
	if (!pp->pkt)
		return -ENOMEM;

	pp->cpb_tbl = dmam_alloc_coherent(dev, IDMA_CPB_TBL_SIZE,
					  &pp->cpb_tbl_dma, GFP_KERNEL);
	if (!pp->cpb_tbl)
		return -ENOMEM;

	init_port(ap);

	return 0;
}
0724 
/* libata port operations - inherits defaults from sata_port_ops */
static struct ata_port_operations inic_port_ops = {
	.inherits		= &sata_port_ops,

	.check_atapi_dma	= inic_check_atapi_dma,
	.qc_prep		= inic_qc_prep,
	.qc_issue		= inic_qc_issue,
	.qc_fill_rtf		= inic_qc_fill_rtf,

	.freeze			= inic_freeze,
	.thaw			= inic_thaw,
	.hardreset		= inic_hardreset,
	.error_handler		= inic_error_handler,
	.post_internal_cmd	= inic_post_internal_cmd,

	.scr_read		= inic_scr_read,
	.scr_write		= inic_scr_write,

	.port_resume		= inic_port_resume,
	.port_start		= inic_port_start,
};
0745 
/* default port configuration shared by both ports */
static const struct ata_port_info inic_port_info = {
	.flags			= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
	.pio_mask		= ATA_PIO4,
	.mwdma_mask		= ATA_MWDMA2,
	.udma_mask		= ATA_UDMA6,
	.port_ops		= &inic_port_ops
};
0753 
/*
 * Soft-reset the whole controller and bring it to a known state:
 * all port IRQs masked, ports reset, global IRQ unmasked.  @hctl is
 * the HOST_CTL value cached at probe.  Returns 0 or -EIO if the
 * soft reset bit never clears.
 */
static int init_controller(void __iomem *mmio_base, u16 hctl)
{
	int i;
	u16 val;

	/* keep only bits we don't manage ourselves */
	hctl &= ~HCTL_KNOWN_BITS;

	/* Soft reset whole controller.  Spec says reset duration is 3
	 * PCI clocks, be generous and give it 10ms.
	 */
	writew(hctl | HCTL_SOFTRST, mmio_base + HOST_CTL);
	readw(mmio_base + HOST_CTL); /* flush */

	for (i = 0; i < 10; i++) {
		msleep(1);
		val = readw(mmio_base + HOST_CTL);
		if (!(val & HCTL_SOFTRST))
			break;
	}

	if (val & HCTL_SOFTRST)
		return -EIO;

	/* mask all interrupts and reset ports */
	for (i = 0; i < NR_PORTS; i++) {
		void __iomem *port_base = mmio_base + i * PORT_SIZE;

		writeb(0xff, port_base + PORT_IRQ_MASK);
		inic_reset_port(port_base);
	}

	/* port IRQ is masked now, unmask global IRQ */
	writew(hctl & ~HCTL_IRQOFF, mmio_base + HOST_CTL);
	val = readw(mmio_base + HOST_IRQ_MASK);
	val &= ~(HIRQ_PORT0 | HIRQ_PORT1);
	writew(val, mmio_base + HOST_IRQ_MASK);

	return 0;
}
0793 
#ifdef CONFIG_PM_SLEEP
/*
 * PCI resume: restore PCI state, re-initialize the controller when
 * coming back from a full suspend (hardware state lost), then resume
 * the libata host.
 */
static int inic_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	struct inic_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
		if (rc)
			return rc;
	}

	ata_host_resume(host);

	return 0;
}
#endif
0816 
/*
 * PCI probe: allocate the libata host, map the proper BAR (PCI vs
 * cardbus use different ones), set up DMA masks, initialize the
 * controller and register the interrupt handler.  All resources are
 * devres managed.
 */
static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct ata_port_info *ppi[] = { &inic_port_info, NULL };
	struct ata_host *host;
	struct inic_host_priv *hpriv;
	void __iomem * const *iomap;
	int mmio_bar;
	int i, rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	dev_alert(&pdev->dev, "inic162x support is broken with common data corruption issues and will be disabled by default, contact linux-ide@vger.kernel.org if in production use\n");

	/* alloc host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;

	host->private_data = hpriv;

	/* Acquire resources and fill host.  Note that PCI and cardbus
	 * use different BARs.
	 */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if (pci_resource_flags(pdev, MMIO_BAR_PCI) & IORESOURCE_MEM)
		mmio_bar = MMIO_BAR_PCI;
	else
		mmio_bar = MMIO_BAR_CARDBUS;

	rc = pcim_iomap_regions(pdev, 1 << mmio_bar, DRV_NAME);
	if (rc)
		return rc;
	host->iomap = iomap = pcim_iomap_table(pdev);
	hpriv->mmio_base = iomap[mmio_bar];
	/* save HOST_CTL for re-init on resume */
	hpriv->cached_hctl = readw(hpriv->mmio_base + HOST_CTL);

	for (i = 0; i < NR_PORTS; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_pbar_desc(ap, mmio_bar, -1, "mmio");
		ata_port_pbar_desc(ap, mmio_bar, i * PORT_SIZE, "port");
	}

	/* Set dma_mask.  This device doesn't support 64bit addressing. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(&pdev->dev, "32-bit DMA enable failed\n");
		return rc;
	}

	rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
	if (rc) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		return rc;
	}

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, inic_interrupt, IRQF_SHARED,
				 &inic_sht);
}
0881 
/* supported devices: Initio 1622 */
static const struct pci_device_id inic_pci_tbl[] = {
	{ PCI_VDEVICE(INIT, 0x1622), },
	{ },
};
0886 
/* PCI driver glue; suspend/resume only when PM sleep is configured */
static struct pci_driver inic_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= inic_pci_tbl,
#ifdef CONFIG_PM_SLEEP
	.suspend	= ata_pci_device_suspend,
	.resume		= inic_pci_device_resume,
#endif
	.probe		= inic_init_one,
	.remove		= ata_pci_remove_one,
};
0897 
module_pci_driver(inic_pci_driver);

/* module metadata */
MODULE_AUTHOR("Tejun Heo");
MODULE_DESCRIPTION("low-level driver for Initio 162x SATA");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, inic_pci_tbl);
MODULE_VERSION(DRV_VERSION);