// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  sata_qstor.c - Pacific Digital Corporation QStor SATA
 *
 *  Maintained by:  Mark Lord <mlord@pobox.com>
 *
 *  Copyright 2005 Pacific Digital Corporation.
 *  (OSL/GPL code release authorized by Jalil Fadavi).
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_qstor"
#define DRV_VERSION	"0.09"

0028 enum {
0029     QS_MMIO_BAR     = 4,
0030 
0031     QS_PORTS        = 4,
0032     QS_MAX_PRD      = LIBATA_MAX_PRD,
0033     QS_CPB_ORDER        = 6,
0034     QS_CPB_BYTES        = (1 << QS_CPB_ORDER),
0035     QS_PRD_BYTES        = QS_MAX_PRD * 16,
0036     QS_PKT_BYTES        = QS_CPB_BYTES + QS_PRD_BYTES,
0037 
0038     /* global register offsets */
0039     QS_HCF_CNFG3        = 0x0003, /* host configuration offset */
0040     QS_HID_HPHY     = 0x0004, /* host physical interface info */
0041     QS_HCT_CTRL     = 0x00e4, /* global interrupt mask offset */
0042     QS_HST_SFF      = 0x0100, /* host status fifo offset */
0043     QS_HVS_SERD3        = 0x0393, /* PHY enable offset */
0044 
0045     /* global control bits */
0046     QS_HPHY_64BIT       = (1 << 1), /* 64-bit bus detected */
0047     QS_CNFG3_GSRST      = 0x01,     /* global chip reset */
0048     QS_SERD3_PHY_ENA    = 0xf0,     /* PHY detection ENAble*/
0049 
0050     /* per-channel register offsets */
0051     QS_CCF_CPBA     = 0x0710, /* chan CPB base address */
0052     QS_CCF_CSEP     = 0x0718, /* chan CPB separation factor */
0053     QS_CFC_HUFT     = 0x0800, /* host upstream fifo threshold */
0054     QS_CFC_HDFT     = 0x0804, /* host downstream fifo threshold */
0055     QS_CFC_DUFT     = 0x0808, /* dev upstream fifo threshold */
0056     QS_CFC_DDFT     = 0x080c, /* dev downstream fifo threshold */
0057     QS_CCT_CTR0     = 0x0900, /* chan control-0 offset */
0058     QS_CCT_CTR1     = 0x0901, /* chan control-1 offset */
0059     QS_CCT_CFF      = 0x0a00, /* chan command fifo offset */
0060 
0061     /* channel control bits */
0062     QS_CTR0_REG     = (1 << 1),   /* register mode (vs. pkt mode) */
0063     QS_CTR0_CLER        = (1 << 2),   /* clear channel errors */
0064     QS_CTR1_RDEV        = (1 << 1),   /* sata phy/comms reset */
0065     QS_CTR1_RCHN        = (1 << 4),   /* reset channel logic */
0066     QS_CCF_RUN_PKT      = 0x107,      /* RUN a new dma PKT */
0067 
0068     /* pkt sub-field headers */
0069     QS_HCB_HDR      = 0x01,   /* Host Control Block header */
0070     QS_DCB_HDR      = 0x02,   /* Device Control Block header */
0071 
0072     /* pkt HCB flag bits */
0073     QS_HF_DIRO      = (1 << 0),   /* data DIRection Out */
0074     QS_HF_DAT       = (1 << 3),   /* DATa pkt */
0075     QS_HF_IEN       = (1 << 4),   /* Interrupt ENable */
0076     QS_HF_VLD       = (1 << 5),   /* VaLiD pkt */
0077 
0078     /* pkt DCB flag bits */
0079     QS_DF_PORD      = (1 << 2),   /* Pio OR Dma */
0080     QS_DF_ELBA      = (1 << 3),   /* Extended LBA (lba48) */
0081 
0082     /* PCI device IDs */
0083     board_2068_idx      = 0,    /* QStor 4-port SATA/RAID */
0084 };
0085 
0086 enum {
0087     QS_DMA_BOUNDARY     = ~0UL
0088 };
0089 
0090 typedef enum { qs_state_mmio, qs_state_pkt } qs_state_t;
0091 
0092 struct qs_port_priv {
0093     u8          *pkt;
0094     dma_addr_t      pkt_dma;
0095     qs_state_t      state;
0096 };
0097 
0098 static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
0099 static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
0100 static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
0101 static int qs_port_start(struct ata_port *ap);
0102 static void qs_host_stop(struct ata_host *host);
0103 static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc);
0104 static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
0105 static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
0106 static void qs_freeze(struct ata_port *ap);
0107 static void qs_thaw(struct ata_port *ap);
0108 static int qs_prereset(struct ata_link *link, unsigned long deadline);
0109 static void qs_error_handler(struct ata_port *ap);
0110 
0111 static struct scsi_host_template qs_ata_sht = {
0112     ATA_BASE_SHT(DRV_NAME),
0113     .sg_tablesize       = QS_MAX_PRD,
0114     .dma_boundary       = QS_DMA_BOUNDARY,
0115 };
0116 
0117 static struct ata_port_operations qs_ata_ops = {
0118     .inherits       = &ata_sff_port_ops,
0119 
0120     .check_atapi_dma    = qs_check_atapi_dma,
0121     .qc_prep        = qs_qc_prep,
0122     .qc_issue       = qs_qc_issue,
0123 
0124     .freeze         = qs_freeze,
0125     .thaw           = qs_thaw,
0126     .prereset       = qs_prereset,
0127     .softreset      = ATA_OP_NULL,
0128     .error_handler      = qs_error_handler,
0129     .lost_interrupt     = ATA_OP_NULL,
0130 
0131     .scr_read       = qs_scr_read,
0132     .scr_write      = qs_scr_write,
0133 
0134     .port_start     = qs_port_start,
0135     .host_stop      = qs_host_stop,
0136 };
0137 
0138 static const struct ata_port_info qs_port_info[] = {
0139     /* board_2068_idx */
0140     {
0141         .flags      = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
0142         .pio_mask   = ATA_PIO4_ONLY,
0143         .udma_mask  = ATA_UDMA6,
0144         .port_ops   = &qs_ata_ops,
0145     },
0146 };
0147 
0148 static const struct pci_device_id qs_ata_pci_tbl[] = {
0149     { PCI_VDEVICE(PDC, 0x2068), board_2068_idx },
0150 
0151     { } /* terminate list */
0152 };
0153 
0154 static struct pci_driver qs_ata_pci_driver = {
0155     .name           = DRV_NAME,
0156     .id_table       = qs_ata_pci_tbl,
0157     .probe          = qs_ata_init_one,
0158     .remove         = ata_pci_remove_one,
0159 };
0160 
0161 static void __iomem *qs_mmio_base(struct ata_host *host)
0162 {
0163     return host->iomap[QS_MMIO_BAR];
0164 }
0165 
0166 static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
0167 {
0168     return 1;   /* ATAPI DMA not supported */
0169 }
0170 
0171 static inline void qs_enter_reg_mode(struct ata_port *ap)
0172 {
0173     u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
0174     struct qs_port_priv *pp = ap->private_data;
0175 
0176     pp->state = qs_state_mmio;
0177     writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
0178     readb(chan + QS_CCT_CTR0);        /* flush */
0179 }
0180 
0181 static inline void qs_reset_channel_logic(struct ata_port *ap)
0182 {
0183     u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
0184 
0185     writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1);
0186     readb(chan + QS_CCT_CTR0);        /* flush */
0187     qs_enter_reg_mode(ap);
0188 }
0189 
0190 static void qs_freeze(struct ata_port *ap)
0191 {
0192     u8 __iomem *mmio_base = qs_mmio_base(ap->host);
0193 
0194     writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
0195     qs_enter_reg_mode(ap);
0196 }
0197 
0198 static void qs_thaw(struct ata_port *ap)
0199 {
0200     u8 __iomem *mmio_base = qs_mmio_base(ap->host);
0201 
0202     qs_enter_reg_mode(ap);
0203     writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
0204 }
0205 
0206 static int qs_prereset(struct ata_link *link, unsigned long deadline)
0207 {
0208     struct ata_port *ap = link->ap;
0209 
0210     qs_reset_channel_logic(ap);
0211     return ata_sff_prereset(link, deadline);
0212 }
0213 
0214 static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
0215 {
0216     if (sc_reg > SCR_CONTROL)
0217         return -EINVAL;
0218     *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 8));
0219     return 0;
0220 }
0221 
0222 static void qs_error_handler(struct ata_port *ap)
0223 {
0224     qs_enter_reg_mode(ap);
0225     ata_sff_error_handler(ap);
0226 }
0227 
0228 static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
0229 {
0230     if (sc_reg > SCR_CONTROL)
0231         return -EINVAL;
0232     writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 8));
0233     return 0;
0234 }
0235 
0236 static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
0237 {
0238     struct scatterlist *sg;
0239     struct ata_port *ap = qc->ap;
0240     struct qs_port_priv *pp = ap->private_data;
0241     u8 *prd = pp->pkt + QS_CPB_BYTES;
0242     unsigned int si;
0243 
0244     for_each_sg(qc->sg, sg, qc->n_elem, si) {
0245         u64 addr;
0246         u32 len;
0247 
0248         addr = sg_dma_address(sg);
0249         *(__le64 *)prd = cpu_to_le64(addr);
0250         prd += sizeof(u64);
0251 
0252         len = sg_dma_len(sg);
0253         *(__le32 *)prd = cpu_to_le32(len);
0254         prd += sizeof(u64);
0255     }
0256 
0257     return si;
0258 }
0259 
0260 static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc)
0261 {
0262     struct qs_port_priv *pp = qc->ap->private_data;
0263     u8 dflags = QS_DF_PORD, *buf = pp->pkt;
0264     u8 hflags = QS_HF_DAT | QS_HF_IEN | QS_HF_VLD;
0265     u64 addr;
0266     unsigned int nelem;
0267 
0268     qs_enter_reg_mode(qc->ap);
0269     if (qc->tf.protocol != ATA_PROT_DMA)
0270         return AC_ERR_OK;
0271 
0272     nelem = qs_fill_sg(qc);
0273 
0274     if ((qc->tf.flags & ATA_TFLAG_WRITE))
0275         hflags |= QS_HF_DIRO;
0276     if ((qc->tf.flags & ATA_TFLAG_LBA48))
0277         dflags |= QS_DF_ELBA;
0278 
0279     /* host control block (HCB) */
0280     buf[ 0] = QS_HCB_HDR;
0281     buf[ 1] = hflags;
0282     *(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nbytes);
0283     *(__le32 *)(&buf[ 8]) = cpu_to_le32(nelem);
0284     addr = ((u64)pp->pkt_dma) + QS_CPB_BYTES;
0285     *(__le64 *)(&buf[16]) = cpu_to_le64(addr);
0286 
0287     /* device control block (DCB) */
0288     buf[24] = QS_DCB_HDR;
0289     buf[28] = dflags;
0290 
0291     /* frame information structure (FIS) */
0292     ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]);
0293 
0294     return AC_ERR_OK;
0295 }
0296 
0297 static inline void qs_packet_start(struct ata_queued_cmd *qc)
0298 {
0299     struct ata_port *ap = qc->ap;
0300     u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
0301 
0302     writeb(QS_CTR0_CLER, chan + QS_CCT_CTR0);
0303     wmb();                             /* flush PRDs and pkt to memory */
0304     writel(QS_CCF_RUN_PKT, chan + QS_CCT_CFF);
0305     readl(chan + QS_CCT_CFF);          /* flush */
0306 }
0307 
0308 static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
0309 {
0310     struct qs_port_priv *pp = qc->ap->private_data;
0311 
0312     switch (qc->tf.protocol) {
0313     case ATA_PROT_DMA:
0314         pp->state = qs_state_pkt;
0315         qs_packet_start(qc);
0316         return 0;
0317 
0318     case ATAPI_PROT_DMA:
0319         BUG();
0320         break;
0321 
0322     default:
0323         break;
0324     }
0325 
0326     pp->state = qs_state_mmio;
0327     return ata_sff_qc_issue(qc);
0328 }
0329 
0330 static void qs_do_or_die(struct ata_queued_cmd *qc, u8 status)
0331 {
0332     qc->err_mask |= ac_err_mask(status);
0333 
0334     if (!qc->err_mask) {
0335         ata_qc_complete(qc);
0336     } else {
0337         struct ata_port    *ap  = qc->ap;
0338         struct ata_eh_info *ehi = &ap->link.eh_info;
0339 
0340         ata_ehi_clear_desc(ehi);
0341         ata_ehi_push_desc(ehi, "status 0x%02X", status);
0342 
0343         if (qc->err_mask == AC_ERR_DEV)
0344             ata_port_abort(ap);
0345         else
0346             ata_port_freeze(ap);
0347     }
0348 }
0349 
0350 static inline unsigned int qs_intr_pkt(struct ata_host *host)
0351 {
0352     unsigned int handled = 0;
0353     u8 sFFE;
0354     u8 __iomem *mmio_base = qs_mmio_base(host);
0355 
0356     do {
0357         u32 sff0 = readl(mmio_base + QS_HST_SFF);
0358         u32 sff1 = readl(mmio_base + QS_HST_SFF + 4);
0359         u8 sEVLD = (sff1 >> 30) & 0x01; /* valid flag */
0360         sFFE  = sff1 >> 31;     /* empty flag */
0361 
0362         if (sEVLD) {
0363             u8 sDST = sff0 >> 16;   /* dev status */
0364             u8 sHST = sff1 & 0x3f;  /* host status */
0365             unsigned int port_no = (sff1 >> 8) & 0x03;
0366             struct ata_port *ap = host->ports[port_no];
0367             struct qs_port_priv *pp = ap->private_data;
0368             struct ata_queued_cmd *qc;
0369 
0370             dev_dbg(host->dev, "SFF=%08x%08x: sHST=%d sDST=%02x\n",
0371                 sff1, sff0, sHST, sDST);
0372             handled = 1;
0373             if (!pp || pp->state != qs_state_pkt)
0374                 continue;
0375             qc = ata_qc_from_tag(ap, ap->link.active_tag);
0376             if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
0377                 switch (sHST) {
0378                 case 0: /* successful CPB */
0379                 case 3: /* device error */
0380                     qs_enter_reg_mode(qc->ap);
0381                     qs_do_or_die(qc, sDST);
0382                     break;
0383                 default:
0384                     break;
0385                 }
0386             }
0387         }
0388     } while (!sFFE);
0389     return handled;
0390 }
0391 
0392 static inline unsigned int qs_intr_mmio(struct ata_host *host)
0393 {
0394     unsigned int handled = 0, port_no;
0395 
0396     for (port_no = 0; port_no < host->n_ports; ++port_no) {
0397         struct ata_port *ap = host->ports[port_no];
0398         struct qs_port_priv *pp = ap->private_data;
0399         struct ata_queued_cmd *qc;
0400 
0401         qc = ata_qc_from_tag(ap, ap->link.active_tag);
0402         if (!qc) {
0403             /*
0404              * The qstor hardware generates spurious
0405              * interrupts from time to time when switching
0406              * in and out of packet mode.  There's no
0407              * obvious way to know if we're here now due
0408              * to that, so just ack the irq and pretend we
0409              * knew it was ours.. (ugh).  This does not
0410              * affect packet mode.
0411              */
0412             ata_sff_check_status(ap);
0413             handled = 1;
0414             continue;
0415         }
0416 
0417         if (!pp || pp->state != qs_state_mmio)
0418             continue;
0419         if (!(qc->tf.flags & ATA_TFLAG_POLLING))
0420             handled |= ata_sff_port_intr(ap, qc);
0421     }
0422     return handled;
0423 }
0424 
0425 static irqreturn_t qs_intr(int irq, void *dev_instance)
0426 {
0427     struct ata_host *host = dev_instance;
0428     unsigned int handled = 0;
0429     unsigned long flags;
0430 
0431     spin_lock_irqsave(&host->lock, flags);
0432     handled  = qs_intr_pkt(host) | qs_intr_mmio(host);
0433     spin_unlock_irqrestore(&host->lock, flags);
0434 
0435     return IRQ_RETVAL(handled);
0436 }
0437 
0438 static void qs_ata_setup_port(struct ata_ioports *port, void __iomem *base)
0439 {
0440     port->cmd_addr      =
0441     port->data_addr     = base + 0x400;
0442     port->error_addr    =
0443     port->feature_addr  = base + 0x408; /* hob_feature = 0x409 */
0444     port->nsect_addr    = base + 0x410; /* hob_nsect   = 0x411 */
0445     port->lbal_addr     = base + 0x418; /* hob_lbal    = 0x419 */
0446     port->lbam_addr     = base + 0x420; /* hob_lbam    = 0x421 */
0447     port->lbah_addr     = base + 0x428; /* hob_lbah    = 0x429 */
0448     port->device_addr   = base + 0x430;
0449     port->status_addr   =
0450     port->command_addr  = base + 0x438;
0451     port->altstatus_addr    =
0452     port->ctl_addr      = base + 0x440;
0453     port->scr_addr      = base + 0xc00;
0454 }
0455 
0456 static int qs_port_start(struct ata_port *ap)
0457 {
0458     struct device *dev = ap->host->dev;
0459     struct qs_port_priv *pp;
0460     void __iomem *mmio_base = qs_mmio_base(ap->host);
0461     void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
0462     u64 addr;
0463 
0464     pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
0465     if (!pp)
0466         return -ENOMEM;
0467     pp->pkt = dmam_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma,
0468                       GFP_KERNEL);
0469     if (!pp->pkt)
0470         return -ENOMEM;
0471     ap->private_data = pp;
0472 
0473     qs_enter_reg_mode(ap);
0474     addr = (u64)pp->pkt_dma;
0475     writel((u32) addr,        chan + QS_CCF_CPBA);
0476     writel((u32)(addr >> 32), chan + QS_CCF_CPBA + 4);
0477     return 0;
0478 }
0479 
0480 static void qs_host_stop(struct ata_host *host)
0481 {
0482     void __iomem *mmio_base = qs_mmio_base(host);
0483 
0484     writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
0485     writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
0486 }
0487 
0488 static void qs_host_init(struct ata_host *host, unsigned int chip_id)
0489 {
0490     void __iomem *mmio_base = host->iomap[QS_MMIO_BAR];
0491     unsigned int port_no;
0492 
0493     writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
0494     writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
0495 
0496     /* reset each channel in turn */
0497     for (port_no = 0; port_no < host->n_ports; ++port_no) {
0498         u8 __iomem *chan = mmio_base + (port_no * 0x4000);
0499         writeb(QS_CTR1_RDEV|QS_CTR1_RCHN, chan + QS_CCT_CTR1);
0500         writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
0501         readb(chan + QS_CCT_CTR0);        /* flush */
0502     }
0503     writeb(QS_SERD3_PHY_ENA, mmio_base + QS_HVS_SERD3); /* enable phy */
0504 
0505     for (port_no = 0; port_no < host->n_ports; ++port_no) {
0506         u8 __iomem *chan = mmio_base + (port_no * 0x4000);
0507         /* set FIFO depths to same settings as Windows driver */
0508         writew(32, chan + QS_CFC_HUFT);
0509         writew(32, chan + QS_CFC_HDFT);
0510         writew(10, chan + QS_CFC_DUFT);
0511         writew( 8, chan + QS_CFC_DDFT);
0512         /* set CPB size in bytes, as a power of two */
0513         writeb(QS_CPB_ORDER,    chan + QS_CCF_CSEP);
0514     }
0515     writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
0516 }
0517 
0518 /*
0519  * The QStor understands 64-bit buses, and uses 64-bit fields
0520  * for DMA pointers regardless of bus width.  We just have to
0521  * make sure our DMA masks are set appropriately for whatever
0522  * bridge lies between us and the QStor, and then the DMA mapping
0523  * code will ensure we only ever "see" appropriate buffer addresses.
0524  * If we're 32-bit limited somewhere, then our 64-bit fields will
0525  * just end up with zeros in the upper 32-bits, without any special
0526  * logic required outside of this routine (below).
0527  */
0528 static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
0529 {
0530     u32 bus_info = readl(mmio_base + QS_HID_HPHY);
0531     int dma_bits = (bus_info & QS_HPHY_64BIT) ? 64 : 32;
0532     int rc;
0533 
0534     rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_bits));
0535     if (rc)
0536         dev_err(&pdev->dev, "%d-bit DMA enable failed\n", dma_bits);
0537     return rc;
0538 }
0539 
0540 static int qs_ata_init_one(struct pci_dev *pdev,
0541                 const struct pci_device_id *ent)
0542 {
0543     unsigned int board_idx = (unsigned int) ent->driver_data;
0544     const struct ata_port_info *ppi[] = { &qs_port_info[board_idx], NULL };
0545     struct ata_host *host;
0546     int rc, port_no;
0547 
0548     ata_print_version_once(&pdev->dev, DRV_VERSION);
0549 
0550     /* alloc host */
0551     host = ata_host_alloc_pinfo(&pdev->dev, ppi, QS_PORTS);
0552     if (!host)
0553         return -ENOMEM;
0554 
0555     /* acquire resources and fill host */
0556     rc = pcim_enable_device(pdev);
0557     if (rc)
0558         return rc;
0559 
0560     if ((pci_resource_flags(pdev, QS_MMIO_BAR) & IORESOURCE_MEM) == 0)
0561         return -ENODEV;
0562 
0563     rc = pcim_iomap_regions(pdev, 1 << QS_MMIO_BAR, DRV_NAME);
0564     if (rc)
0565         return rc;
0566     host->iomap = pcim_iomap_table(pdev);
0567 
0568     rc = qs_set_dma_masks(pdev, host->iomap[QS_MMIO_BAR]);
0569     if (rc)
0570         return rc;
0571 
0572     for (port_no = 0; port_no < host->n_ports; ++port_no) {
0573         struct ata_port *ap = host->ports[port_no];
0574         unsigned int offset = port_no * 0x4000;
0575         void __iomem *chan = host->iomap[QS_MMIO_BAR] + offset;
0576 
0577         qs_ata_setup_port(&ap->ioaddr, chan);
0578 
0579         ata_port_pbar_desc(ap, QS_MMIO_BAR, -1, "mmio");
0580         ata_port_pbar_desc(ap, QS_MMIO_BAR, offset, "port");
0581     }
0582 
0583     /* initialize adapter */
0584     qs_host_init(host, board_idx);
0585 
0586     pci_set_master(pdev);
0587     return ata_host_activate(host, pdev->irq, qs_intr, IRQF_SHARED,
0588                  &qs_ata_sht);
0589 }
0590 
0591 module_pci_driver(qs_ata_pci_driver);
0592 
0593 MODULE_AUTHOR("Mark Lord");
0594 MODULE_DESCRIPTION("Pacific Digital Corporation QStor SATA low-level driver");
0595 MODULE_LICENSE("GPL");
0596 MODULE_DEVICE_TABLE(pci, qs_ata_pci_tbl);
0597 MODULE_VERSION(DRV_VERSION);