0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021 #include <linux/kernel.h>
0022 #include <linux/module.h>
0023 #include <linux/pci.h>
0024 #include <linux/blkdev.h>
0025 #include <linux/delay.h>
0026 #include <linux/interrupt.h>
0027 #include <linux/device.h>
0028 #include <scsi/scsi_host.h>
0029 #include <linux/libata.h>
0030 #include <linux/dmi.h>
0031
#define DRV_NAME "sata_sil"
#define DRV_VERSION "2.4"

/* SG segments may not cross a 2 GB boundary (mask 0x7fffffff) */
#define SIL_DMA_BOUNDARY 0x7fffffffUL
0036
enum {
	SIL_MMIO_BAR = 5,		/* PCI BAR mapped for MMIO register access */

	/*
	 * Host/port flags — private bits in the upper part of
	 * ata_port_info.flags (see sil_port_info[] below).
	 */
	SIL_FLAG_NO_SATA_IRQ = (1 << 28),	/* chip can't raise SATA (SError) IRQs */
	SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),	/* needs R_ERR-on-DMA-activate-FIS fix */
	SIL_FLAG_MOD15WRITE = (1 << 30),	/* apply mod15write workaround to blacklisted drives */

	SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA,

	/* board IDs — index into sil_port_info[] (see sil_pci_tbl driver_data) */
	sil_3112 = 0,
	sil_3112_no_sata_irq = 1,
	sil_3512 = 2,
	sil_3114 = 3,

	/*
	 * Global MMIO register: system configuration / interrupt mask.
	 * Setting a SIL_MASK_IDE*_INT bit masks that port's IRQ
	 * (see sil_freeze()/sil_thaw()).
	 */
	SIL_SYSCFG = 0x48,

	SIL_MASK_IDE0_INT = (1 << 22),
	SIL_MASK_IDE1_INT = (1 << 23),
	SIL_MASK_IDE2_INT = (1 << 24),
	SIL_MASK_IDE3_INT = (1 << 25),
	SIL_MASK_2PORT = SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT,
	SIL_MASK_4PORT = SIL_MASK_2PORT |
	SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,

	/* bit in port 2's BMDMA register — must be set on 4-port (3114) parts */
	SIL_INTR_STEERING = (1 << 1),

	/* bits in the per-port BMDMA/BMDMA2 status registers */
	SIL_DMA_ENABLE = (1 << 0),	/* DMA run switch */
	SIL_DMA_RDWR = (1 << 3),	/* DMA Read-Write */
	SIL_DMA_SATA_IRQ = (1 << 4),	/* OR of all SATA IRQs */
	SIL_DMA_ACTIVE = (1 << 16),	/* DMA running */
	SIL_DMA_ERROR = (1 << 17),	/* PCI bus error */
	SIL_DMA_COMPLETE = (1 << 18),	/* cmd complete / IRQ pending */
	SIL_DMA_N_SATA_IRQ = (1 << 6),	/* SATA IRQ for the next channel */
	SIL_DMA_N_ACTIVE = (1 << 24),	/* ACTIVE for the next channel */
	SIL_DMA_N_ERROR = (1 << 25),	/* ERROR for the next channel */
	SIL_DMA_N_COMPLETE = (1 << 26),	/* COMPLETE for the next channel */

	/* SIEN (SATA interrupt enable register, see sil_thaw()) */
	SIL_SIEN_N = (1 << 16),		/* triggered by SError.N */

	/*
	 * Per-drive quirk flags, matched via sil_blacklist[] in
	 * sil_dev_config().
	 */
	SIL_QUIRK_MOD15WRITE = (1 << 0),
	SIL_QUIRK_UDMA5MAX = (1 << 1),
};
0097
/* forward declarations for the operation/driver tables below */
static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int sil_pci_device_resume(struct pci_dev *pdev);
#endif
static void sil_dev_config(struct ata_device *dev);
static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc);
static void sil_bmdma_setup(struct ata_queued_cmd *qc);
static void sil_bmdma_start(struct ata_queued_cmd *qc);
static void sil_bmdma_stop(struct ata_queued_cmd *qc);
static void sil_freeze(struct ata_port *ap);
static void sil_thaw(struct ata_port *ap);
0112
0113
/*
 * Supported PCI IDs; driver_data is a board id indexing sil_port_info[].
 */
static const struct pci_device_id sil_pci_tbl[] = {
	{ PCI_VDEVICE(CMD, 0x3112), sil_3112 },
	{ PCI_VDEVICE(CMD, 0x0240), sil_3112 },
	{ PCI_VDEVICE(CMD, 0x3512), sil_3512 },
	{ PCI_VDEVICE(CMD, 0x3114), sil_3114 },
	{ PCI_VDEVICE(ATI, 0x436e), sil_3112 },
	{ PCI_VDEVICE(ATI, 0x4379), sil_3112_no_sata_irq },
	{ PCI_VDEVICE(ATI, 0x437a), sil_3112_no_sata_irq },

	{ }	/* terminate list */
};
0125
0126
0127
/*
 * Drives needing quirk handling on this controller family; matched by
 * exact product string in sil_dev_config().  Terminated by an empty
 * entry (NULL product).
 */
static const struct sil_drivelist {
	const char *product;
	unsigned int quirk;
} sil_blacklist [] = {
	{ "ST320012AS", SIL_QUIRK_MOD15WRITE },
	{ "ST330013AS", SIL_QUIRK_MOD15WRITE },
	{ "ST340017AS", SIL_QUIRK_MOD15WRITE },
	{ "ST360015AS", SIL_QUIRK_MOD15WRITE },
	{ "ST380023AS", SIL_QUIRK_MOD15WRITE },
	{ "ST3120023AS", SIL_QUIRK_MOD15WRITE },
	{ "ST340014ASL", SIL_QUIRK_MOD15WRITE },
	{ "ST360014ASL", SIL_QUIRK_MOD15WRITE },
	{ "ST380011ASL", SIL_QUIRK_MOD15WRITE },
	{ "ST3120022ASL", SIL_QUIRK_MOD15WRITE },
	{ "ST3160021ASL", SIL_QUIRK_MOD15WRITE },
	{ "TOSHIBA MK2561GSYN", SIL_QUIRK_MOD15WRITE },
	{ "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX },
	{ }
};
0147
/* PCI glue; suspend/resume hooks only built when CONFIG_PM_SLEEP is set */
static struct pci_driver sil_pci_driver = {
	.name = DRV_NAME,
	.id_table = sil_pci_tbl,
	.probe = sil_init_one,
	.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend = ata_pci_device_suspend,
	.resume = sil_pci_device_resume,
#endif
};
0158
static struct scsi_host_template sil_sht = {
	ATA_BASE_SHT(DRV_NAME),
	/*
	 * The DMA limits are more relaxed than standard ATA SFF:
	 * segments may span up to the 2 GB SIL_DMA_BOUNDARY rather
	 * than the usual 64 KB.
	 */
	.dma_boundary = SIL_DMA_BOUNDARY,
	.sg_tablesize = ATA_MAX_PRD
};
0167
/*
 * Inherits generic 32-bit-PIO BMDMA ops; overrides handle the chip's
 * separate BMDMA2 register, mode programming and errata fixups.
 */
static struct ata_port_operations sil_ops = {
	.inherits = &ata_bmdma32_port_ops,
	.dev_config = sil_dev_config,
	.set_mode = sil_set_mode,
	.bmdma_setup = sil_bmdma_setup,
	.bmdma_start = sil_bmdma_start,
	.bmdma_stop = sil_bmdma_stop,
	.qc_prep = sil_qc_prep,
	.freeze = sil_freeze,
	.thaw = sil_thaw,
	.scr_read = sil_scr_read,
	.scr_write = sil_scr_write,
};
0181
/* per-board port configuration, indexed by the sil_* board ids */
static const struct ata_port_info sil_port_info[] = {
	/* sil_3112 */
	{
		.flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA5,
		.port_ops = &sil_ops,
	},
	/* sil_3112_no_sata_irq */
	{
		.flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
		SIL_FLAG_NO_SATA_IRQ,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA5,
		.port_ops = &sil_ops,
	},
	/* sil_3512 */
	{
		.flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA5,
		.port_ops = &sil_ops,
	},
	/* sil_3114 */
	{
		.flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA5,
		.port_ops = &sil_ops,
	},
};
0217
0218
0219
/* per-port MMIO register offsets into the SIL_MMIO_BAR region */
static const struct {
	unsigned long tf;	/* ATA taskfile register block */
	unsigned long ctl;	/* ATA control/altstatus register */
	unsigned long bmdma;	/* DMA register block */
	unsigned long bmdma2;	/* DMA register block #2 */
	unsigned long fifo_cfg;	/* FIFO control register block */
	unsigned long scr;	/* SATA control register block */
	unsigned long sien;	/* SATA Interrupt Enable register */
	unsigned long xfer_mode;/* data transfer mode register */
	unsigned long sfis_cfg;	/* SATA FIS reception config register */
} sil_port[] = {
	/* port 0 ... */
	{ 0x80, 0x8A, 0x0, 0x10, 0x40, 0x100, 0x148, 0xb4, 0x14c },
	{ 0xC0, 0xCA, 0x8, 0x18, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc },
	/* ports 2 and 3 exist only on the 4-port 3114 */
	{ 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
	{ 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
	/* ... port 3 */
};
0239
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* module parameter: force the 15-sector limit on every attached drive */
static int slow_down;
module_param(slow_down, int, 0444);
MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");
0249
0250
/* Stop a BMDMA transaction by clearing the chip's BMDMA2 register. */
static void sil_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;

	/* clear start/stop bit - can safely always write 0 */
	iowrite8(0, bmdma2);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_sff_dma_pause(ap);
}
0263
/*
 * Program the PRD table address and issue the command.  Note that,
 * unlike generic SFF, the DMA engine is not started here; that is done
 * via the BMDMA2 register in sil_bmdma_start().
 */
static void sil_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *bmdma = ap->ioaddr.bmdma_addr;

	/* load PRD table addr. */
	iowrite32(ap->bmdma_prd_dma, bmdma + ATA_DMA_TABLE_OFS);

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}
0275
static void sil_bmdma_start(struct ata_queued_cmd *qc)
{
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;
	u8 dmactl = ATA_DMA_START;

	/*
	 * Set transfer direction, then start the host DMA transaction.
	 * ATA_DMA_WR means "DMA engine writes to memory", i.e. it is set
	 * for ATA *reads* (!rw).  The DMA must be started via the BMDMA2
	 * register (not the classic BMDMA block) on this chip.
	 */
	if (!rw)
		dmactl |= ATA_DMA_WR;
	iowrite8(dmactl, bmdma2);
}
0291
0292
/*
 * Fill the port's PRD table from the qc's scatter/gather list and mark
 * the final entry with the end-of-table bit.
 */
static void sil_fill_sg(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct ata_bmdma_prd *prd, *last_prd = NULL;
	unsigned int si;

	prd = &ap->bmdma_prd[0];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		/*
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		u32 addr = (u32) sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		prd->addr = cpu_to_le32(addr);
		prd->flags_len = cpu_to_le32(sg_len);

		last_prd = prd;
		prd++;
	}

	/* last_prd is NULL only for an empty sg list */
	if (likely(last_prd))
		last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
0318
0319 static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc)
0320 {
0321 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
0322 return AC_ERR_OK;
0323
0324 sil_fill_sg(qc);
0325
0326 return AC_ERR_OK;
0327 }
0328
0329 static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
0330 {
0331 u8 cache_line = 0;
0332 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line);
0333 return cache_line;
0334 }
0335
0336
0337
0338
0339
0340
0341
0342
0343
0344
/**
 *	sil_set_mode		-	wrap set_mode functions
 *	@link: link to set up
 *	@r_failed: returned device when we fail
 *
 *	Wrap the libata method for device setup as after the setup we need
 *	to inspect the results and program the chip's per-port xfer_mode
 *	register.  Encoding derived from the code below: 0 = disabled,
 *	1 = PIO, 3 = DMA; device 0 occupies bits 1:0, device 1 bits 5:4.
 */
static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
{
	struct ata_port *ap = link->ap;
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode;
	struct ata_device *dev;
	u32 tmp, dev_mode[2] = { };
	int rc;

	rc = ata_do_set_mode(link, r_failed);
	if (rc)
		return rc;

	ata_for_each_dev(dev, link, ALL) {
		if (!ata_dev_enabled(dev))
			dev_mode[dev->devno] = 0;	/* PIO0/1/2 */
		else if (dev->flags & ATA_DFLAG_PIO)
			dev_mode[dev->devno] = 1;	/* PIO3/4 */
		else
			dev_mode[dev->devno] = 3;	/* UDMA */
		/* value 2 indicates DMA */
	}

	tmp = readl(addr);
	tmp &= ~((1<<5) | (1<<4) | (1<<1) | (1<<0));
	tmp |= dev_mode[0];
	tmp |= (dev_mode[1] << 4);
	writel(tmp, addr);
	readl(addr);	/* flush */
	return 0;
}
0376
0377 static inline void __iomem *sil_scr_addr(struct ata_port *ap,
0378 unsigned int sc_reg)
0379 {
0380 void __iomem *offset = ap->ioaddr.scr_addr;
0381
0382 switch (sc_reg) {
0383 case SCR_STATUS:
0384 return offset + 4;
0385 case SCR_ERROR:
0386 return offset + 8;
0387 case SCR_CONTROL:
0388 return offset;
0389 default:
0390
0391 break;
0392 }
0393
0394 return NULL;
0395 }
0396
0397 static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
0398 {
0399 void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);
0400
0401 if (mmio) {
0402 *val = readl(mmio);
0403 return 0;
0404 }
0405 return -EINVAL;
0406 }
0407
0408 static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
0409 {
0410 void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);
0411
0412 if (mmio) {
0413 writel(val, mmio);
0414 return 0;
0415 }
0416 return -EINVAL;
0417 }
0418
/*
 * Per-port interrupt handler, called with @bmdma2 being the port's
 * BMDMA2 status snapshot.  Handles SATA (SError) events, then drives
 * the SFF host state machine for the active command.
 */
static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
	u8 status;

	if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
		u32 serror = 0xffffffff;

		/*
		 * A SATA interrupt is asserted because of SError bits.
		 * Read SError and write the value back to clear the
		 * asserted bits and deassert the IRQ.
		 */
		sil_scr_read(&ap->link, SCR_ERROR, &serror);
		sil_scr_write(&ap->link, SCR_ERROR, serror);

		/*
		 * PHY ready changed: record the SError bits for EH and
		 * freeze the port (hotplug event handling).
		 */
		if (serror & SERR_PHYRDY_CHG) {
			ap->link.eh_info.serror |= serror;
			goto freeze;
		}

		/* pure SATA event without command completion: done */
		if (!(bmdma2 & SIL_DMA_COMPLETE))
			return;
	}

	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		/* spurious or polled-command IRQ: just clear it */
		ap->ops->sff_check_status(ap);
		return;
	}

	/* Check whether we are expecting an interrupt in this state. */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/*
		 * Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 *
		 * The flag was turned on only for ATAPI devices, so no
		 * need to check ata_is_atapi(qc->tf.protocol) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto err_hsm;
		break;
	case HSM_ST_LAST:
		if (ata_is_dma(qc->tf.protocol)) {
			/* clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (bmdma2 & SIL_DMA_ERROR) {
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		/* data transfer in progress: OK, handled below */
		break;
	default:
		goto err_hsm;
	}

	/* check main status, clearing INTRQ */
	status = ap->ops->sff_check_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto err_hsm;

	/* ack bmdma irq events */
	ata_bmdma_irq_clear(ap);

	/* advance the host state machine */
	ata_sff_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
		ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);

	return;

err_hsm:
	qc->err_mask |= AC_ERR_HSM;
freeze:
	ata_port_freeze(ap);
}
0505
/*
 * Shared IRQ handler: poll each port's BMDMA2 status and dispatch to
 * sil_host_intr() for ports with pending events.
 */
static irqreturn_t sil_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
	int handled = 0;
	int i;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);

		/* turn off SATA_IRQ if not supported on this chip */
		if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
			bmdma2 &= ~SIL_DMA_SATA_IRQ;

		/* all-ones read means the device is gone; otherwise skip
		 * ports with no completion/SATA event pending */
		if (bmdma2 == 0xffffffff ||
		    !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
			continue;

		sil_host_intr(ap, bmdma2);
		handled = 1;
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
0535
/* EH freeze hook: mask all interrupt sources for the port. */
static void sil_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	u32 tmp;

	/* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
	writel(0, mmio_base + sil_port[ap->port_no].sien);

	/* plug IRQ via the per-port mask bit in SYSCFG */
	tmp = readl(mmio_base + SIL_SYSCFG);
	tmp |= SIL_MASK_IDE0_INT << ap->port_no;
	writel(tmp, mmio_base + SIL_SYSCFG);
	readl(mmio_base + SIL_SYSCFG);	/* flush */

	/*
	 * Ensure DMA_ENABLE is off.  NOTE(review): kept from original
	 * ordering — the controller apparently withholds taskfile access
	 * while DMA is in progress; confirm against the SiI datasheet.
	 */
	iowrite8(ioread8(ap->ioaddr.bmdma_addr) & ~SIL_DMA_ENABLE,
	ap->ioaddr.bmdma_addr);

	/* dummy read to flush the write and give the HDMA transition a
	 * guaranteed cycle (cannot read a taskfile register here) */
	ioread8(ap->ioaddr.bmdma_addr);
}
0563
/* EH thaw hook: clear pending IRQs, then re-enable interrupt sources. */
static void sil_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	u32 tmp;

	/* clear IRQ */
	ap->ops->sff_check_status(ap);
	ata_bmdma_irq_clear(ap);

	/* turn on SATA IRQ if supported */
	if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
		writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);

	/* turn on IRQ by clearing the per-port mask bit in SYSCFG */
	tmp = readl(mmio_base + SIL_SYSCFG);
	tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
	writel(tmp, mmio_base + SIL_SYSCFG);
}
0582
0583
0584
0585
0586
0587
0588
0589
0590
0591
0592
0593
0594
0595
0596
0597
0598
0599
0600
0601
0602
0603
0604
0605
0606
0607
0608
0609
0610
/**
 *	sil_dev_config - Apply device-specific errata fixups
 *	@dev: device to be examined
 *
 *	Disables TRIM on all devices behind this controller, then matches
 *	the device's product string against sil_blacklist[]:
 *	- mod15write drives (or any drive if the slow_down module
 *	  parameter is set, on hosts flagged SIL_FLAG_MOD15WRITE) are
 *	  limited to 15-sector transfers;
 *	- SIL_QUIRK_UDMA5MAX drives are capped at UDMA5.
 */
static void sil_dev_config(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	int print_info = ap->link.eh_context.i.flags & ATA_EHI_PRINTINFO;
	unsigned int n, quirks = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	/* This controller doesn't support trim */
	dev->horkage |= ATA_HORKAGE_NOTRIM;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	for (n = 0; sil_blacklist[n].product; n++)
		if (!strcmp(sil_blacklist[n].product, model_num)) {
			quirks = sil_blacklist[n].quirk;
			break;
		}

	/* limit requests to 15 sectors */
	if (slow_down ||
	    ((ap->flags & SIL_FLAG_MOD15WRITE) &&
	     (quirks & SIL_QUIRK_MOD15WRITE))) {
		if (print_info)
			ata_dev_info(dev,
		"applying Seagate errata fix (mod15write workaround)\n");
		dev->max_sectors = 15;
		return;
	}

	/* limit to udma5 */
	if (quirks & SIL_QUIRK_UDMA5MAX) {
		if (print_info)
			ata_dev_info(dev, "applying Maxtor errata fix %s\n",
				     model_num);
		dev->udma_mask &= ATA_UDMA5;
		return;
	}
}
0649
/*
 * One-time controller setup, run at probe and after resume: program
 * the FIFO thresholds from the PCI cache line size, apply the R_ERR
 * on DMA activate FIS errata fix, and enable IRQ steering on 4-port
 * chips.
 */
static void sil_init_controller(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
	u8 cls;
	u32 tmp;
	int i;

	/* Initialize FIFO PCI bus arbitration */
	cls = sil_get_device_cache_line(pdev);
	if (cls) {
		cls >>= 3;
		cls++;	/* cls = (line_size/8)+1 */
		for (i = 0; i < host->n_ports; i++)
			writew(cls << 8 | cls,
			       mmio_base + sil_port[i].fifo_cfg);
	} else
		dev_warn(&pdev->dev,
			 "cache line size not set. Driver may not function\n");

	/* Apply R_ERR on DMA activate FIS errata workaround */
	if (host->ports[0]->flags & SIL_FLAG_RERR_ON_DMA_ACT) {
		int cnt;

		for (i = 0, cnt = 0; i < host->n_ports; i++) {
			tmp = readl(mmio_base + sil_port[i].sfis_cfg);
			if ((tmp & 0x3) != 0x01)
				continue;
			if (!cnt)
				dev_info(&pdev->dev,
					 "Applying R_ERR on DMA activate FIS errata fix\n");
			writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
			cnt++;
		}
	}

	if (host->n_ports == 4) {
		/* flip the magic "make 4 ports work" bit */
		tmp = readl(mmio_base + sil_port[2].bmdma);
		if ((tmp & SIL_INTR_STEERING) == 0)
			writel(tmp | SIL_INTR_STEERING,
			       mmio_base + sil_port[2].bmdma);
	}
}
0694
0695 static bool sil_broken_system_poweroff(struct pci_dev *pdev)
0696 {
0697 static const struct dmi_system_id broken_systems[] = {
0698 {
0699 .ident = "HP Compaq nx6325",
0700 .matches = {
0701 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
0702 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
0703 },
0704
0705 .driver_data = (void *)0x12UL,
0706 },
0707
0708 { }
0709 };
0710 const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
0711
0712 if (dmi) {
0713 unsigned long slot = (unsigned long)dmi->driver_data;
0714
0715 return slot == PCI_SLOT(pdev->devfn);
0716 }
0717
0718 return false;
0719 }
0720
/*
 * PCI probe: allocate the ata_host, map BAR 5, wire up per-port MMIO
 * addresses from sil_port[], initialize the controller and activate
 * the host.  Returns 0 or a negative errno.
 */
static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int board_id = ent->driver_data;
	/* it's not a error in older kernels, but a board id into sil_port_info */
	struct ata_port_info pi = sil_port_info[board_id];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct ata_host *host;
	void __iomem *mmio_base;
	int n_ports, rc;
	unsigned int i;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* allocate host — only the 3114 has 4 ports */
	n_ports = 2;
	if (board_id == sil_3114)
		n_ports = 4;

	if (sil_broken_system_poweroff(pdev)) {
		pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN |
					ATA_FLAG_NO_HIBERNATE_SPINDOWN;
		dev_info(&pdev->dev, "quirky BIOS, skipping spindown "
				"on poweroff and hibernation\n");
	}

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	if (!host)
		return -ENOMEM;

	/* acquire resources and fill host (managed; freed on detach) */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << SIL_MMIO_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;

	mmio_base = host->iomap[SIL_MMIO_BAR];

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_ioports *ioaddr = &ap->ioaddr;

		ioaddr->cmd_addr = mmio_base + sil_port[i].tf;
		ioaddr->altstatus_addr =
		ioaddr->ctl_addr = mmio_base + sil_port[i].ctl;
		ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma;
		ioaddr->scr_addr = mmio_base + sil_port[i].scr;
		ata_sff_std_ports(ioaddr);

		ata_port_pbar_desc(ap, SIL_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, SIL_MMIO_BAR, sil_port[i].tf, "tf");
	}

	/* initialize and activate */
	sil_init_controller(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, sil_interrupt, IRQF_SHARED,
				 &sil_sht);
}
0789
#ifdef CONFIG_PM_SLEEP
/*
 * Resume hook: restore PCI state via libata, re-run controller init
 * (FIFO config, errata fixes, IRQ steering), then resume the host.
 */
static int sil_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	sil_init_controller(host);
	ata_host_resume(host);

	return 0;
}
#endif
0806
/* generates module init/exit that register/unregister sil_pci_driver */
module_pci_driver(sil_pci_driver);