/*
 * sata_nv.c - low-level driver for NVIDIA nForce SATA controllers
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>
#include <trace/events/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.5"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL

enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= ATA_PIO4,
	NV_MWDMA_MASK			= ATA_MWDMA2,
	NV_UDMA_MASK			= ATA_UDMA6,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01,

	/* For PCI config register 20 */
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

	/* MCP55 reg offsets */
	NV_CTL_MCP55			= 0x400,
	NV_INT_STATUS_MCP55		= 0x440,
	NV_INT_ENABLE_MCP55		= 0x444,
	NV_NCQ_REG_MCP55		= 0x448,

	/* MCP55 interrupt bits */
	NV_INT_ALL_MCP55		= 0xffff,
	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,

	/* SWNCQ enable bits */
	NV_CTL_PRI_SWNCQ		= 0x02,
	NV_CTL_SEC_SWNCQ		= 0x04,

	/* SWNCQ status bits */
	NV_SWNCQ_IRQ_DEV		= (1 << 0),
	NV_SWNCQ_IRQ_PM			= (1 << 1),
	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
					  NV_SWNCQ_IRQ_REMOVED,
};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};

enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};

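/*
 * ADMA Command Parameter Block.
 * The first 5 SG segments are stored inside the Command Parameter Block
 * itself.  If there are more than 5 segments the remainder are stored in
 * a separate memory area indicated by next_aprd.
 */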
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;	       /* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};

struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;
	u64			adma_dma_mask;
	u8			flags;
	int			last_issue_ncq;
};

struct nv_host_priv {
	unsigned long		type;
};

struct defer_queue {
	u32			defer_bits;
	unsigned int		head;
	unsigned int		tail;
	unsigned int		tag[ATA_MAX_QUEUE];
};

enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),
	ncq_saw_dmas	= (1U << 1),
	ncq_saw_sdb	= (1U << 2),
	ncq_saw_backout	= (1U << 3),
};

struct nv_swncq_port_priv {
	struct ata_bmdma_prd	*prd;	 /* our SG list */
	dma_addr_t		prd_dma; /* and its DMA mapping */
	void __iomem		*sactive_block;
	void __iomem		*irq_block;
	void __iomem		*tag_block;
	u32			qc_active;

	unsigned int		last_issue_tag;

	/* fifo circular queue to store deferral command */
	struct defer_queue	defer_queue;

	/* for NCQ interrupt analysis */
	u32			dhfis_bits;
	u32			dmafis_bits;
	u32			sdbfis_bits;

	unsigned int		ncq_flags;
};

#define NV_ADMA_CHECK_INTR(GCTL, PORT)	((GCTL) & (1 << (19 + (12 * (PORT)))))

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);

static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as this driver is concerned */
	CK804,
	ADMA,
	MCP5x,
	SWNCQ,
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ }	/* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct scsi_host_template nv_adma_sht = {
	__ATA_BASE_SHT(DRV_NAME),
	.can_queue		= NV_ADMA_MAX_CPBS,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.sdev_groups		= ata_ncq_sdev_groups,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
};

static struct scsi_host_template nv_swncq_sht = {
	__ATA_BASE_SHT(DRV_NAME),
	.can_queue		= ATA_MAX_QUEUE - 1,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
	.sdev_groups		= ata_ncq_sdev_groups,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
};

/*
 * NV SATA controllers have various different problems with hardreset
 * protocol depending on the specific controller and device: on some
 * controllers the link does not come back online after a hardreset,
 * while on others the device signature cannot be acquired reliably
 * afterwards.  nv_hardreset() below is therefore deliberately
 * conservative: it only performs a real hardreset when probing an
 * empty port after boot, otherwise it just resumes the link, and it
 * always returns -EAGAIN so that EH classifies the device with a
 * follow-up softreset.
 */

static struct ata_port_operations nv_generic_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.lost_interrupt		= ATA_OP_NULL,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.hardreset		= nv_hardreset,
};

static struct ata_port_operations nv_nf2_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
};

static struct ata_port_operations nv_ck804_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.host_stop		= nv_ck804_host_stop,
};

static struct ata_port_operations nv_adma_ops = {
	.inherits		= &nv_ck804_ops,

	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.sff_tf_read		= nv_adma_tf_read,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.sff_irq_clear		= nv_adma_irq_clear,

	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,

	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};

static struct ata_port_operations nv_swncq_ops = {
	.inherits		= &nv_generic_ops,

	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,

	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,

#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};

struct nv_pi_priv {
	irq_handler_t			irq_handler;
	struct scsi_host_template	*sht;
};

#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }

static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* MCP5x */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* SWNCQ */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static bool adma_enabled;
static bool swncq_enabled = true;
static bool msi_enabled;

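/*
 * Drop the port out of ADMA mode: wait for the ADMA engine to become
 * idle, clear the GO bit, and then wait for it to report legacy mode.
 */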
static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
			      status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap,
			      "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			      status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

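/*
 * Switch the port into ADMA mode: set the GO bit and wait for the
 * engine to leave legacy mode and become idle.
 */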
static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	       !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap,
			      "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			      status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

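/*
 * Constrain a SCSI device's DMA parameters to what the attached device
 * can handle: legacy 32-bit DMA limits for ATAPI, full ADMA limits
 * otherwise, and keep the PCI config space port-enable bits in sync.
 */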
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct nv_adma_port_priv *port0, *port1;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary, flags;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	spin_lock_irqsave(ap->lock, flags);

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI
		 * commands, so ATAPI commands have to go through the legacy
		 * interface, which only supports 32-bit DMA.  Restrict the
		 * DMA parameters as required by the legacy interface when an
		 * ATAPI device is connected.
		 */
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding,
		   see libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable
		   ADMA on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	port0 = ap->host->ports[0]->private_data;
	port1 = ap->host->ports[1]->private_data;
	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/*
		 * We have to set the DMA mask to 32-bit if either port is in
		 * ATAPI mode, since they are on the same PCI device which is
		 * used for DMA mapping.  If either SCSI device is not
		 * allocated yet, it's OK since that port will discover its
		 * correct setting when it does get allocated.
		 */
		rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
	} else {
		rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
	}

	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
	ata_port_info(ap,
		      "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		      (unsigned long long)*ap->host->dev->dma_mask,
		      segment_boundary, sg_tablesize);

	spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails. In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway. We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_sff_tf_read(ap, tf);
}

static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}

static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	ata_port_dbg(ap, "CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return -1;
	}

	if (likely(flags & NV_CPB_RESP_DONE))
		return 1;
	return 0;
}

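/* Legacy-mode interrupt handling for one port; returns nonzero if handled. */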
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_sff_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_bmdma_port_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct nv_adma_port_priv *pp = ap->private_data;
		void __iomem *mmio = pp->ctl_block;
		u16 status;
		u32 gen_ctl;
		u32 notifier, notifier_error;

		notifier_clears[i] = 0;

		/* if ADMA is disabled, use standard ata interrupt handler */
		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			handled += nv_host_intr(ap, irq_stat);
			continue;
		}

		/* if in ATA register mode, check for standard interrupts */
		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			if (ata_tag_valid(ap->link.active_tag))
				/* NV_INT_DEV indication seems unreliable
				   at times at least in ADMA mode. Force it
				   on always when a command is active, to
				   prevent losing interrupts. */
				irq_stat |= NV_INT_DEV;
			handled += nv_host_intr(ap, irq_stat);
		}

		notifier = readl(mmio + NV_ADMA_NOTIFIER);
		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
		notifier_clears[i] = notifier | notifier_error;

		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
		    !notifier_error)
			/* Nothing to do */
			continue;

		status = readw(mmio + NV_ADMA_STAT);

		/*
		 * Clear status. Ensure the controller sees the
		 * clearing before we start looking at any of the CPB
		 * statuses, so that any CPB completions after this
		 * point in the handler will raise another interrupt.
		 */
		writew(status, mmio + NV_ADMA_STAT);
		readw(mmio + NV_ADMA_STAT); /* flush posted write */
		rmb();

		handled++; /* irq handled if we got here */

		/* freeze if hotplugged or controller error */
		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
				       NV_ADMA_STAT_HOTUNPLUG |
				       NV_ADMA_STAT_TIMEOUT |
				       NV_ADMA_STAT_SERROR))) {
			struct ata_eh_info *ehi = &ap->link.eh_info;

			ata_ehi_clear_desc(ehi);
			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
			if (status & NV_ADMA_STAT_TIMEOUT) {
				ehi->err_mask |= AC_ERR_SYSTEM;
				ata_ehi_push_desc(ehi, "timeout");
			} else if (status & NV_ADMA_STAT_HOTPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hotplug");
			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hot unplug");
			} else if (status & NV_ADMA_STAT_SERROR) {
				/* let EH analyze SError and figure out cause */
				ata_ehi_push_desc(ehi, "SError");
			} else
				ata_ehi_push_desc(ehi, "unknown");
			ata_port_freeze(ap);
			continue;
		}

		if (status & (NV_ADMA_STAT_DONE |
			      NV_ADMA_STAT_CPBERR |
			      NV_ADMA_STAT_CMD_COMPLETE)) {
			u32 check_commands = notifier_clears[i];
			u32 done_mask = 0;
			int pos, rc;

			if (status & NV_ADMA_STAT_CPBERR) {
				/* check all active commands */
				if (ata_tag_valid(ap->link.active_tag))
					check_commands = 1 <<
						ap->link.active_tag;
				else
					check_commands = ap->link.sactive;
			}

			/* check CPBs for completed commands */
			while ((pos = ffs(check_commands))) {
				pos--;
				rc = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
				if (rc > 0)
					done_mask |= 1 << pos;
				else if (unlikely(rc < 0))
					check_commands = 0;
				check_commands &= ~(1 << pos);
			}
			ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written,
		   even if only one is set, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_bmdma_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_bmdma_post_internal_cmd(qc);
}

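/*
 * Allocate the per-port CPB and APRD tables in one coherent DMA chunk
 * and bring the ADMA engine up in register mode with interrupts enabled.
 */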
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 tmp;

	/*
	 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	 * pad buffers.
	 */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	/* we might fallback to bmdma, allocate bmdma resources */
	rc = ata_bmdma_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/*
	 * Now that the legacy PRD and padding buffer are allocated we can
	 * raise the DMA mask to allocate the CPB/APRD table.
	 */
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
#endif

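/* Point the taskfile register addresses at the port's ADMA register block. */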
static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}

static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg)));
	aprd->flags = flags;
	aprd->packet_len = 0;
}

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		aprd = (si < 5) ? &cpb->aprd[si] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	if (si > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}

static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands. */
	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	   (qc->tf.flags & ATA_TFLAG_POLLING))
		return 1;

	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}

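/*
 * Build the CPB for a command, or fall back to bmdma PRD setup when the
 * command has to be issued in register mode.
 */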
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_bmdma_qc_prep(qc);
		return AC_ERR_OK;
	}

	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->hw_tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;

	return AC_ERR_OK;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would abort
	   existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_bmdma_qc_issue(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 5 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->hw_tag, mmio + NV_ADMA_APPEND);

	return 0;
}

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
			handled += ata_bmdma_port_intr(ap, qc);
		} else {
			/*
			 * No request pending?  Clear interrupt status
			 * anyway, in case there's one pending.
			 */
			ap->ops->sff_check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		handled += nv_host_intr(host->ports[i], irq_stat);
		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;

	/* Do hardreset iff it's post-boot probing, please read the
	 * comment above port ops for details.
	 */
	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
	    !ata_dev_enabled(link->device))
		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
				    NULL, NULL);
	else {
		const unsigned long *timing = sata_ehc_deb_timing(ehc);
		int rc;

		if (!(ehc->i.flags & ATA_EHI_QUIET))
			ata_link_info(link,
				      "nv: skipping hardreset on occupied port\n");

		/* make sure the link is online */
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_warn(link, "failed to resume link (errno=%d)\n",
				      rc);
	}

	/* device signature acquisition is unreliable */
	return -EAGAIN;
}

static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_mcp55_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask &= ~(NV_INT_ALL_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
}

static void nv_mcp55_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask |= (NV_INT_MASK_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
}

static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_err(ap,
				"EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
				    ap->link.sactive & (1 << i))
					ata_port_err(ap,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from
		   being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_error_handler(ap);
}

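/* Push a deferred queued command onto the tail of the software defer queue. */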
static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	/* queue is full */
	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
	dq->defer_bits |= (1 << qc->hw_tag);
	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
}

static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;
	unsigned int tag;

	if (dq->head == dq->tail)	/* null queue */
		return NULL;

	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
	WARN_ON(!(dq->defer_bits & (1 << tag)));
	dq->defer_bits &= ~(1 << tag);

	return ata_qc_from_tag(ap, tag);
}

static void nv_swncq_fis_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	pp->dhfis_bits = 0;
	pp->dmafis_bits = 0;
	pp->sdbfis_bits = 0;
	pp->ncq_flags = 0;
}

static void nv_swncq_pp_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	dq->head = 0;
	dq->tail = 0;
	dq->defer_bits = 0;
	pp->qc_active = 0;
	pp->last_issue_tag = ATA_TAG_POISON;
	nv_swncq_fis_reinit(ap);
}

static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	writew(fis, pp->irq_block);
}

static void __ata_bmdma_stop(struct ata_port *ap)
{
	struct ata_queued_cmd qc;

	qc.ap = ap;
	ata_bmdma_stop(&qc);
}

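/* Dump SWNCQ state and stop the engines when EH is entered with NCQ active. */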
static void nv_swncq_ncq_stop(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	unsigned int i;
	u32 sactive;
	u32 done_mask;

	ata_port_err(ap, "EH in SWNCQ mode, QC: qc_active 0x%llX sactive 0x%X\n",
		     ap->qc_active, ap->link.sactive);
	ata_port_err(ap,
		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n"
		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);

	ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
		     ap->ops->sff_check_status(ap),
		     ioread8(ap->ioaddr.error_addr));

	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		u8 err = 0;
		if (pp->qc_active & (1 << i))
			err = 0;
		else if (done_mask & (1 << i))
			err = 1;
		else
			continue;

		ata_port_err(ap,
			     "tag 0x%x: %01x %01x %01x %01x %s\n", i,
			     (pp->dhfis_bits >> i) & 0x1,
			     (pp->dmafis_bits >> i) & 0x1,
			     (pp->sdbfis_bits >> i) & 0x1,
			     (sactive >> i) & 0x1,
			     (err ? "error! tag doesn't exist" : " "));
	}

	nv_swncq_pp_reinit(ap);
	ap->ops->sff_irq_clear(ap);
	__ata_bmdma_stop(ap);
	nv_swncq_irq_clear(ap, 0xffff);
}

static void nv_swncq_error_handler(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->link.eh_context;

	if (ap->link.sactive) {
		nv_swncq_ncq_stop(ap);
		ehc->i.action |= ATA_EH_RESET;
	}

	ata_bmdma_error_handler(ap);
}

#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* disable irq */
	writel(0, mmio + NV_INT_ENABLE_MCP55);

	/* disable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
	writel(tmp, mmio + NV_CTL_MCP55);

	return 0;
}

static int nv_swncq_port_resume(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* enable irq */
	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	return 0;
}
#endif

static void nv_swncq_host_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable ECO 398 */
	pci_read_config_byte(pdev, 0x7f, &regval);
	regval &= ~(1 << 7);
	pci_write_config_byte(pdev, 0x7f, regval);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	dev_dbg(&pdev->dev, "HOST_CTL:0x%X\n", tmp);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	/* enable irq intr */
	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
	dev_dbg(&pdev->dev, "HOST_ENABLE:0x%X\n", tmp);
	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* clear port irq */
	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}

static int nv_swncq_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *dev;
	int rc;
	u8 rev;
	u8 check_maxtor = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	rc = ata_scsi_slave_config(sdev);
	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	dev = &ap->link.device[sdev->id];
	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
		return rc;

	/* if MCP51 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
	    pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
		check_maxtor = 1;

	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
	    pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
		pci_read_config_byte(pdev, 0x8, &rev);
		if (rev <= 0xa2)
			check_maxtor = 1;
	}

	if (!check_maxtor)
		return rc;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	if (strncmp(model_num, "Maxtor", 6) == 0) {
		ata_scsi_change_queue_depth(sdev, 1);
		ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
			       sdev->queue_depth);
	}

	return rc;
}

static int nv_swncq_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct nv_swncq_port_priv *pp;
	int rc;

	/* we might fallback to bmdma, allocate bmdma resource */
	rc = ata_bmdma_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
				      &pp->prd_dma, GFP_KERNEL);
	if (!pp->prd)
		return -ENOMEM;

	ap->private_data = pp;
	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;

	return 0;
}

static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
{
	if (qc->tf.protocol != ATA_PROT_NCQ) {
		ata_bmdma_qc_prep(qc);
		return AC_ERR_OK;
	}

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;

	nv_swncq_fill_sg(qc);

	return AC_ERR_OK;
}

static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_bmdma_prd *prd;
	unsigned int si, idx;

	prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32)sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
					  struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc == NULL)
		return 0;

	writel((1 << qc->hw_tag), pp->sactive_block);
	pp->last_issue_tag = qc->hw_tag;
	pp->dhfis_bits &= ~(1 << qc->hw_tag);
	pp->dmafis_bits &= ~(1 << qc->hw_tag);
	pp->qc_active |= (0x1 << qc->hw_tag);

	trace_ata_tf_load(ap, &qc->tf);
	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
	trace_ata_exec_command(ap, &qc->tf, qc->hw_tag);
	ap->ops->sff_exec_command(ap, &qc->tf);

	return 0;
}

static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc->tf.protocol != ATA_PROT_NCQ)
		return ata_bmdma_qc_issue(qc);

	if (!pp->qc_active)
		nv_swncq_issue_atacmd(ap, qc);
	else
		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */

	return 0;
}

static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
	u32 serror;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	sata_scr_read(&ap->link, SCR_ERROR, &serror);
	sata_scr_write(&ap->link, SCR_ERROR, serror);

	/* analyze @irq_stat */
	if (fis & NV_SWNCQ_IRQ_ADDED)
		ata_ehi_push_desc(ehi, "hot plug");
	else if (fis & NV_SWNCQ_IRQ_REMOVED)
		ata_ehi_push_desc(ehi, "hot unplug");

	ata_ehi_hotplugged(ehi);

	/* okay, let's hand over to EH */
	ehi->serror |= serror;

	ata_port_freeze(ap);
}

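/*
 * Handle a Set Device Bits FIS: complete all commands that have left
 * SActive, then reissue or dequeue the next command as needed.
 */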
static int nv_swncq_sdbfis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 sactive;
	u32 done_mask;
	u8 host_stat;
	u8 lack_dhfis = 0;

	host_stat = ap->ops->bmdma_status(ap);
	trace_ata_bmdma_status(ap, host_stat);
	if (unlikely(host_stat & ATA_DMA_ERR)) {
		/* error when transferring data to/from memory */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
		ehi->err_mask |= AC_ERR_HOST_BUS;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}

	ap->ops->sff_irq_clear(ap);
	__ata_bmdma_stop(ap);

	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	pp->qc_active &= ~done_mask;
	pp->dhfis_bits &= ~done_mask;
	pp->dmafis_bits &= ~done_mask;
	pp->sdbfis_bits |= done_mask;
	ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);

	if (!ap->qc_active) {
		ata_port_dbg(ap, "over\n");
		nv_swncq_pp_reinit(ap);
		return 0;
	}

	if (pp->qc_active & pp->dhfis_bits)
		return 0;

	if ((pp->ncq_flags & ncq_saw_backout) ||
	    (pp->qc_active ^ pp->dhfis_bits))
		/* if the controller can't get a device-to-host register FIS,
		 * the driver needs to reissue the command.
		 */
		lack_dhfis = 1;

	ata_port_dbg(ap, "QC: qc_active 0x%llx, "
		     "SWNCQ: qc_active 0x%X defer_bits 0x%X "
		     "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
		     ap->qc_active, pp->qc_active,
		     pp->defer_queue.defer_bits, pp->dhfis_bits,
		     pp->dmafis_bits, pp->last_issue_tag);

	nv_swncq_fis_reinit(ap);

	if (lack_dhfis) {
		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
		nv_swncq_issue_atacmd(ap, qc);
		return 0;
	}

	if (pp->defer_queue.defer_bits) {
		/* send the next deferred queue command */
		qc = nv_swncq_qc_from_dq(ap);
		WARN_ON(qc == NULL);
		nv_swncq_issue_atacmd(ap, qc);
	}

	return 0;
}

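/* Read the tag of the command the controller is currently serving from
 * the tag block (the tag lives in bits 2..6 of the register).
 */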
static inline u32 nv_swncq_tag(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	u32 tag;

	tag = readb(pp->tag_block) >> 2;
	return (tag & 0x1f);
}

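/* A DMA Setup FIS arrived: point the BMDMA engine at the PRD table of the
 * tagged command, set the transfer direction, and start the DMA.
 */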
static void nv_swncq_dmafis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int rw;
	u8 dmactl;
	u32 tag;
	struct nv_swncq_port_priv *pp = ap->private_data;

	__ata_bmdma_stop(ap);
	tag = nv_swncq_tag(ap);

	ata_port_dbg(ap, "dma setup tag 0x%x\n", tag);
	qc = ata_qc_from_tag(ap, tag);

	if (unlikely(!qc))
		return;

	rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* load PRD table addr. */
	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~ATA_DMA_WR;
	if (!rw)
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}

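/* Per-port interrupt handler for SWNCQ mode: dispatches hotplug events,
 * device errors, and backout/SDB/D2H/DMA-setup FIS notifications, keeping
 * the NCQ bookkeeping bits in sync with the hardware.
 */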
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;

	ata_stat = ap->ops->sff_check_status(ap);
	nv_swncq_irq_clear(ap, fis);
	if (!fis)
		return;

	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	if (!pp->qc_active)
		return;

	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);

	if (ata_stat & ATA_ERR) {
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* If the IRQ is a backout, the driver may not be able to
		 * retrieve the SDB FIS, so note it in ncq_saw_backout.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		pp->ncq_flags |= ncq_saw_sdb;
		ata_port_dbg(ap, "SWNCQ: qc_active 0x%X "
			     "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			     pp->qc_active, pp->dhfis_bits,
			     pp->dmafis_bits, readl(pp->sactive_block));
		if (nv_swncq_sdbfis(ap) < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The device-to-host register FIS acknowledges the command
		 * issued last; record its tag and make sure it does not
		 * collide with an SDB or backout FIS.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			goto irq_error;
		}

		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			/* no DMA Setup seen yet; if the device is not busy,
			 * the next deferred command may be issued.
			 */
			ata_stat = ap->ops->sff_check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			if (pp->defer_queue.defer_bits) {
				ata_port_dbg(ap, "send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the DMA controller with the appropriate PRD
		 * buffers and start the DMA transfer for the tagged command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}

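/* Host-level interrupt handler: reads the MCP55 interrupt status once and
 * hands each port its slice of the status word, using the SWNCQ path only
 * while NCQ commands are outstanding (link.sactive != 0).
 */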
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;
	u32 irq_stat;

	spin_lock_irqsave(&host->lock, flags);

	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->link.sactive) {
			nv_swncq_host_interrupt(ap, (u16)irq_stat);
			handled = 1;
		} else {
			if (irq_stat)	/* leave the hotplug bits pending */
				nv_swncq_irq_clear(ap, 0xfff0);

			handled += nv_host_intr(ap, (u8)irq_stat);
		}
		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

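/* PCI probe: picks ADMA or SWNCQ operation when enabled for the chipset,
 * prepares the BMDMA host, maps the MMIO BAR for SCR access, and starts
 * the host with the mode-specific interrupt handler.
 */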
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct ata_port_info *ppi[] = { NULL, NULL };
	struct nv_pi_priv *ipriv;
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;

	/* Make sure this is a SATA controller by counting the number of
	 * BARs (NVIDIA SATA controllers will always have six). Otherwise,
	 * it's an IDE controller and we ignore it.
	 */
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* determine type and allocate host */
	if (type == CK804 && adma_enabled) {
		dev_notice(&pdev->dev, "Using ADMA mode\n");
		type = ADMA;
	} else if (type == MCP5x && swncq_enabled) {
		dev_notice(&pdev->dev, "Using SWNCQ mode\n");
		type = SWNCQ;
	}

	ppi[0] = &nv_port_info[type];
	ipriv = ppi[0]->private_data;
	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->type = type;
	host->private_data = hpriv;

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	/* init ADMA */
	if (type == ADMA) {
		rc = nv_adma_host_init(host);
		if (rc)
			return rc;
	} else if (type == SWNCQ)
		nv_swncq_host_init(host);

	if (msi_enabled) {
		dev_notice(&pdev->dev, "Using MSI\n");
		pci_enable_msi(pdev);
	}

	pci_set_master(pdev);
	return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
}

#ifdef CONFIG_PM_SLEEP
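/* Resume handler: after powering the device back up, re-enable the
 * extended SATA config space and restore the per-port ADMA enables that
 * were in effect before suspend.
 */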
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;

			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
#endif

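/* Host stop for CK804: disable access to the extended SATA config space
 * that nv_init_one() enabled.
 */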
static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}

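/* Host stop for ADMA: clear the port-enable and PWB bits for both ports,
 * then disable the extended SATA config space as on CK804.
 */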
static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}

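/* Module registration and parameters. The knobs are 0444: visible in
 * sysfs but fixed at module load time. Defaults: ADMA off, SWNCQ on,
 * MSI off.
 */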
module_pci_driver(nv_pci_driver);

module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
module_param_named(msi, msi_enabled, bool, 0444);
MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");