/*
 * sata_mv.c - Marvell SATA support
 */
0039 #include <linux/kernel.h>
0040 #include <linux/module.h>
0041 #include <linux/pci.h>
0042 #include <linux/init.h>
0043 #include <linux/blkdev.h>
0044 #include <linux/delay.h>
0045 #include <linux/interrupt.h>
0046 #include <linux/dmapool.h>
0047 #include <linux/dma-mapping.h>
0048 #include <linux/device.h>
0049 #include <linux/clk.h>
0050 #include <linux/phy/phy.h>
0051 #include <linux/platform_device.h>
0052 #include <linux/ata_platform.h>
0053 #include <linux/mbus.h>
0054 #include <linux/bitops.h>
0055 #include <linux/gfp.h>
0056 #include <linux/of.h>
0057 #include <linux/of_irq.h>
0058 #include <scsi/scsi_host.h>
0059 #include <scsi/scsi_cmnd.h>
0060 #include <scsi/scsi_device.h>
0061 #include <linux/libata.h>
0062
0063 #define DRV_NAME "sata_mv"
0064 #define DRV_VERSION "1.28"
0065
0066
0067
0068
0069
0070 #ifdef CONFIG_PCI
0071 static int msi;
0072 module_param(msi, int, S_IRUGO);
0073 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
0074 #endif
0075
0076 static int irq_coalescing_io_count;
0077 module_param(irq_coalescing_io_count, int, S_IRUGO);
0078 MODULE_PARM_DESC(irq_coalescing_io_count,
0079 "IRQ coalescing I/O count threshold (0..255)");
0080
0081 static int irq_coalescing_usecs;
0082 module_param(irq_coalescing_usecs, int, S_IRUGO);
0083 MODULE_PARM_DESC(irq_coalescing_usecs,
0084 "IRQ coalescing time threshold in usecs");
0085
0086 enum {
0087
0088 MV_PRIMARY_BAR = 0,
0089 MV_IO_BAR = 2,
0090 MV_MISC_BAR = 3,
0091
0092 MV_MAJOR_REG_AREA_SZ = 0x10000,
0093 MV_MINOR_REG_AREA_SZ = 0x2000,
0094
0095
0096 COAL_CLOCKS_PER_USEC = 150,
0097 MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1),
0098 MAX_COAL_IO_COUNT = 255,
0099
0100 MV_PCI_REG_BASE = 0,
0101
0102
0103
0104
0105
0106
0107
0108
0109 COAL_REG_BASE = 0x18000,
0110 IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08),
0111 ALL_PORTS_COAL_IRQ = (1 << 4),
0112
0113 IRQ_COAL_IO_THRESHOLD = (COAL_REG_BASE + 0xcc),
0114 IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),
0115
0116
0117
0118
0119 TRAN_COAL_CAUSE_LO = (COAL_REG_BASE + 0x88),
0120 TRAN_COAL_CAUSE_HI = (COAL_REG_BASE + 0x8c),
0121
0122 SATAHC0_REG_BASE = 0x20000,
0123 FLASH_CTL = 0x1046c,
0124 GPIO_PORT_CTL = 0x104f0,
0125 RESET_CFG = 0x180d8,
0126
0127 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
0128 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
0129 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ,
0130 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
0131
0132 MV_MAX_Q_DEPTH = 32,
0133 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
0134
0135
0136
0137
0138
0139 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
0140 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
0141 MV_MAX_SG_CT = 256,
0142 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
0143
0144
0145 MV_PORT_HC_SHIFT = 2,
0146 MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT),
0147
0148 MV_PORT_MASK = (MV_PORTS_PER_HC - 1),
0149
0150
0151 MV_FLAG_DUAL_HC = (1 << 30),
0152
0153 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
0154
0155 MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
0156
0157 MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ |
0158 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,
0159
0160 MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN,
0161
0162 CRQB_FLAG_READ = (1 << 0),
0163 CRQB_TAG_SHIFT = 1,
0164 CRQB_IOID_SHIFT = 6,
0165 CRQB_PMP_SHIFT = 12,
0166 CRQB_HOSTQ_SHIFT = 17,
0167 CRQB_CMD_ADDR_SHIFT = 8,
0168 CRQB_CMD_CS = (0x2 << 11),
0169 CRQB_CMD_LAST = (1 << 15),
0170
0171 CRPB_FLAG_STATUS_SHIFT = 8,
0172 CRPB_IOID_SHIFT_6 = 5,
0173 CRPB_IOID_SHIFT_7 = 7,
0174
0175 EPRD_FLAG_END_OF_TBL = (1 << 31),
0176
0177
0178
0179 MV_PCI_COMMAND = 0xc00,
0180 MV_PCI_COMMAND_MWRCOM = (1 << 4),
0181 MV_PCI_COMMAND_MRDTRIG = (1 << 7),
0182
0183 PCI_MAIN_CMD_STS = 0xd30,
0184 STOP_PCI_MASTER = (1 << 2),
0185 PCI_MASTER_EMPTY = (1 << 3),
0186 GLOB_SFT_RST = (1 << 4),
0187
0188 MV_PCI_MODE = 0xd00,
0189 MV_PCI_MODE_MASK = 0x30,
0190
0191 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
0192 MV_PCI_DISC_TIMER = 0xd04,
0193 MV_PCI_MSI_TRIGGER = 0xc38,
0194 MV_PCI_SERR_MASK = 0xc28,
0195 MV_PCI_XBAR_TMOUT = 0x1d04,
0196 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
0197 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
0198 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
0199 MV_PCI_ERR_COMMAND = 0x1d50,
0200
0201 PCI_IRQ_CAUSE = 0x1d58,
0202 PCI_IRQ_MASK = 0x1d5c,
0203 PCI_UNMASK_ALL_IRQS = 0x7fffff,
0204
0205 PCIE_IRQ_CAUSE = 0x1900,
0206 PCIE_IRQ_MASK = 0x1910,
0207 PCIE_UNMASK_ALL_IRQS = 0x40a,
0208
0209
0210 PCI_HC_MAIN_IRQ_CAUSE = 0x1d60,
0211 PCI_HC_MAIN_IRQ_MASK = 0x1d64,
0212 SOC_HC_MAIN_IRQ_CAUSE = 0x20020,
0213 SOC_HC_MAIN_IRQ_MASK = 0x20024,
0214 ERR_IRQ = (1 << 0),
0215 DONE_IRQ = (1 << 1),
0216 HC0_IRQ_PEND = 0x1ff,
0217 HC_SHIFT = 9,
0218 DONE_IRQ_0_3 = 0x000000aa,
0219 DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT),
0220 PCI_ERR = (1 << 18),
0221 TRAN_COAL_LO_DONE = (1 << 19),
0222 TRAN_COAL_HI_DONE = (1 << 20),
0223 PORTS_0_3_COAL_DONE = (1 << 8),
0224 PORTS_4_7_COAL_DONE = (1 << 17),
0225 ALL_PORTS_COAL_DONE = (1 << 21),
0226 GPIO_INT = (1 << 22),
0227 SELF_INT = (1 << 23),
0228 TWSI_INT = (1 << 24),
0229 HC_MAIN_RSVD = (0x7f << 25),
0230 HC_MAIN_RSVD_5 = (0x1fff << 19),
0231 HC_MAIN_RSVD_SOC = (0x3fffffb << 6),
0232
0233
0234 HC_CFG = 0x00,
0235
0236 HC_IRQ_CAUSE = 0x14,
0237 DMA_IRQ = (1 << 0),
0238 HC_COAL_IRQ = (1 << 4),
0239 DEV_IRQ = (1 << 8),
0240
0241
0242
0243
0244
0245
0246
0247
0248 HC_IRQ_COAL_IO_THRESHOLD = 0x000c,
0249 HC_IRQ_COAL_TIME_THRESHOLD = 0x0010,
0250
0251 SOC_LED_CTRL = 0x2c,
0252 SOC_LED_CTRL_BLINK = (1 << 0),
0253 SOC_LED_CTRL_ACT_PRESENCE = (1 << 2),
0254
0255
0256
0257 SHD_BLK = 0x100,
0258 SHD_CTL_AST = 0x20,
0259
0260
0261 SATA_STATUS = 0x300,
0262 SATA_ACTIVE = 0x350,
0263 FIS_IRQ_CAUSE = 0x364,
0264 FIS_IRQ_CAUSE_AN = (1 << 9),
0265
0266 LTMODE = 0x30c,
0267 LTMODE_BIT8 = (1 << 8),
0268
0269 PHY_MODE2 = 0x330,
0270 PHY_MODE3 = 0x310,
0271
0272 PHY_MODE4 = 0x314,
0273 PHY_MODE4_CFG_MASK = 0x00000003,
0274 PHY_MODE4_CFG_VALUE = 0x00000001,
0275 PHY_MODE4_RSVD_ZEROS = 0x5de3fffa,
0276 PHY_MODE4_RSVD_ONES = 0x00000005,
0277
0278 SATA_IFCTL = 0x344,
0279 SATA_TESTCTL = 0x348,
0280 SATA_IFSTAT = 0x34c,
0281 VENDOR_UNIQUE_FIS = 0x35c,
0282
0283 FISCFG = 0x360,
0284 FISCFG_WAIT_DEV_ERR = (1 << 8),
0285 FISCFG_SINGLE_SYNC = (1 << 16),
0286
0287 PHY_MODE9_GEN2 = 0x398,
0288 PHY_MODE9_GEN1 = 0x39c,
0289 PHYCFG_OFS = 0x3a0,
0290
0291 MV5_PHY_MODE = 0x74,
0292 MV5_LTMODE = 0x30,
0293 MV5_PHY_CTL = 0x0C,
0294 SATA_IFCFG = 0x050,
0295 LP_PHY_CTL = 0x058,
0296 LP_PHY_CTL_PIN_PU_PLL = (1 << 0),
0297 LP_PHY_CTL_PIN_PU_RX = (1 << 1),
0298 LP_PHY_CTL_PIN_PU_TX = (1 << 2),
0299 LP_PHY_CTL_GEN_TX_3G = (1 << 5),
0300 LP_PHY_CTL_GEN_RX_3G = (1 << 9),
0301
0302 MV_M2_PREAMP_MASK = 0x7e0,
0303
0304
0305 EDMA_CFG = 0,
0306 EDMA_CFG_Q_DEPTH = 0x1f,
0307 EDMA_CFG_NCQ = (1 << 5),
0308 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14),
0309 EDMA_CFG_RD_BRST_EXT = (1 << 11),
0310 EDMA_CFG_WR_BUFF_LEN = (1 << 13),
0311 EDMA_CFG_EDMA_FBS = (1 << 16),
0312 EDMA_CFG_FBS = (1 << 26),
0313
0314 EDMA_ERR_IRQ_CAUSE = 0x8,
0315 EDMA_ERR_IRQ_MASK = 0xc,
0316 EDMA_ERR_D_PAR = (1 << 0),
0317 EDMA_ERR_PRD_PAR = (1 << 1),
0318 EDMA_ERR_DEV = (1 << 2),
0319 EDMA_ERR_DEV_DCON = (1 << 3),
0320 EDMA_ERR_DEV_CON = (1 << 4),
0321 EDMA_ERR_SERR = (1 << 5),
0322 EDMA_ERR_SELF_DIS = (1 << 7),
0323 EDMA_ERR_SELF_DIS_5 = (1 << 8),
0324 EDMA_ERR_BIST_ASYNC = (1 << 8),
0325 EDMA_ERR_TRANS_IRQ_7 = (1 << 8),
0326 EDMA_ERR_CRQB_PAR = (1 << 9),
0327 EDMA_ERR_CRPB_PAR = (1 << 10),
0328 EDMA_ERR_INTRL_PAR = (1 << 11),
0329 EDMA_ERR_IORDY = (1 << 12),
0330
0331 EDMA_ERR_LNK_CTRL_RX = (0xf << 13),
0332 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13),
0333 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14),
0334 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
0335 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16),
0336
0337 EDMA_ERR_LNK_DATA_RX = (0xf << 17),
0338
0339 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),
0340 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21),
0341 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22),
0342 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23),
0343 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24),
0344 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25),
0345
0346 EDMA_ERR_LNK_DATA_TX = (0x1f << 26),
0347
0348 EDMA_ERR_TRANS_PROTO = (1 << 31),
0349 EDMA_ERR_OVERRUN_5 = (1 << 5),
0350 EDMA_ERR_UNDERRUN_5 = (1 << 6),
0351
0352 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
0353 EDMA_ERR_LNK_CTRL_RX_1 |
0354 EDMA_ERR_LNK_CTRL_RX_3 |
0355 EDMA_ERR_LNK_CTRL_TX,
0356
0357 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
0358 EDMA_ERR_PRD_PAR |
0359 EDMA_ERR_DEV_DCON |
0360 EDMA_ERR_DEV_CON |
0361 EDMA_ERR_SERR |
0362 EDMA_ERR_SELF_DIS |
0363 EDMA_ERR_CRQB_PAR |
0364 EDMA_ERR_CRPB_PAR |
0365 EDMA_ERR_INTRL_PAR |
0366 EDMA_ERR_IORDY |
0367 EDMA_ERR_LNK_CTRL_RX_2 |
0368 EDMA_ERR_LNK_DATA_RX |
0369 EDMA_ERR_LNK_DATA_TX |
0370 EDMA_ERR_TRANS_PROTO,
0371
0372 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
0373 EDMA_ERR_PRD_PAR |
0374 EDMA_ERR_DEV_DCON |
0375 EDMA_ERR_DEV_CON |
0376 EDMA_ERR_OVERRUN_5 |
0377 EDMA_ERR_UNDERRUN_5 |
0378 EDMA_ERR_SELF_DIS_5 |
0379 EDMA_ERR_CRQB_PAR |
0380 EDMA_ERR_CRPB_PAR |
0381 EDMA_ERR_INTRL_PAR |
0382 EDMA_ERR_IORDY,
0383
0384 EDMA_REQ_Q_BASE_HI = 0x10,
0385 EDMA_REQ_Q_IN_PTR = 0x14,
0386
0387 EDMA_REQ_Q_OUT_PTR = 0x18,
0388 EDMA_REQ_Q_PTR_SHIFT = 5,
0389
0390 EDMA_RSP_Q_BASE_HI = 0x1c,
0391 EDMA_RSP_Q_IN_PTR = 0x20,
0392 EDMA_RSP_Q_OUT_PTR = 0x24,
0393 EDMA_RSP_Q_PTR_SHIFT = 3,
0394
0395 EDMA_CMD = 0x28,
0396 EDMA_EN = (1 << 0),
0397 EDMA_DS = (1 << 1),
0398 EDMA_RESET = (1 << 2),
0399
0400 EDMA_STATUS = 0x30,
0401 EDMA_STATUS_CACHE_EMPTY = (1 << 6),
0402 EDMA_STATUS_IDLE = (1 << 7),
0403
0404 EDMA_IORDY_TMOUT = 0x34,
0405 EDMA_ARB_CFG = 0x38,
0406
0407 EDMA_HALTCOND = 0x60,
0408 EDMA_UNKNOWN_RSVD = 0x6C,
0409
0410 BMDMA_CMD = 0x224,
0411 BMDMA_STATUS = 0x228,
0412 BMDMA_PRD_LOW = 0x22c,
0413 BMDMA_PRD_HIGH = 0x230,
0414
0415
0416 MV_HP_FLAG_MSI = (1 << 0),
0417 MV_HP_ERRATA_50XXB0 = (1 << 1),
0418 MV_HP_ERRATA_50XXB2 = (1 << 2),
0419 MV_HP_ERRATA_60X1B2 = (1 << 3),
0420 MV_HP_ERRATA_60X1C0 = (1 << 4),
0421 MV_HP_GEN_I = (1 << 6),
0422 MV_HP_GEN_II = (1 << 7),
0423 MV_HP_GEN_IIE = (1 << 8),
0424 MV_HP_PCIE = (1 << 9),
0425 MV_HP_CUT_THROUGH = (1 << 10),
0426 MV_HP_FLAG_SOC = (1 << 11),
0427 MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),
0428 MV_HP_FIX_LP_PHY_CTL = (1 << 13),
0429
0430
0431 MV_PP_FLAG_EDMA_EN = (1 << 0),
0432 MV_PP_FLAG_NCQ_EN = (1 << 1),
0433 MV_PP_FLAG_FBS_EN = (1 << 2),
0434 MV_PP_FLAG_DELAYED_EH = (1 << 3),
0435 MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),
0436 };
0437
0438 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
0439 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
0440 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
0441 #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
0442 #define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
0443
0444 #define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
0445 #define WINDOW_BASE(i) (0x20034 + ((i) << 4))
0446
0447 enum {
0448
0449
0450
0451 MV_DMA_BOUNDARY = 0xffffU,
0452
0453
0454
0455
0456 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
0457
0458
0459 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
0460 };
0461
0462 enum chip_type {
0463 chip_504x,
0464 chip_508x,
0465 chip_5080,
0466 chip_604x,
0467 chip_608x,
0468 chip_6042,
0469 chip_7042,
0470 chip_soc,
0471 };
0472
0473
0474 struct mv_crqb {
0475 __le32 sg_addr;
0476 __le32 sg_addr_hi;
0477 __le16 ctrl_flags;
0478 __le16 ata_cmd[11];
0479 };
0480
0481 struct mv_crqb_iie {
0482 __le32 addr;
0483 __le32 addr_hi;
0484 __le32 flags;
0485 __le32 len;
0486 __le32 ata_cmd[4];
0487 };
0488
0489
0490 struct mv_crpb {
0491 __le16 id;
0492 __le16 flags;
0493 __le32 tmstmp;
0494 };
0495
0496
0497 struct mv_sg {
0498 __le32 addr;
0499 __le32 flags_size;
0500 __le32 addr_hi;
0501 __le32 reserved;
0502 };
0503
0504
0505
0506
0507
0508
0509 struct mv_cached_regs {
0510 u32 fiscfg;
0511 u32 ltmode;
0512 u32 haltcond;
0513 u32 unknown_rsvd;
0514 };
0515
0516 struct mv_port_priv {
0517 struct mv_crqb *crqb;
0518 dma_addr_t crqb_dma;
0519 struct mv_crpb *crpb;
0520 dma_addr_t crpb_dma;
0521 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
0522 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
0523
0524 unsigned int req_idx;
0525 unsigned int resp_idx;
0526
0527 u32 pp_flags;
0528 struct mv_cached_regs cached;
0529 unsigned int delayed_eh_pmp_map;
0530 };
0531
0532 struct mv_port_signal {
0533 u32 amps;
0534 u32 pre;
0535 };
0536
0537 struct mv_host_priv {
0538 u32 hp_flags;
0539 unsigned int board_idx;
0540 u32 main_irq_mask;
0541 struct mv_port_signal signal[8];
0542 const struct mv_hw_ops *ops;
0543 int n_ports;
0544 void __iomem *base;
0545 void __iomem *main_irq_cause_addr;
0546 void __iomem *main_irq_mask_addr;
0547 u32 irq_cause_offset;
0548 u32 irq_mask_offset;
0549 u32 unmask_all_irqs;
0550
0551
0552
0553
0554
0555
0556
0557
0558 struct clk *clk;
0559 struct clk **port_clks;
0560
0561
0562
0563
0564
0565 struct phy **port_phys;
0566
0567
0568
0569
0570
0571 struct dma_pool *crqb_pool;
0572 struct dma_pool *crpb_pool;
0573 struct dma_pool *sg_tbl_pool;
0574 };
0575
0576 struct mv_hw_ops {
0577 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
0578 unsigned int port);
0579 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
0580 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
0581 void __iomem *mmio);
0582 int (*reset_hc)(struct ata_host *host, void __iomem *mmio,
0583 unsigned int n_hc);
0584 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
0585 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
0586 };
0587
0588 static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
0589 static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
0590 static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
0591 static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
0592 static int mv_port_start(struct ata_port *ap);
0593 static void mv_port_stop(struct ata_port *ap);
0594 static int mv_qc_defer(struct ata_queued_cmd *qc);
0595 static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc);
0596 static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc);
0597 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
0598 static int mv_hardreset(struct ata_link *link, unsigned int *class,
0599 unsigned long deadline);
0600 static void mv_eh_freeze(struct ata_port *ap);
0601 static void mv_eh_thaw(struct ata_port *ap);
0602 static void mv6_dev_config(struct ata_device *dev);
0603
0604 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
0605 unsigned int port);
0606 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
0607 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
0608 void __iomem *mmio);
0609 static int mv5_reset_hc(struct ata_host *host, void __iomem *mmio,
0610 unsigned int n_hc);
0611 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
0612 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
0613
0614 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
0615 unsigned int port);
0616 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
0617 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
0618 void __iomem *mmio);
0619 static int mv6_reset_hc(struct ata_host *host, void __iomem *mmio,
0620 unsigned int n_hc);
0621 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
0622 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
0623 void __iomem *mmio);
0624 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
0625 void __iomem *mmio);
0626 static int mv_soc_reset_hc(struct ata_host *host,
0627 void __iomem *mmio, unsigned int n_hc);
0628 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
0629 void __iomem *mmio);
0630 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
0631 static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
0632 void __iomem *mmio, unsigned int port);
0633 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
0634 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
0635 unsigned int port_no);
0636 static int mv_stop_edma(struct ata_port *ap);
0637 static int mv_stop_edma_engine(void __iomem *port_mmio);
0638 static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
0639
0640 static void mv_pmp_select(struct ata_port *ap, int pmp);
0641 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
0642 unsigned long deadline);
0643 static int mv_softreset(struct ata_link *link, unsigned int *class,
0644 unsigned long deadline);
0645 static void mv_pmp_error_handler(struct ata_port *ap);
0646 static void mv_process_crpb_entries(struct ata_port *ap,
0647 struct mv_port_priv *pp);
0648
0649 static void mv_sff_irq_clear(struct ata_port *ap);
0650 static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
0651 static void mv_bmdma_setup(struct ata_queued_cmd *qc);
0652 static void mv_bmdma_start(struct ata_queued_cmd *qc);
0653 static void mv_bmdma_stop(struct ata_queued_cmd *qc);
0654 static u8 mv_bmdma_status(struct ata_port *ap);
0655 static u8 mv_sff_check_status(struct ata_port *ap);
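
/*
 * .sg_tablesize is (MV_MAX_SG_CT / 2) in the templates below because
 * mv_fill_sg() may split a segment at each 64 KiB boundary, which can
 * double the number of ePRD entries needed in the worst case.
 */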
0661 #ifdef CONFIG_PCI
0662 static struct scsi_host_template mv5_sht = {
0663 ATA_BASE_SHT(DRV_NAME),
0664 .sg_tablesize = MV_MAX_SG_CT / 2,
0665 .dma_boundary = MV_DMA_BOUNDARY,
0666 };
0667 #endif
0668 static struct scsi_host_template mv6_sht = {
0669 __ATA_BASE_SHT(DRV_NAME),
0670 .can_queue = MV_MAX_Q_DEPTH - 1,
0671 .sg_tablesize = MV_MAX_SG_CT / 2,
0672 .dma_boundary = MV_DMA_BOUNDARY,
0673 .sdev_groups = ata_ncq_sdev_groups,
0674 .change_queue_depth = ata_scsi_change_queue_depth,
0675 .tag_alloc_policy = BLK_TAG_ALLOC_RR,
0676 .slave_configure = ata_scsi_slave_config
0677 };
0678
0679 static struct ata_port_operations mv5_ops = {
0680 .inherits = &ata_sff_port_ops,
0681
0682 .lost_interrupt = ATA_OP_NULL,
0683
0684 .qc_defer = mv_qc_defer,
0685 .qc_prep = mv_qc_prep,
0686 .qc_issue = mv_qc_issue,
0687
0688 .freeze = mv_eh_freeze,
0689 .thaw = mv_eh_thaw,
0690 .hardreset = mv_hardreset,
0691
0692 .scr_read = mv5_scr_read,
0693 .scr_write = mv5_scr_write,
0694
0695 .port_start = mv_port_start,
0696 .port_stop = mv_port_stop,
0697 };
0698
0699 static struct ata_port_operations mv6_ops = {
0700 .inherits = &ata_bmdma_port_ops,
0701
0702 .lost_interrupt = ATA_OP_NULL,
0703
0704 .qc_defer = mv_qc_defer,
0705 .qc_prep = mv_qc_prep,
0706 .qc_issue = mv_qc_issue,
0707
0708 .dev_config = mv6_dev_config,
0709
0710 .freeze = mv_eh_freeze,
0711 .thaw = mv_eh_thaw,
0712 .hardreset = mv_hardreset,
0713 .softreset = mv_softreset,
0714 .pmp_hardreset = mv_pmp_hardreset,
0715 .pmp_softreset = mv_softreset,
0716 .error_handler = mv_pmp_error_handler,
0717
0718 .scr_read = mv_scr_read,
0719 .scr_write = mv_scr_write,
0720
0721 .sff_check_status = mv_sff_check_status,
0722 .sff_irq_clear = mv_sff_irq_clear,
0723 .check_atapi_dma = mv_check_atapi_dma,
0724 .bmdma_setup = mv_bmdma_setup,
0725 .bmdma_start = mv_bmdma_start,
0726 .bmdma_stop = mv_bmdma_stop,
0727 .bmdma_status = mv_bmdma_status,
0728
0729 .port_start = mv_port_start,
0730 .port_stop = mv_port_stop,
0731 };
0732
0733 static struct ata_port_operations mv_iie_ops = {
0734 .inherits = &mv6_ops,
0735 .dev_config = ATA_OP_NULL,
0736 .qc_prep = mv_qc_prep_iie,
0737 };
0738
0739 static const struct ata_port_info mv_port_info[] = {
0740 {
0741 .flags = MV_GEN_I_FLAGS,
0742 .pio_mask = ATA_PIO4,
0743 .udma_mask = ATA_UDMA6,
0744 .port_ops = &mv5_ops,
0745 },
0746 {
0747 .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
0748 .pio_mask = ATA_PIO4,
0749 .udma_mask = ATA_UDMA6,
0750 .port_ops = &mv5_ops,
0751 },
0752 {
0753 .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
0754 .pio_mask = ATA_PIO4,
0755 .udma_mask = ATA_UDMA6,
0756 .port_ops = &mv5_ops,
0757 },
0758 {
0759 .flags = MV_GEN_II_FLAGS,
0760 .pio_mask = ATA_PIO4,
0761 .udma_mask = ATA_UDMA6,
0762 .port_ops = &mv6_ops,
0763 },
0764 {
0765 .flags = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
0766 .pio_mask = ATA_PIO4,
0767 .udma_mask = ATA_UDMA6,
0768 .port_ops = &mv6_ops,
0769 },
0770 {
0771 .flags = MV_GEN_IIE_FLAGS,
0772 .pio_mask = ATA_PIO4,
0773 .udma_mask = ATA_UDMA6,
0774 .port_ops = &mv_iie_ops,
0775 },
0776 {
0777 .flags = MV_GEN_IIE_FLAGS,
0778 .pio_mask = ATA_PIO4,
0779 .udma_mask = ATA_UDMA6,
0780 .port_ops = &mv_iie_ops,
0781 },
0782 {
0783 .flags = MV_GEN_IIE_FLAGS,
0784 .pio_mask = ATA_PIO4,
0785 .udma_mask = ATA_UDMA6,
0786 .port_ops = &mv_iie_ops,
0787 },
0788 };
0789
0790 static const struct pci_device_id mv_pci_tbl[] = {
0791 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
0792 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
0793 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
0794 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
0795
0796 { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
0797 { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
0798 { PCI_VDEVICE(TTI, 0x1742), chip_6042 },
0799
0800 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
0801 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
0802 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
0803 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
0804 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
0805
0806 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
0807
0808
0809 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
0810
0811
0812 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
0813
0814
0815 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
0816 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
0817
0818 { }
0819 };
0820
0821 static const struct mv_hw_ops mv5xxx_ops = {
0822 .phy_errata = mv5_phy_errata,
0823 .enable_leds = mv5_enable_leds,
0824 .read_preamp = mv5_read_preamp,
0825 .reset_hc = mv5_reset_hc,
0826 .reset_flash = mv5_reset_flash,
0827 .reset_bus = mv5_reset_bus,
0828 };
0829
0830 static const struct mv_hw_ops mv6xxx_ops = {
0831 .phy_errata = mv6_phy_errata,
0832 .enable_leds = mv6_enable_leds,
0833 .read_preamp = mv6_read_preamp,
0834 .reset_hc = mv6_reset_hc,
0835 .reset_flash = mv6_reset_flash,
0836 .reset_bus = mv_reset_pci_bus,
0837 };
0838
0839 static const struct mv_hw_ops mv_soc_ops = {
0840 .phy_errata = mv6_phy_errata,
0841 .enable_leds = mv_soc_enable_leds,
0842 .read_preamp = mv_soc_read_preamp,
0843 .reset_hc = mv_soc_reset_hc,
0844 .reset_flash = mv_soc_reset_flash,
0845 .reset_bus = mv_soc_reset_bus,
0846 };
0847
0848 static const struct mv_hw_ops mv_soc_65n_ops = {
0849 .phy_errata = mv_soc_65n_phy_errata,
0850 .enable_leds = mv_soc_enable_leds,
0851 .reset_hc = mv_soc_reset_hc,
0852 .reset_flash = mv_soc_reset_flash,
0853 .reset_bus = mv_soc_reset_bus,
0854 };
0855
0856
0857
0858
0859
0860 static inline void writelfl(unsigned long data, void __iomem *addr)
0861 {
0862 writel(data, addr);
0863 (void) readl(addr);
0864 }
0865
0866 static inline unsigned int mv_hc_from_port(unsigned int port)
0867 {
0868 return port >> MV_PORT_HC_SHIFT;
0869 }
0870
0871 static inline unsigned int mv_hardport_from_port(unsigned int port)
0872 {
0873 return port & MV_PORT_MASK;
0874 }
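
/*
 * Derive, from a host port number (0..7), the bit shift used with the
 * main_irq_cause/main_irq_mask registers and the "hardport" number
 * (0..3) within that port's host controller.  Done as a macro because
 * it produces two outputs.
 */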
0887 #define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \
0888 { \
0889 shift = mv_hc_from_port(port) * HC_SHIFT; \
0890 hardport = mv_hardport_from_port(port); \
0891 shift += hardport * 2; \
0892 }
0893
0894 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
0895 {
0896 return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
0897 }
0898
0899 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
0900 unsigned int port)
0901 {
0902 return mv_hc_base(base, mv_hc_from_port(port));
0903 }
0904
0905 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
0906 {
0907 return mv_hc_base_from_port(base, port) +
0908 MV_SATAHC_ARBTR_REG_SZ +
0909 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
0910 }
0911
0912 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
0913 {
0914 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
0915 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
0916
0917 return hc_mmio + ofs;
0918 }
0919
0920 static inline void __iomem *mv_host_base(struct ata_host *host)
0921 {
0922 struct mv_host_priv *hpriv = host->private_data;
0923 return hpriv->base;
0924 }
0925
0926 static inline void __iomem *mv_ap_base(struct ata_port *ap)
0927 {
0928 return mv_port_base(mv_host_base(ap->host), ap->port_no);
0929 }
0930
0931 static inline int mv_get_hc_count(unsigned long port_flags)
0932 {
0933 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
0934 }
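
/*
 * mv_save_cached_regs - snapshot the cached per-port registers
 * @ap: port whose FISCFG, LTMODE, EDMA_HALTCOND and EDMA_UNKNOWN_RSVD
 *      values are being cached
 *
 * mv_write_cached_reg() later uses this cache to skip MMIO writes that
 * would not change anything.
 */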
0946 static void mv_save_cached_regs(struct ata_port *ap)
0947 {
0948 void __iomem *port_mmio = mv_ap_base(ap);
0949 struct mv_port_priv *pp = ap->private_data;
0950
0951 pp->cached.fiscfg = readl(port_mmio + FISCFG);
0952 pp->cached.ltmode = readl(port_mmio + LTMODE);
0953 pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
0954 pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
0955 }
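
/*
 * mv_write_cached_reg - write a register only when its value changes
 * @addr: register to write
 * @old: cached copy of the register's last-written value
 * @new: value to write
 *
 * Avoids redundant MMIO writes on hot paths by comparing against the
 * cached copy first.
 */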
0966 static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
0967 {
0968 if (new != *old) {
0969 unsigned long laddr;
0970 *old = new;
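/*
 * A few SATA-interface registers (offsets 0x300..0x33c whose low
 * nibble is 0x4 or 0xc) are written with a read-back flush
 * (writelfl), presumably as a hardware-erratum workaround; all
 * other registers take a plain writel.
 */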
0980 laddr = (unsigned long)addr & 0xffff;
0981 if (laddr >= 0x300 && laddr <= 0x33c) {
0982 laddr &= 0x000f;
0983 if (laddr == 0x4 || laddr == 0xc) {
0984 writelfl(new, addr);
0985 return;
0986 }
0987 }
0988 writel(new, addr);
0989 }
0990 }
0991
0992 static void mv_set_edma_ptrs(void __iomem *port_mmio,
0993 struct mv_host_priv *hpriv,
0994 struct mv_port_priv *pp)
0995 {
0996 u32 index;
0997
0998
0999
1000
1001 pp->req_idx &= MV_MAX_Q_DEPTH_MASK;
1002 index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
1003
1004 WARN_ON(pp->crqb_dma & 0x3ff);
1005 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
1006 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
1007 port_mmio + EDMA_REQ_Q_IN_PTR);
1008 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);
1009
1010
1011
1012
1013 pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;
1014 index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
1015
1016 WARN_ON(pp->crpb_dma & 0xff);
1017 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
1018 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
1019 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
1020 port_mmio + EDMA_RSP_Q_OUT_PTR);
1021 }
1022
1023 static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
1024 {
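/*
 * When per-HC or all-port IRQ coalescing is enabled, the individual
 * "done" interrupts of the covered ports are redundant (completions
 * arrive via the coalescing interrupt instead), so mask them out
 * before writing the hardware register.
 */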
1033 if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
1034 mask &= ~DONE_IRQ_0_3;
1035 if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
1036 mask &= ~DONE_IRQ_4_7;
1037 writelfl(mask, hpriv->main_irq_mask_addr);
1038 }
1039
1040 static void mv_set_main_irq_mask(struct ata_host *host,
1041 u32 disable_bits, u32 enable_bits)
1042 {
1043 struct mv_host_priv *hpriv = host->private_data;
1044 u32 old_mask, new_mask;
1045
1046 old_mask = hpriv->main_irq_mask;
1047 new_mask = (old_mask & ~disable_bits) | enable_bits;
1048 if (new_mask != old_mask) {
1049 hpriv->main_irq_mask = new_mask;
1050 mv_write_main_irq_mask(new_mask, hpriv);
1051 }
1052 }
1053
1054 static void mv_enable_port_irqs(struct ata_port *ap,
1055 unsigned int port_bits)
1056 {
1057 unsigned int shift, hardport, port = ap->port_no;
1058 u32 disable_bits, enable_bits;
1059
1060 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
1061
1062 disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
1063 enable_bits = port_bits << shift;
1064 mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
1065 }
1066
1067 static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
1068 void __iomem *port_mmio,
1069 unsigned int port_irqs)
1070 {
1071 struct mv_host_priv *hpriv = ap->host->private_data;
1072 int hardport = mv_hardport_from_port(ap->port_no);
1073 void __iomem *hc_mmio = mv_hc_base_from_port(
1074 mv_host_base(ap->host), ap->port_no);
1075 u32 hc_irq_cause;
1076
1077
1078 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
1079
1080
1081 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
1082 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
1083
1084
1085 if (IS_GEN_IIE(hpriv))
1086 writelfl(0, port_mmio + FIS_IRQ_CAUSE);
1087
1088 mv_enable_port_irqs(ap, port_irqs);
1089 }
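
/*
 * mv_set_irq_coalescing - program the IRQ coalescing thresholds
 * @host: ATA host
 * @count: completed-I/O count threshold (0 disables coalescing)
 * @usecs: time threshold, in microseconds (0 disables coalescing)
 *
 * On dual-HC (non Gen-I) chips the chip-wide "all ports" coalescing
 * registers are used and the per-HC thresholds are cleared; single-HC
 * chips use the per-HC registers.  The matching coalescing "done"
 * interrupts are unmasked only while coalescing is enabled.
 */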
1091 static void mv_set_irq_coalescing(struct ata_host *host,
1092 unsigned int count, unsigned int usecs)
1093 {
1094 struct mv_host_priv *hpriv = host->private_data;
1095 void __iomem *mmio = hpriv->base, *hc_mmio;
1096 u32 coal_enable = 0;
1097 unsigned long flags;
1098 unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
1099 const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
1100 ALL_PORTS_COAL_DONE;
1101
1102
1103 if (!usecs || !count) {
1104 clks = count = 0;
1105 } else {
1106
1107 clks = usecs * COAL_CLOCKS_PER_USEC;
1108 if (clks > MAX_COAL_TIME_THRESHOLD)
1109 clks = MAX_COAL_TIME_THRESHOLD;
1110 if (count > MAX_COAL_IO_COUNT)
1111 count = MAX_COAL_IO_COUNT;
1112 }
1113
1114 spin_lock_irqsave(&host->lock, flags);
1115 mv_set_main_irq_mask(host, coal_disable, 0);
1116
1117 if (is_dual_hc && !IS_GEN_I(hpriv)) {
1118
1119
1120
1121
1122 writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD);
1123 writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
1124
1125 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
1126 if (count)
1127 coal_enable = ALL_PORTS_COAL_DONE;
1128 clks = count = 0;
1129 }
1130
1131
1132
1133
1134 hc_mmio = mv_hc_base_from_port(mmio, 0);
1135 writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1136 writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1137 writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
1138 if (count)
1139 coal_enable |= PORTS_0_3_COAL_DONE;
1140 if (is_dual_hc) {
1141 hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
1142 writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1143 writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1144 writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
1145 if (count)
1146 coal_enable |= PORTS_4_7_COAL_DONE;
1147 }
1148
1149 mv_set_main_irq_mask(host, 0, coal_enable);
1150 spin_unlock_irqrestore(&host->lock, flags);
1151 }
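
/*
 * mv_start_edma - make sure the eDMA engine is running, in the right mode
 * @ap: port
 * @port_mmio: port register base
 * @pp: port private data
 * @protocol: taskfile protocol of the command about to be issued
 *
 * If eDMA is already enabled but its NCQ setting does not match the new
 * command, it is stopped first.  The engine is then (re)configured via
 * mv_edma_cfg(), the queue pointers are reset, port interrupts are
 * cleared and enabled, and EDMA_EN is set.
 */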
1163 static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
1164 struct mv_port_priv *pp, u8 protocol)
1165 {
1166 int want_ncq = (protocol == ATA_PROT_NCQ);
1167
1168 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1169 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
1170 if (want_ncq != using_ncq)
1171 mv_stop_edma(ap);
1172 }
1173 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
1174 struct mv_host_priv *hpriv = ap->host->private_data;
1175
1176 mv_edma_cfg(ap, want_ncq, 1);
1177
1178 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1179 mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
1180
1181 writelfl(EDMA_EN, port_mmio + EDMA_CMD);
1182 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
1183 }
1184 }
1185
1186 static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
1187 {
1188 void __iomem *port_mmio = mv_ap_base(ap);
1189 const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
1190 const int per_loop = 5, timeout = (15 * 1000 / per_loop);
1191 int i;
1192
1193
1194
1195
1196
1197
1198
1199
1200 for (i = 0; i < timeout; ++i) {
1201 u32 edma_stat = readl(port_mmio + EDMA_STATUS);
1202 if ((edma_stat & empty_idle) == empty_idle)
1203 break;
1204 udelay(per_loop);
1205 }
1206
1207 }
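
/*
 * mv_stop_edma_engine - disable the eDMA engine
 * @port_mmio: port register base
 *
 * Requests eDMA disable (EDMA_DS) and polls for EDMA_EN to clear,
 * giving up after roughly 100 ms.  Returns 0 on success, -EIO if the
 * engine never stopped.
 */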
1216 static int mv_stop_edma_engine(void __iomem *port_mmio)
1217 {
1218 int i;
1219
1220
1221 writelfl(EDMA_DS, port_mmio + EDMA_CMD);
1222
1223
1224 for (i = 10000; i > 0; i--) {
1225 u32 reg = readl(port_mmio + EDMA_CMD);
1226 if (!(reg & EDMA_EN))
1227 return 0;
1228 udelay(10);
1229 }
1230 return -EIO;
1231 }
1232
1233 static int mv_stop_edma(struct ata_port *ap)
1234 {
1235 void __iomem *port_mmio = mv_ap_base(ap);
1236 struct mv_port_priv *pp = ap->private_data;
1237 int err = 0;
1238
1239 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
1240 return 0;
1241 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1242 mv_wait_for_edma_empty_idle(ap);
1243 if (mv_stop_edma_engine(port_mmio)) {
1244 ata_port_err(ap, "Unable to stop eDMA\n");
1245 err = -EIO;
1246 }
1247 mv_edma_cfg(ap, 0, 0);
1248 return err;
1249 }
1250
1251 static void mv_dump_mem(struct device *dev, void __iomem *start, unsigned bytes)
1252 {
1253 int b, w, o;
1254 unsigned char linebuf[38];
1255
1256 for (b = 0; b < bytes; ) {
1257 for (w = 0, o = 0; b < bytes && w < 4; w++) {
1258 o += snprintf(linebuf + o, sizeof(linebuf) - o,
1259 "%08x ", readl(start + b));
1260 b += sizeof(u32);
1261 }
1262 dev_dbg(dev, "%s: %p: %s\n",
1263 __func__, start + b, linebuf);
1264 }
1265 }
1266
1267 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
1268 {
1269 int b, w, o;
1270 u32 dw = 0;
1271 unsigned char linebuf[38];
1272
1273 for (b = 0; b < bytes; ) {
1274 for (w = 0, o = 0; b < bytes && w < 4; w++) {
1275 (void) pci_read_config_dword(pdev, b, &dw);
1276 o += snprintf(linebuf + o, sizeof(linebuf) - o,
1277 "%08x ", dw);
1278 b += sizeof(u32);
1279 }
1280 dev_dbg(&pdev->dev, "%s: %02x: %s\n",
1281 __func__, b, linebuf);
1282 }
1283 }
1284
1285 static void mv_dump_all_regs(void __iomem *mmio_base,
1286 struct pci_dev *pdev)
1287 {
1288 void __iomem *hc_base;
1289 void __iomem *port_base;
1290 int start_port, num_ports, p, start_hc, num_hcs, hc;
1291
1292 start_hc = start_port = 0;
1293 num_ports = 8;
1294 num_hcs = 2;
1295 dev_dbg(&pdev->dev,
1296 "%s: All registers for port(s) %u-%u:\n", __func__,
1297 start_port, num_ports > 1 ? num_ports - 1 : start_port);
1298
1299 dev_dbg(&pdev->dev, "%s: PCI config space regs:\n", __func__);
1300 mv_dump_pci_cfg(pdev, 0x68);
1301
1302 dev_dbg(&pdev->dev, "%s: PCI regs:\n", __func__);
1303 mv_dump_mem(&pdev->dev, mmio_base+0xc00, 0x3c);
1304 mv_dump_mem(&pdev->dev, mmio_base+0xd00, 0x34);
1305 mv_dump_mem(&pdev->dev, mmio_base+0xf00, 0x4);
1306 mv_dump_mem(&pdev->dev, mmio_base+0x1d00, 0x6c);
1307 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
1308 hc_base = mv_hc_base(mmio_base, hc);
1309 dev_dbg(&pdev->dev, "%s: HC regs (HC %i):\n", __func__, hc);
1310 mv_dump_mem(&pdev->dev, hc_base, 0x1c);
1311 }
1312 for (p = start_port; p < start_port + num_ports; p++) {
1313 port_base = mv_port_base(mmio_base, p);
1314 dev_dbg(&pdev->dev, "%s: EDMA regs (port %i):\n", __func__, p);
1315 mv_dump_mem(&pdev->dev, port_base, 0x54);
1316 dev_dbg(&pdev->dev, "%s: SATA regs (port %i):\n", __func__, p);
1317 mv_dump_mem(&pdev->dev, port_base+0x300, 0x60);
1318 }
1319 }
1320
1321 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1322 {
1323 unsigned int ofs;
1324
1325 switch (sc_reg_in) {
1326 case SCR_STATUS:
1327 case SCR_CONTROL:
1328 case SCR_ERROR:
1329 ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
1330 break;
1331 case SCR_ACTIVE:
1332 ofs = SATA_ACTIVE;
1333 break;
1334 default:
1335 ofs = 0xffffffffU;
1336 break;
1337 }
1338 return ofs;
1339 }
1340
1341 static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
1342 {
1343 unsigned int ofs = mv_scr_offset(sc_reg_in);
1344
1345 if (ofs != 0xffffffffU) {
1346 *val = readl(mv_ap_base(link->ap) + ofs);
1347 return 0;
1348 } else
1349 return -EINVAL;
1350 }
1351
1352 static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
1353 {
1354 unsigned int ofs = mv_scr_offset(sc_reg_in);
1355
1356 if (ofs != 0xffffffffU) {
1357 void __iomem *addr = mv_ap_base(link->ap) + ofs;
1358 struct mv_host_priv *hpriv = link->ap->host->private_data;
1359 if (sc_reg_in == SCR_CONTROL) {
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373 if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
1374 val |= 0xf000;
1375
1376 if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) {
1377 void __iomem *lp_phy_addr =
1378 mv_ap_base(link->ap) + LP_PHY_CTL;
1379
1380
1381
1382 u32 lp_phy_val =
1383 LP_PHY_CTL_PIN_PU_PLL |
1384 LP_PHY_CTL_PIN_PU_RX |
1385 LP_PHY_CTL_PIN_PU_TX;
1386
1387 if ((val & 0xf0) != 0x10)
1388 lp_phy_val |=
1389 LP_PHY_CTL_GEN_TX_3G |
1390 LP_PHY_CTL_GEN_RX_3G;
1391
1392 writelfl(lp_phy_val, lp_phy_addr);
1393 }
1394 }
1395 writelfl(val, addr);
1396 return 0;
1397 } else
1398 return -EINVAL;
1399 }
1400
1401 static void mv6_dev_config(struct ata_device *adev)
1402 {
1403
1404
1405
1406
1407
1408
1409 if (adev->flags & ATA_DFLAG_NCQ) {
1410 if (sata_pmp_attached(adev->link->ap)) {
1411 adev->flags &= ~ATA_DFLAG_NCQ;
1412 ata_dev_info(adev,
1413 "NCQ disabled for command-based switching\n");
1414 }
1415 }
1416 }
1417
1418 static int mv_qc_defer(struct ata_queued_cmd *qc)
1419 {
1420 struct ata_link *link = qc->dev->link;
1421 struct ata_port *ap = link->ap;
1422 struct mv_port_priv *pp = ap->private_data;
1423
1424
1425
1426
1427
1428 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
1429 return ATA_DEFER_PORT;
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439 if (unlikely(ap->excl_link)) {
1440 if (link == ap->excl_link) {
1441 if (ap->nr_active_links)
1442 return ATA_DEFER_PORT;
1443 qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
1444 return 0;
1445 } else
1446 return ATA_DEFER_PORT;
1447 }
1448
1449
1450
1451
1452 if (ap->nr_active_links == 0)
1453 return 0;
1454
1455
1456
1457
1458
1459
1460
1461 if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
1462 (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1463 if (ata_is_ncq(qc->tf.protocol))
1464 return 0;
1465 else {
1466 ap->excl_link = link;
1467 return ATA_DEFER_PORT;
1468 }
1469 }
1470
1471 return ATA_DEFER_PORT;
1472 }
1473
1474 static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
1475 {
1476 struct mv_port_priv *pp = ap->private_data;
1477 void __iomem *port_mmio;
1478
1479 u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg;
1480 u32 ltmode, *old_ltmode = &pp->cached.ltmode;
1481 u32 haltcond, *old_haltcond = &pp->cached.haltcond;
1482
1483 ltmode = *old_ltmode & ~LTMODE_BIT8;
1484 haltcond = *old_haltcond | EDMA_ERR_DEV;
1485
1486 if (want_fbs) {
1487 fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
1488 ltmode = *old_ltmode | LTMODE_BIT8;
1489 if (want_ncq)
1490 haltcond &= ~EDMA_ERR_DEV;
1491 else
1492 fiscfg |= FISCFG_WAIT_DEV_ERR;
1493 } else {
1494 fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
1495 }
1496
1497 port_mmio = mv_ap_base(ap);
1498 mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
1499 mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
1500 mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
1501 }
1502
1503 static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
1504 {
1505 struct mv_host_priv *hpriv = ap->host->private_data;
1506 u32 old, new;
1507
1508
1509 old = readl(hpriv->base + GPIO_PORT_CTL);
1510 if (want_ncq)
1511 new = old | (1 << 22);
1512 else
1513 new = old & ~(1 << 22);
1514 if (new != old)
1515 writel(new, hpriv->base + GPIO_PORT_CTL);
1516 }
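
/*
 * mv_bmdma_enable_iie - gate basic (legacy) DMA on Gen-IIe chips
 * @ap: port
 * @enable_bmdma: nonzero to allow BMDMA, zero to forbid it
 *
 * Bit 0 of the (otherwise undocumented) EDMA_UNKNOWN_RSVD register
 * controls whether basic DMA may be used on Gen-IIe.  mv_edma_cfg()
 * clears it whenever eDMA is wanted, since the two DMA modes are not
 * used at the same time.
 */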
1530 static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
1531 {
1532 struct mv_port_priv *pp = ap->private_data;
1533 u32 new, *old = &pp->cached.unknown_rsvd;
1534
1535 if (enable_bmdma)
1536 new = *old | 1;
1537 else
1538 new = *old & ~1;
1539 mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
1540 }
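
/*
 * On SoC variants the driver switches the activity LED into "blink"
 * mode while any port is using NCQ (apparently a hardware quirk) and
 * restores it only once no port still has NCQ enabled.  The
 * MV_HP_QUIRK_LED_BLINK_EN flag tracks the current state so the LED
 * control register is only touched on transitions.
 */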
1556 static void mv_soc_led_blink_enable(struct ata_port *ap)
1557 {
1558 struct ata_host *host = ap->host;
1559 struct mv_host_priv *hpriv = host->private_data;
1560 void __iomem *hc_mmio;
1561 u32 led_ctrl;
1562
1563 if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
1564 return;
1565 hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
1566 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1567 led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1568 writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1569 }
1570
1571 static void mv_soc_led_blink_disable(struct ata_port *ap)
1572 {
1573 struct ata_host *host = ap->host;
1574 struct mv_host_priv *hpriv = host->private_data;
1575 void __iomem *hc_mmio;
1576 u32 led_ctrl;
1577 unsigned int port;
1578
1579 if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
1580 return;
1581
1582
1583 for (port = 0; port < hpriv->n_ports; port++) {
1584 struct ata_port *this_ap = host->ports[port];
1585 struct mv_port_priv *pp = this_ap->private_data;
1586
1587 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
1588 return;
1589 }
1590
1591 hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
1592 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1593 led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1594 writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1595 }
1596
1597 static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
1598 {
1599 u32 cfg;
1600 struct mv_port_priv *pp = ap->private_data;
1601 struct mv_host_priv *hpriv = ap->host->private_data;
1602 void __iomem *port_mmio = mv_ap_base(ap);
1603
1604
1605 cfg = EDMA_CFG_Q_DEPTH;
1606 pp->pp_flags &=
1607 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
1608
1609 if (IS_GEN_I(hpriv))
1610 cfg |= (1 << 8);
1611
1612 else if (IS_GEN_II(hpriv)) {
1613 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1614 mv_60x1_errata_sata25(ap, want_ncq);
1615
1616 } else if (IS_GEN_IIE(hpriv)) {
1617 int want_fbs = sata_pmp_attached(ap);
1618
1619
1620
1621
1622
1623
1624
1625
1626 want_fbs &= want_ncq;
1627
1628 mv_config_fbs(ap, want_ncq, want_fbs);
1629
1630 if (want_fbs) {
1631 pp->pp_flags |= MV_PP_FLAG_FBS_EN;
1632 cfg |= EDMA_CFG_EDMA_FBS;
1633 }
1634
1635 cfg |= (1 << 23);
1636 if (want_edma) {
1637 cfg |= (1 << 22);
1638 if (!IS_SOC(hpriv))
1639 cfg |= (1 << 18);
1640 }
1641 if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
1642 cfg |= (1 << 17);
1643 mv_bmdma_enable_iie(ap, !want_edma);
1644
1645 if (IS_SOC(hpriv)) {
1646 if (want_ncq)
1647 mv_soc_led_blink_enable(ap);
1648 else
1649 mv_soc_led_blink_disable(ap);
1650 }
1651 }
1652
1653 if (want_ncq) {
1654 cfg |= EDMA_CFG_NCQ;
1655 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1656 }
1657
1658 writelfl(cfg, port_mmio + EDMA_CFG);
1659 }
1660
1661 static void mv_port_free_dma_mem(struct ata_port *ap)
1662 {
1663 struct mv_host_priv *hpriv = ap->host->private_data;
1664 struct mv_port_priv *pp = ap->private_data;
1665 int tag;
1666
1667 if (pp->crqb) {
1668 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1669 pp->crqb = NULL;
1670 }
1671 if (pp->crpb) {
1672 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1673 pp->crpb = NULL;
1674 }
1675
1676
1677
1678
1679 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1680 if (pp->sg_tbl[tag]) {
1681 if (tag == 0 || !IS_GEN_I(hpriv))
1682 dma_pool_free(hpriv->sg_tbl_pool,
1683 pp->sg_tbl[tag],
1684 pp->sg_tbl_dma[tag]);
1685 pp->sg_tbl[tag] = NULL;
1686 }
1687 }
1688 }
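
/*
 * mv_port_start - allocate and initialize per-port DMA structures
 * @ap: port being started
 *
 * Allocates the command request (CRQB) and response (CRPB) rings and
 * the scatter/gather tables from the host's DMA pools.  Gen-I chips
 * get a single SG table shared by all tags; later chips get one per
 * tag.  Finally the cached registers are snapshotted and eDMA is left
 * configured but disabled.
 */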
1700 static int mv_port_start(struct ata_port *ap)
1701 {
1702 struct device *dev = ap->host->dev;
1703 struct mv_host_priv *hpriv = ap->host->private_data;
1704 struct mv_port_priv *pp;
1705 unsigned long flags;
1706 int tag;
1707
1708 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1709 if (!pp)
1710 return -ENOMEM;
1711 ap->private_data = pp;
1712
1713 pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1714 if (!pp->crqb)
1715 return -ENOMEM;
1716
1717 pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1718 if (!pp->crpb)
1719 goto out_port_free_dma_mem;
1720
1721
1722 if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
1723 ap->flags |= ATA_FLAG_AN;
1724
1725
1726
1727
1728 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1729 if (tag == 0 || !IS_GEN_I(hpriv)) {
1730 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1731 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1732 if (!pp->sg_tbl[tag])
1733 goto out_port_free_dma_mem;
1734 } else {
1735 pp->sg_tbl[tag] = pp->sg_tbl[0];
1736 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1737 }
1738 }
1739
1740 spin_lock_irqsave(ap->lock, flags);
1741 mv_save_cached_regs(ap);
1742 mv_edma_cfg(ap, 0, 0);
1743 spin_unlock_irqrestore(ap->lock, flags);
1744
1745 return 0;
1746
1747 out_port_free_dma_mem:
1748 mv_port_free_dma_mem(ap);
1749 return -ENOMEM;
1750 }
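
/*
 * mv_port_stop - stop eDMA and release per-port DMA memory
 * @ap: port being stopped
 *
 * Stops the eDMA engine and masks this port's interrupts under the
 * port lock, then frees everything allocated by mv_port_start().
 */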
1761 static void mv_port_stop(struct ata_port *ap)
1762 {
1763 unsigned long flags;
1764
1765 spin_lock_irqsave(ap->lock, flags);
1766 mv_stop_edma(ap);
1767 mv_enable_port_irqs(ap, 0);
1768 spin_unlock_irqrestore(ap->lock, flags);
1769 mv_port_free_dma_mem(ap);
1770 }
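
/*
 * mv_fill_sg - populate the hardware scatter/gather (ePRD) table
 * @qc: command whose SG list is being translated
 *
 * Each ePRD entry covers at most 64 KiB and must not cross a 64 KiB
 * boundary, so larger segments are split.  The final entry is marked
 * with EPRD_FLAG_END_OF_TBL.
 */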
1781 static void mv_fill_sg(struct ata_queued_cmd *qc)
1782 {
1783 struct mv_port_priv *pp = qc->ap->private_data;
1784 struct scatterlist *sg;
1785 struct mv_sg *mv_sg, *last_sg = NULL;
1786 unsigned int si;
1787
1788 mv_sg = pp->sg_tbl[qc->hw_tag];
1789 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1790 dma_addr_t addr = sg_dma_address(sg);
1791 u32 sg_len = sg_dma_len(sg);
1792
1793 while (sg_len) {
1794 u32 offset = addr & 0xffff;
1795 u32 len = sg_len;
1796
1797 if (offset + len > 0x10000)
1798 len = 0x10000 - offset;
1799
1800 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1801 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1802 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1803 mv_sg->reserved = 0;
1804
1805 sg_len -= len;
1806 addr += len;
1807
1808 last_sg = mv_sg;
1809 mv_sg++;
1810 }
1811 }
1812
1813 if (likely(last_sg))
1814 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1815 mb();
1816 }
1817
1818 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1819 {
1820 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1821 (last ? CRQB_CMD_LAST : 0);
1822 *cmdw = cpu_to_le16(tmp);
1823 }
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833 static void mv_sff_irq_clear(struct ata_port *ap)
1834 {
1835 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
1836 }
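
/*
 * mv_check_atapi_dma - filter ATAPI commands that are unsuitable for DMA
 * @qc: queued command to check
 *
 * Only plain READ/WRITE packet commands (and a few GPCMDs) with a known
 * transfer length are allowed to use BMDMA; everything else is forced
 * to PIO.  Returns 0 when DMA is acceptable, -EOPNOTSUPP otherwise.
 */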
1849 static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
1850 {
1851 struct scsi_cmnd *scmd = qc->scsicmd;
1852
1853 if (scmd) {
1854 switch (scmd->cmnd[0]) {
1855 case READ_6:
1856 case READ_10:
1857 case READ_12:
1858 case WRITE_6:
1859 case WRITE_10:
1860 case WRITE_12:
1861 case GPCMD_READ_CD:
1862 case GPCMD_SEND_DVD_STRUCTURE:
1863 case GPCMD_SEND_CUE_SHEET:
1864 return 0;
1865 }
1866 }
1867 return -EOPNOTSUPP;
1868 }
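
/*
 * mv_bmdma_setup - prepare the BMDMA engine for a command
 * @qc: queued command
 *
 * Builds the ePRD table, clears the BMDMA command register, loads the
 * ePRD table address, and writes the ATA command via the SFF hooks.
 */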
1877 static void mv_bmdma_setup(struct ata_queued_cmd *qc)
1878 {
1879 struct ata_port *ap = qc->ap;
1880 void __iomem *port_mmio = mv_ap_base(ap);
1881 struct mv_port_priv *pp = ap->private_data;
1882
1883 mv_fill_sg(qc);
1884
1885
1886 writel(0, port_mmio + BMDMA_CMD);
1887
1888
1889 writel((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16,
1890 port_mmio + BMDMA_PRD_HIGH);
1891 writelfl(pp->sg_tbl_dma[qc->hw_tag],
1892 port_mmio + BMDMA_PRD_LOW);
1893
1894
1895 ap->ops->sff_exec_command(ap, &qc->tf);
1896 }
1897
1898
1899
1900
1901
1902
1903
1904
1905 static void mv_bmdma_start(struct ata_queued_cmd *qc)
1906 {
1907 struct ata_port *ap = qc->ap;
1908 void __iomem *port_mmio = mv_ap_base(ap);
1909 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
1910 u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
1911
1912
1913 writelfl(cmd, port_mmio + BMDMA_CMD);
1914 }
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925 static void mv_bmdma_stop_ap(struct ata_port *ap)
1926 {
1927 void __iomem *port_mmio = mv_ap_base(ap);
1928 u32 cmd;
1929
1930
1931 cmd = readl(port_mmio + BMDMA_CMD);
1932 if (cmd & ATA_DMA_START) {
1933 cmd &= ~ATA_DMA_START;
1934 writelfl(cmd, port_mmio + BMDMA_CMD);
1935
1936
1937 ata_sff_dma_pause(ap);
1938 }
1939 }
1940
1941 static void mv_bmdma_stop(struct ata_queued_cmd *qc)
1942 {
1943 mv_bmdma_stop_ap(qc->ap);
1944 }
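
/*
 * mv_bmdma_status - synthesize an SFF-style BMDMA status byte
 * @ap: port to report on
 *
 * The hardware has no ATA_DMA_INTR bit, so one is faked: when the
 * engine is neither active nor reporting an error, the transfer is
 * stopped and "interrupt" is reported once the device is no longer
 * busy.
 */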
1955 static u8 mv_bmdma_status(struct ata_port *ap)
1956 {
1957 void __iomem *port_mmio = mv_ap_base(ap);
1958 u32 reg, status;
1959
1960
1961
1962
1963
1964 reg = readl(port_mmio + BMDMA_STATUS);
1965 if (reg & ATA_DMA_ACTIVE)
1966 status = ATA_DMA_ACTIVE;
1967 else if (reg & ATA_DMA_ERR)
1968 status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
1969 else {
1970
1971
1972
1973
1974
1975
1976 mv_bmdma_stop_ap(ap);
1977 if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
1978 status = 0;
1979 else
1980 status = ATA_DMA_INTR;
1981 }
1982 return status;
1983 }
1984
1985 static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
1986 {
1987 struct ata_taskfile *tf = &qc->tf;
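/*
 * Erratum workaround (as the function name suggests): WRITE MULTIPLE
 * commands with a multi_count greater than 7 are quietly rewritten as
 * the equivalent non-MULTIPLE PIO write commands, dropping FUA where
 * it cannot be expressed.
 */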
2001 if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
2002 if (qc->dev->multi_count > 7) {
2003 switch (tf->command) {
2004 case ATA_CMD_WRITE_MULTI:
2005 tf->command = ATA_CMD_PIO_WRITE;
2006 break;
2007 case ATA_CMD_WRITE_MULTI_FUA_EXT:
2008 tf->flags &= ~ATA_TFLAG_FUA;
2009 fallthrough;
2010 case ATA_CMD_WRITE_MULTI_EXT:
2011 tf->command = ATA_CMD_PIO_WRITE_EXT;
2012 break;
2013 }
2014 }
2015 }
2016 }
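
/*
 * mv_qc_prep - build a command request block (Gen-I/Gen-II CRQB format)
 * @qc: queued command to prepare
 *
 * Non-eDMA protocols pass through untouched (PIO also gets the WRITE
 * MULTIPLE erratum fixup), as does DATA SET MANAGEMENT, which is issued
 * via BMDMA instead.  For DMA/NCQ the CRQB at the current request index
 * is filled in and the SG table is built.
 */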
2030 static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
2031 {
2032 struct ata_port *ap = qc->ap;
2033 struct mv_port_priv *pp = ap->private_data;
2034 __le16 *cw;
2035 struct ata_taskfile *tf = &qc->tf;
2036 u16 flags = 0;
2037 unsigned in_index;
2038
2039 switch (tf->protocol) {
2040 case ATA_PROT_DMA:
2041 if (tf->command == ATA_CMD_DSM)
2042 return AC_ERR_OK;
2043 fallthrough;
2044 case ATA_PROT_NCQ:
2045 break;
2046 case ATA_PROT_PIO:
2047 mv_rw_multi_errata_sata24(qc);
2048 return AC_ERR_OK;
2049 default:
2050 return AC_ERR_OK;
2051 }
2052
2053
2054
2055 if (!(tf->flags & ATA_TFLAG_WRITE))
2056 flags |= CRQB_FLAG_READ;
2057 WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
2058 flags |= qc->hw_tag << CRQB_TAG_SHIFT;
2059 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2060
2061
2062 in_index = pp->req_idx;
2063
2064 pp->crqb[in_index].sg_addr =
2065 cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
2066 pp->crqb[in_index].sg_addr_hi =
2067 cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
2068 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
2069
2070 cw = &pp->crqb[in_index].ata_cmd[0];
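/*
 * The CRQB's ata_cmd[] area holds only 11 register writes, so not every
 * taskfile register can be sent.  Ordinary READ/WRITE DMA commands send
 * hob_nsect but drop feature/hob_feature; FPDMA (NCQ) commands send
 * feature/hob_feature (which carry the real sector count) and drop
 * hob_nsect instead.
 */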
2079 switch (tf->command) {
2080 case ATA_CMD_READ:
2081 case ATA_CMD_READ_EXT:
2082 case ATA_CMD_WRITE:
2083 case ATA_CMD_WRITE_EXT:
2084 case ATA_CMD_WRITE_FUA_EXT:
2085 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
2086 break;
2087 case ATA_CMD_FPDMA_READ:
2088 case ATA_CMD_FPDMA_WRITE:
2089 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
2090 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
2091 break;
2092 default:
2093
2094
2095
2096
2097
2098 ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
2099 tf->command);
2100 return AC_ERR_INVALID;
2101 }
2102 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
2103 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
2104 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
2105 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
2106 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
2107 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
2108 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
2109 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
2110 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);
2111
2112 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2113 return AC_ERR_OK;
2114 mv_fill_sg(qc);
2115
2116 return AC_ERR_OK;
2117 }
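
/*
 * mv_qc_prep_iie - build a Gen-IIE command request block
 * @qc: queued command to prepare
 *
 * The Gen-IIE CRQB carries the taskfile register values directly, so
 * the whole taskfile fits.  As in mv_qc_prep(), non-eDMA protocols and
 * DATA SET MANAGEMENT are passed through untouched.
 */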
2131 static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
2132 {
2133 struct ata_port *ap = qc->ap;
2134 struct mv_port_priv *pp = ap->private_data;
2135 struct mv_crqb_iie *crqb;
2136 struct ata_taskfile *tf = &qc->tf;
2137 unsigned in_index;
2138 u32 flags = 0;
2139
2140 if ((tf->protocol != ATA_PROT_DMA) &&
2141 (tf->protocol != ATA_PROT_NCQ))
2142 return AC_ERR_OK;
2143 if (tf->command == ATA_CMD_DSM)
2144 return AC_ERR_OK;
2145
2146
2147 if (!(tf->flags & ATA_TFLAG_WRITE))
2148 flags |= CRQB_FLAG_READ;
2149
2150 WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
2151 flags |= qc->hw_tag << CRQB_TAG_SHIFT;
2152 flags |= qc->hw_tag << CRQB_HOSTQ_SHIFT;
2153 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2154
2155
2156 in_index = pp->req_idx;
2157
2158 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
2159 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
2160 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
2161 crqb->flags = cpu_to_le32(flags);
2162
2163 crqb->ata_cmd[0] = cpu_to_le32(
2164 (tf->command << 16) |
2165 (tf->feature << 24)
2166 );
2167 crqb->ata_cmd[1] = cpu_to_le32(
2168 (tf->lbal << 0) |
2169 (tf->lbam << 8) |
2170 (tf->lbah << 16) |
2171 (tf->device << 24)
2172 );
2173 crqb->ata_cmd[2] = cpu_to_le32(
2174 (tf->hob_lbal << 0) |
2175 (tf->hob_lbam << 8) |
2176 (tf->hob_lbah << 16) |
2177 (tf->hob_feature << 24)
2178 );
2179 crqb->ata_cmd[3] = cpu_to_le32(
2180 (tf->nsect << 0) |
2181 (tf->hob_nsect << 8)
2182 );
2183
2184 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2185 return AC_ERR_OK;
2186 mv_fill_sg(qc);
2187
2188 return AC_ERR_OK;
2189 }
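
/*
 * mv_sff_check_status - read device status, faking BUSY when necessary
 * @ap: port to read
 *
 * After a command has been issued through mv_qc_issue_fis(), the
 * initial ATA_BUSY state may not show up in the shadow status register,
 * which would confuse the SFF state machine.  While
 * MV_PP_FLAG_FAKE_ATA_BUSY is set, report ATA_BUSY until the device
 * shows BUSY/DRQ/ERR for real.
 */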
2204 static u8 mv_sff_check_status(struct ata_port *ap)
2205 {
2206 u8 stat = ioread8(ap->ioaddr.status_addr);
2207 struct mv_port_priv *pp = ap->private_data;
2208
2209 if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
2210 if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
2211 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
2212 else
2213 stat = ATA_BUSY;
2214 }
2215 return stat;
2216 }
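
/*
 * mv_send_fis - transmit a raw FIS via the vendor-unique FIS register
 * @ap: port to send on
 * @fis: FIS payload, as 32-bit words
 * @nwords: number of words in @fis
 *
 * Switches the SATA interface into FIS transmission mode (bit 8 of
 * SATA_IFCTL), writes the FIS one word at a time (flagging the final
 * word), waits briefly for completion, then restores SATA_IFCTL.
 * Returns 0 on success or AC_ERR_OTHER on a transmission error.
 */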
2224 static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
2225 {
2226 void __iomem *port_mmio = mv_ap_base(ap);
2227 u32 ifctl, old_ifctl, ifstat;
2228 int i, timeout = 200, final_word = nwords - 1;
2229
2230
2231 old_ifctl = readl(port_mmio + SATA_IFCTL);
2232 ifctl = 0x100 | (old_ifctl & 0xf);
2233 writelfl(ifctl, port_mmio + SATA_IFCTL);
2234
2235
2236 for (i = 0; i < final_word; ++i)
2237 writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
2238
2239
2240 writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
2241 writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
2242
2243
2244
2245
2246
2247 do {
2248 ifstat = readl(port_mmio + SATA_IFSTAT);
2249 } while (!(ifstat & 0x1000) && --timeout);
2250
2251
2252 writelfl(old_ifctl, port_mmio + SATA_IFCTL);
2253
2254
2255 if ((ifstat & 0x3000) != 0x1000) {
2256 ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
2257 __func__, ifstat);
2258 return AC_ERR_OTHER;
2259 }
2260 return 0;
2261 }
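
/*
 * mv_qc_issue_fis - issue a command by transmitting its register FIS
 * @qc: queued command
 *
 * Used by mv_qc_issue() when writing the taskfile registers is not
 * sufficient (Gen-II READ LOG EXT).  Builds a register FIS from the
 * taskfile, sends it with mv_send_fis(), then sets up the SFF HSM state
 * (and the fake-BUSY flag for PIO protocols) to mirror what the normal
 * issue path would have done.
 */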
2280 static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2281 {
2282 struct ata_port *ap = qc->ap;
2283 struct mv_port_priv *pp = ap->private_data;
2284 struct ata_link *link = qc->dev->link;
2285 u32 fis[5];
2286 int err = 0;
2287
2288 ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
2289 err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
2290 if (err)
2291 return err;
2292
2293 switch (qc->tf.protocol) {
2294 case ATAPI_PROT_PIO:
2295 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2296 fallthrough;
2297 case ATAPI_PROT_NODATA:
2298 ap->hsm_task_state = HSM_ST_FIRST;
2299 break;
2300 case ATA_PROT_PIO:
2301 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2302 if (qc->tf.flags & ATA_TFLAG_WRITE)
2303 ap->hsm_task_state = HSM_ST_FIRST;
2304 else
2305 ap->hsm_task_state = HSM_ST;
2306 break;
2307 default:
2308 ap->hsm_task_state = HSM_ST_LAST;
2309 break;
2310 }
2311
2312 if (qc->tf.flags & ATA_TFLAG_POLLING)
2313 ata_sff_queue_pio_task(link, 0);
2314 return 0;
2315 }
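
/*
 * mv_qc_issue - issue a queued command
 * @qc: queued command to start
 *
 * DMA and NCQ commands (other than DATA SET MANAGEMENT) are handed to
 * the eDMA engine: start it if necessary, advance the request producer
 * index, and write the new index to the hardware.  All other cases stop
 * eDMA and go through the BMDMA/SFF path, except that READ LOG EXT on
 * Gen-II parts is sent by FIS transmission (mv_qc_issue_fis()).
 */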
2329 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
2330 {
2331 static int limit_warnings = 10;
2332 struct ata_port *ap = qc->ap;
2333 void __iomem *port_mmio = mv_ap_base(ap);
2334 struct mv_port_priv *pp = ap->private_data;
2335 u32 in_index;
2336 unsigned int port_irqs;
2337
2338 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
2339
2340 switch (qc->tf.protocol) {
2341 case ATA_PROT_DMA:
2342 if (qc->tf.command == ATA_CMD_DSM) {
2343 if (!ap->ops->bmdma_setup)
2344 return AC_ERR_OTHER;
2345 break;
2346 }
2347 fallthrough;
2348 case ATA_PROT_NCQ:
2349 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
2350 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2351 in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
2352
2353
2354 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
2355 port_mmio + EDMA_REQ_Q_IN_PTR);
2356 return 0;
2357
2358 case ATA_PROT_PIO:
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370 if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
2371 --limit_warnings;
2372 ata_link_warn(qc->dev->link, DRV_NAME
2373 ": attempting PIO w/multiple DRQ: "
2374 "this may fail due to h/w errata\n");
2375 }
2376 fallthrough;
2377 case ATA_PROT_NODATA:
2378 case ATAPI_PROT_PIO:
2379 case ATAPI_PROT_NODATA:
2380 if (ap->flags & ATA_FLAG_PIO_POLLING)
2381 qc->tf.flags |= ATA_TFLAG_POLLING;
2382 break;
2383 }
2384
2385 if (qc->tf.flags & ATA_TFLAG_POLLING)
2386 port_irqs = ERR_IRQ;
2387 else
2388 port_irqs = ERR_IRQ | DONE_IRQ;
2389
2390
2391
2392
2393
2394
2395 mv_stop_edma(ap);
2396 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
2397 mv_pmp_select(ap, qc->dev->link->pmp);
2398
2399 if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
2400 struct mv_host_priv *hpriv = ap->host->private_data;
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412 if (IS_GEN_II(hpriv))
2413 return mv_qc_issue_fis(qc);
2414 }
2415 return ata_bmdma_qc_issue(qc);
2416 }
2417
2418 static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
2419 {
2420 struct mv_port_priv *pp = ap->private_data;
2421 struct ata_queued_cmd *qc;
2422
2423 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
2424 return NULL;
2425 qc = ata_qc_from_tag(ap, ap->link.active_tag);
2426 if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
2427 return qc;
2428 return NULL;
2429 }
2430
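/*
 * mv_pmp_error_handler - error handler for ports behind a port multiplier
 * @ap: port to handle
 *
 * If delayed error handling was armed (MV_PP_FLAG_DELAYED_EH), NCQ error
 * analysis is run for every PMP link recorded in delayed_eh_pmp_map and
 * the port is frozen before control passes to sata_pmp_error_handler().
 */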
2431 static void mv_pmp_error_handler(struct ata_port *ap)
2432 {
2433 unsigned int pmp, pmp_map;
2434 struct mv_port_priv *pp = ap->private_data;
2435
2436 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
2437
2438
2439
2440
2441
2442
2443 pmp_map = pp->delayed_eh_pmp_map;
2444 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
2445 for (pmp = 0; pmp_map != 0; pmp++) {
2446 unsigned int this_pmp = (1 << pmp);
2447 if (pmp_map & this_pmp) {
2448 struct ata_link *link = &ap->pmp_link[pmp];
2449 pmp_map &= ~this_pmp;
2450 ata_eh_analyze_ncq_error(link);
2451 }
2452 }
2453 ata_port_freeze(ap);
2454 }
2455 sata_pmp_error_handler(ap);
2456 }
2457
2458 static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
2459 {
2460 void __iomem *port_mmio = mv_ap_base(ap);
2461
2462 return readl(port_mmio + SATA_TESTCTL) >> 16;
2463 }
2464
2465 static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
2466 {
2467 unsigned int pmp;
2468
2469
2470
2471
2472 for (pmp = 0; pmp_map != 0; pmp++) {
2473 unsigned int this_pmp = (1 << pmp);
2474 if (pmp_map & this_pmp) {
2475 struct ata_link *link = &ap->pmp_link[pmp];
2476 struct ata_eh_info *ehi = &link->eh_info;
2477
2478 pmp_map &= ~this_pmp;
2479 ata_ehi_clear_desc(ehi);
2480 ata_ehi_push_desc(ehi, "dev err");
2481 ehi->err_mask |= AC_ERR_DEV;
2482 ehi->action |= ATA_EH_RESET;
2483 ata_link_abort(link);
2484 }
2485 }
2486 }
2487
2488 static int mv_req_q_empty(struct ata_port *ap)
2489 {
2490 void __iomem *port_mmio = mv_ap_base(ap);
2491 u32 in_ptr, out_ptr;
2492
2493 in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
2494 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2495 out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
2496 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2497 return (in_ptr == out_ptr);
2498 }
2499
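/*
 * mv_handle_fbs_ncq_dev_err - handle an NCQ device error with FBS enabled
 * @ap: port that reported the error
 *
 * Accumulates the set of failed PMP links in delayed_eh_pmp_map and marks
 * newly failed links for reset/abort via mv_pmp_eh_prep(), but defers the
 * heavy lifting until the remaining active links have finished and the
 * EDMA request queue has drained.  At that point outstanding responses
 * are reaped, EDMA is stopped and the port is frozen.  Always returns 1
 * to tell the caller that the error is being handled here.
 */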
2500 static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
2501 {
2502 struct mv_port_priv *pp = ap->private_data;
2503 int failed_links;
2504 unsigned int old_map, new_map;
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
2515 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
2516 pp->delayed_eh_pmp_map = 0;
2517 }
2518 old_map = pp->delayed_eh_pmp_map;
2519 new_map = old_map | mv_get_err_pmp_map(ap);
2520
2521 if (old_map != new_map) {
2522 pp->delayed_eh_pmp_map = new_map;
2523 mv_pmp_eh_prep(ap, new_map & ~old_map);
2524 }
2525 failed_links = hweight16(new_map);
2526
2527 ata_port_info(ap,
2528 "%s: pmp_map=%04x qc_map=%04llx failed_links=%d nr_active_links=%d\n",
2529 __func__, pp->delayed_eh_pmp_map,
2530 ap->qc_active, failed_links,
2531 ap->nr_active_links);
2532
2533 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
2534 mv_process_crpb_entries(ap, pp);
2535 mv_stop_edma(ap);
2536 mv_eh_freeze(ap);
2537 ata_port_info(ap, "%s: done\n", __func__);
2538 return 1;
2539 }
2540 ata_port_info(ap, "%s: waiting\n", __func__);
2541 return 1;
2542 }
2543
2544 static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
2545 {
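/*
 * Handling of non-NCQ device errors under FBS is not implemented here;
 * returning 0 makes the caller fall back to the generic error handling
 * in mv_err_intr().
 */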
2557 return 0;
2558 }
2559
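/*
 * mv_handle_dev_err - try to handle a device error without a full restart
 * @ap: port that reported the error
 * @edma_err_cause: contents of the EDMA error cause register
 *
 * Only applies while EDMA and FBS are enabled and the error cause is
 * limited to EDMA_ERR_DEV (plus, at most, EDMA_ERR_SELF_DIS).  The NCQ
 * and non-NCQ cases are dispatched to their specific handlers; a
 * non-zero return tells mv_err_intr() that the error has been handled.
 */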
2560 static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
2561 {
2562 struct mv_port_priv *pp = ap->private_data;
2563
2564 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
2565 return 0;
2566 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
2567 return 0;
2568
2569 if (!(edma_err_cause & EDMA_ERR_DEV))
2570 return 0;
2571 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
2572 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
2573 return 0;
2574
2575 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
2576
2577
2578
2579
2580
2581 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2582 ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2583 __func__, edma_err_cause, pp->pp_flags);
2584 return 0;
2585 }
2586 return mv_handle_fbs_ncq_dev_err(ap);
2587 } else {
2588
2589
2590
2591
2592
2593 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
2594 ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2595 __func__, edma_err_cause, pp->pp_flags);
2596 return 0;
2597 }
2598 return mv_handle_fbs_non_ncq_dev_err(ap);
2599 }
2600 return 0;
2601 }
2602
2603 static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
2604 {
2605 struct ata_eh_info *ehi = &ap->link.eh_info;
2606 char *when = "idle";
2607
2608 ata_ehi_clear_desc(ehi);
2609 if (edma_was_enabled) {
2610 when = "EDMA enabled";
2611 } else {
2612 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
2613 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
2614 when = "polling";
2615 }
2616 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
2617 ehi->err_mask |= AC_ERR_OTHER;
2618 ehi->action |= ATA_EH_RESET;
2619 ata_port_freeze(ap);
2620 }
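/*
 * mv_err_intr - handle error interrupts on the port
 * @ap: ATA channel to manipulate
 *
 * Reads and clears SError and the EDMA (and, on Gen-IIE, FIS) error
 * cause registers, gives mv_handle_dev_err() a chance to deal with FBS
 * device errors in place, handles asynchronous notification, and
 * otherwise translates the error bits into a libata err_mask and EH
 * actions.  Depending on severity the port is either frozen or the
 * affected link/port is aborted so that EH can recover.
 */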
2633 static void mv_err_intr(struct ata_port *ap)
2634 {
2635 void __iomem *port_mmio = mv_ap_base(ap);
2636 u32 edma_err_cause, eh_freeze_mask, serr = 0;
2637 u32 fis_cause = 0;
2638 struct mv_port_priv *pp = ap->private_data;
2639 struct mv_host_priv *hpriv = ap->host->private_data;
2640 unsigned int action = 0, err_mask = 0;
2641 struct ata_eh_info *ehi = &ap->link.eh_info;
2642 struct ata_queued_cmd *qc;
2643 int abort = 0;
2644
2645
2646
2647
2648
2649
2650 sata_scr_read(&ap->link, SCR_ERROR, &serr);
2651 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
2652
2653 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
2654 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2655 fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
2656 writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
2657 }
2658 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
2659
2660 if (edma_err_cause & EDMA_ERR_DEV) {
2661
2662
2663
2664
2665 if (mv_handle_dev_err(ap, edma_err_cause))
2666 return;
2667 }
2668
2669 qc = mv_get_active_qc(ap);
2670 ata_ehi_clear_desc(ehi);
2671 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
2672 edma_err_cause, pp->pp_flags);
2673
2674 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2675 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
2676 if (fis_cause & FIS_IRQ_CAUSE_AN) {
2677 u32 ec = edma_err_cause &
2678 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
2679 sata_async_notification(ap);
2680 if (!ec)
2681 return;
2682 ata_ehi_push_desc(ehi, "SDB notify");
2683 }
2684 }
2685
2686
2687
2688 if (edma_err_cause & EDMA_ERR_DEV) {
2689 err_mask |= AC_ERR_DEV;
2690 action |= ATA_EH_RESET;
2691 ata_ehi_push_desc(ehi, "dev error");
2692 }
2693 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
2694 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
2695 EDMA_ERR_INTRL_PAR)) {
2696 err_mask |= AC_ERR_ATA_BUS;
2697 action |= ATA_EH_RESET;
2698 ata_ehi_push_desc(ehi, "parity error");
2699 }
2700 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
2701 ata_ehi_hotplugged(ehi);
2702 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
2703 "dev disconnect" : "dev connect");
2704 action |= ATA_EH_RESET;
2705 }
2706
2707
2708
2709
2710
2711 if (IS_GEN_I(hpriv)) {
2712 eh_freeze_mask = EDMA_EH_FREEZE_5;
2713 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
2714 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2715 ata_ehi_push_desc(ehi, "EDMA self-disable");
2716 }
2717 } else {
2718 eh_freeze_mask = EDMA_EH_FREEZE;
2719 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2720 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2721 ata_ehi_push_desc(ehi, "EDMA self-disable");
2722 }
2723 if (edma_err_cause & EDMA_ERR_SERR) {
2724 ata_ehi_push_desc(ehi, "SError=%08x", serr);
2725 err_mask |= AC_ERR_ATA_BUS;
2726 action |= ATA_EH_RESET;
2727 }
2728 }
2729
2730 if (!err_mask) {
2731 err_mask = AC_ERR_OTHER;
2732 action |= ATA_EH_RESET;
2733 }
2734
2735 ehi->serror |= serr;
2736 ehi->action |= action;
2737
2738 if (qc)
2739 qc->err_mask |= err_mask;
2740 else
2741 ehi->err_mask |= err_mask;
2742
2743 if (err_mask == AC_ERR_DEV) {
2744
2745
2746
2747
2748
2749 mv_eh_freeze(ap);
2750 abort = 1;
2751 } else if (edma_err_cause & eh_freeze_mask) {
2752
2753
2754
2755 ata_port_freeze(ap);
2756 } else {
2757 abort = 1;
2758 }
2759
2760 if (abort) {
2761 if (qc)
2762 ata_link_abort(qc->dev->link);
2763 else
2764 ata_port_abort(ap);
2765 }
2766 }
2767
2768 static bool mv_process_crpb_response(struct ata_port *ap,
2769 struct mv_crpb *response, unsigned int tag, int ncq_enabled)
2770 {
2771 u8 ata_status;
2772 u16 edma_status = le16_to_cpu(response->flags);
2773
2774
2775
2776
2777
2778
2779 if (!ncq_enabled) {
2780 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
2781 if (err_cause) {
2782
2783
2784
2785
2786 return false;
2787 }
2788 }
2789 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
2790 if (!ac_err_mask(ata_status))
2791 return true;
2792
2793 return false;
2794 }
2795
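/*
 * mv_process_crpb_entries - reap completed commands from the response queue
 * @ap: port owning the queue
 * @pp: port private data with the cached response index
 *
 * Walks the CRPB ring from the cached resp_idx up to the hardware
 * in-pointer, collects the tags of successfully completed commands into
 * a mask, completes them in one go with ata_qc_complete_multiple(), and
 * writes the new out-pointer back to the hardware.  Gen-I chips do not
 * return a tag in the response, so the link's active tag is used there.
 */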
2796 static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
2797 {
2798 void __iomem *port_mmio = mv_ap_base(ap);
2799 struct mv_host_priv *hpriv = ap->host->private_data;
2800 u32 in_index;
2801 bool work_done = false;
2802 u32 done_mask = 0;
2803 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
2804
2805
2806 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
2807 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2808
2809
2810 while (in_index != pp->resp_idx) {
2811 unsigned int tag;
2812 struct mv_crpb *response = &pp->crpb[pp->resp_idx];
2813
2814 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2815
2816 if (IS_GEN_I(hpriv)) {
2817
2818 tag = ap->link.active_tag;
2819 } else {
2820
2821 tag = le16_to_cpu(response->id) & 0x1f;
2822 }
2823 if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
2824 done_mask |= 1 << tag;
2825 work_done = true;
2826 }
2827
2828 if (work_done) {
2829 ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
2830
2831
2832 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
2833 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
2834 port_mmio + EDMA_RSP_Q_OUT_PTR);
2835 }
2836 }
2837
2838 static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2839 {
2840 struct mv_port_priv *pp;
2841 int edma_was_enabled;
2842
2843
2844
2845
2846
2847
2848 pp = ap->private_data;
2849 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2850
2851
2852
2853 if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2854 mv_process_crpb_entries(ap, pp);
2855 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2856 mv_handle_fbs_ncq_dev_err(ap);
2857 }
2858
2859
2860
2861 if (unlikely(port_cause & ERR_IRQ)) {
2862 mv_err_intr(ap);
2863 } else if (!edma_was_enabled) {
2864 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2865 if (qc)
2866 ata_bmdma_port_intr(ap, qc);
2867 else
2868 mv_unexpected_intr(ap, edma_was_enabled);
2869 }
2870 }
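/*
 * mv_host_intr - handle the per-port interrupts summarized in main_irq_cause
 * @host: host specific structure
 * @main_irq_cause: main interrupt cause register value for the chip
 *
 * Acknowledges the all-ports coalescing interrupt, then walks the ports.
 * Per-HC coalescing and DMA/device causes are acknowledged once per host
 * controller (when the first hard port of that HC is visited), and any
 * DONE/ERR cause for an individual port is forwarded to mv_port_intr().
 * Returns non-zero when at least one host controller had a pending cause.
 */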
2880 static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
2881 {
2882 struct mv_host_priv *hpriv = host->private_data;
2883 void __iomem *mmio = hpriv->base, *hc_mmio;
2884 unsigned int handled = 0, port;
2885
2886
2887 if (main_irq_cause & ALL_PORTS_COAL_DONE)
2888 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
2889
2890 for (port = 0; port < hpriv->n_ports; port++) {
2891 struct ata_port *ap = host->ports[port];
2892 unsigned int p, shift, hardport, port_cause;
2893
2894 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2895
2896
2897
2898
2899 if (hardport == 0) {
2900 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2901 u32 port_mask, ack_irqs;
2902
2903
2904
2905 if (!hc_cause) {
2906 port += MV_PORTS_PER_HC - 1;
2907 continue;
2908 }
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921 ack_irqs = 0;
2922 if (hc_cause & PORTS_0_3_COAL_DONE)
2923 ack_irqs = HC_COAL_IRQ;
2924 for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2925 if ((port + p) >= hpriv->n_ports)
2926 break;
2927 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2928 if (hc_cause & port_mask)
2929 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2930 }
2931 hc_mmio = mv_hc_base_from_port(mmio, port);
2932 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
2933 handled = 1;
2934 }
2935
2936
2937
2938 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
2939 if (port_cause)
2940 mv_port_intr(ap, port_cause);
2941 }
2942 return handled;
2943 }
2944
2945 static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
2946 {
2947 struct mv_host_priv *hpriv = host->private_data;
2948 struct ata_port *ap;
2949 struct ata_queued_cmd *qc;
2950 struct ata_eh_info *ehi;
2951 unsigned int i, err_mask, printed = 0;
2952 u32 err_cause;
2953
2954 err_cause = readl(mmio + hpriv->irq_cause_offset);
2955
2956 dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);
2957
2958 dev_dbg(host->dev, "%s: All regs @ PCI error\n", __func__);
2959 mv_dump_all_regs(mmio, to_pci_dev(host->dev));
2960
2961 writelfl(0, mmio + hpriv->irq_cause_offset);
2962
2963 for (i = 0; i < host->n_ports; i++) {
2964 ap = host->ports[i];
2965 if (!ata_link_offline(&ap->link)) {
2966 ehi = &ap->link.eh_info;
2967 ata_ehi_clear_desc(ehi);
2968 if (!printed++)
2969 ata_ehi_push_desc(ehi,
2970 "PCI err cause 0x%08x", err_cause);
2971 err_mask = AC_ERR_HOST_BUS;
2972 ehi->action = ATA_EH_RESET;
2973 qc = ata_qc_from_tag(ap, ap->link.active_tag);
2974 if (qc)
2975 qc->err_mask |= err_mask;
2976 else
2977 ehi->err_mask |= err_mask;
2978
2979 ata_port_freeze(ap);
2980 }
2981 }
2982 return 1;
2983 }
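/*
 * mv_interrupt - main interrupt handler
 * @irq: unused
 * @dev_instance: pointer to our ata_host information structure
 *
 * Reads the main interrupt cause register under the host lock, masks it
 * with the software copy of the main IRQ mask, and dispatches either to
 * mv_pci_error() or to mv_host_intr().  A reading of all-ones is treated
 * as a dead/removed controller and ignored.  When MSI is in use, the
 * main IRQ mask is cleared for the duration of the handler and restored
 * afterwards.
 */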
2999 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
3000 {
3001 struct ata_host *host = dev_instance;
3002 struct mv_host_priv *hpriv = host->private_data;
3003 unsigned int handled = 0;
3004 int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
3005 u32 main_irq_cause, pending_irqs;
3006
3007 spin_lock(&host->lock);
3008
3009
3010 if (using_msi)
3011 mv_write_main_irq_mask(0, hpriv);
3012
3013 main_irq_cause = readl(hpriv->main_irq_cause_addr);
3014 pending_irqs = main_irq_cause & hpriv->main_irq_mask;
3015
3016
3017
3018
3019 if (pending_irqs && main_irq_cause != 0xffffffffU) {
3020 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
3021 handled = mv_pci_error(host, hpriv->base);
3022 else
3023 handled = mv_host_intr(host, pending_irqs);
3024 }
3025
3026
3027 if (using_msi)
3028 mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
3029
3030 spin_unlock(&host->lock);
3031
3032 return IRQ_RETVAL(handled);
3033 }
3034
3035 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
3036 {
3037 unsigned int ofs;
3038
3039 switch (sc_reg_in) {
3040 case SCR_STATUS:
3041 case SCR_ERROR:
3042 case SCR_CONTROL:
3043 ofs = sc_reg_in * sizeof(u32);
3044 break;
3045 default:
3046 ofs = 0xffffffffU;
3047 break;
3048 }
3049 return ofs;
3050 }
3051
3052 static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
3053 {
3054 struct mv_host_priv *hpriv = link->ap->host->private_data;
3055 void __iomem *mmio = hpriv->base;
3056 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3057 unsigned int ofs = mv5_scr_offset(sc_reg_in);
3058
3059 if (ofs != 0xffffffffU) {
3060 *val = readl(addr + ofs);
3061 return 0;
3062 } else
3063 return -EINVAL;
3064 }
3065
3066 static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
3067 {
3068 struct mv_host_priv *hpriv = link->ap->host->private_data;
3069 void __iomem *mmio = hpriv->base;
3070 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3071 unsigned int ofs = mv5_scr_offset(sc_reg_in);
3072
3073 if (ofs != 0xffffffffU) {
3074 writelfl(val, addr + ofs);
3075 return 0;
3076 } else
3077 return -EINVAL;
3078 }
3079
3080 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
3081 {
3082 struct pci_dev *pdev = to_pci_dev(host->dev);
3083 int early_5080;
3084
3085 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
3086
3087 if (!early_5080) {
3088 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3089 tmp |= (1 << 0);
3090 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3091 }
3092
3093 mv_reset_pci_bus(host, mmio);
3094 }
3095
3096 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3097 {
3098 writel(0x0fcfffff, mmio + FLASH_CTL);
3099 }
3100
3101 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
3102 void __iomem *mmio)
3103 {
3104 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
3105 u32 tmp;
3106
3107 tmp = readl(phy_mmio + MV5_PHY_MODE);
3108
3109 hpriv->signal[idx].pre = tmp & 0x1800;
3110 hpriv->signal[idx].amps = tmp & 0xe0;
3111 }
3112
3113 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3114 {
3115 u32 tmp;
3116
3117 writel(0, mmio + GPIO_PORT_CTL);
3118
3119
3120
3121 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3122 tmp |= ~(1 << 0);
3123 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3124 }
3125
3126 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3127 unsigned int port)
3128 {
3129 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
3130 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
3131 u32 tmp;
3132 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
3133
3134 if (fix_apm_sq) {
3135 tmp = readl(phy_mmio + MV5_LTMODE);
3136 tmp |= (1 << 19);
3137 writel(tmp, phy_mmio + MV5_LTMODE);
3138
3139 tmp = readl(phy_mmio + MV5_PHY_CTL);
3140 tmp &= ~0x3;
3141 tmp |= 0x1;
3142 writel(tmp, phy_mmio + MV5_PHY_CTL);
3143 }
3144
3145 tmp = readl(phy_mmio + MV5_PHY_MODE);
3146 tmp &= ~mask;
3147 tmp |= hpriv->signal[port].pre;
3148 tmp |= hpriv->signal[port].amps;
3149 writel(tmp, phy_mmio + MV5_PHY_MODE);
3150 }
3151
3152
3153 #undef ZERO
3154 #define ZERO(reg) writel(0, port_mmio + (reg))
3155 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
3156 unsigned int port)
3157 {
3158 void __iomem *port_mmio = mv_port_base(mmio, port);
3159
3160 mv_reset_channel(hpriv, mmio, port);
3161
3162 ZERO(0x028);
3163 writel(0x11f, port_mmio + EDMA_CFG);
3164 ZERO(0x004);
3165 ZERO(0x008);
3166 ZERO(0x00c);
3167 ZERO(0x010);
3168 ZERO(0x014);
3169 ZERO(0x018);
3170 ZERO(0x01c);
3171 ZERO(0x024);
3172 ZERO(0x020);
3173 ZERO(0x02c);
3174 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
3175 }
3176 #undef ZERO
3177
3178 #define ZERO(reg) writel(0, hc_mmio + (reg))
3179 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3180 unsigned int hc)
3181 {
3182 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3183 u32 tmp;
3184
3185 ZERO(0x00c);
3186 ZERO(0x010);
3187 ZERO(0x014);
3188 ZERO(0x018);
3189
3190 tmp = readl(hc_mmio + 0x20);
3191 tmp &= 0x1c1c1c1c;
3192 tmp |= 0x03030303;
3193 writel(tmp, hc_mmio + 0x20);
3194 }
3195 #undef ZERO
3196
3197 static int mv5_reset_hc(struct ata_host *host, void __iomem *mmio,
3198 unsigned int n_hc)
3199 {
3200 struct mv_host_priv *hpriv = host->private_data;
3201 unsigned int hc, port;
3202
3203 for (hc = 0; hc < n_hc; hc++) {
3204 for (port = 0; port < MV_PORTS_PER_HC; port++)
3205 mv5_reset_hc_port(hpriv, mmio,
3206 (hc * MV_PORTS_PER_HC) + port);
3207
3208 mv5_reset_one_hc(hpriv, mmio, hc);
3209 }
3210
3211 return 0;
3212 }
3213
3214 #undef ZERO
3215 #define ZERO(reg) writel(0, mmio + (reg))
3216 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
3217 {
3218 struct mv_host_priv *hpriv = host->private_data;
3219 u32 tmp;
3220
3221 tmp = readl(mmio + MV_PCI_MODE);
3222 tmp &= 0xff00ffff;
3223 writel(tmp, mmio + MV_PCI_MODE);
3224
3225 ZERO(MV_PCI_DISC_TIMER);
3226 ZERO(MV_PCI_MSI_TRIGGER);
3227 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
3228 ZERO(MV_PCI_SERR_MASK);
3229 ZERO(hpriv->irq_cause_offset);
3230 ZERO(hpriv->irq_mask_offset);
3231 ZERO(MV_PCI_ERR_LOW_ADDRESS);
3232 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
3233 ZERO(MV_PCI_ERR_ATTRIBUTE);
3234 ZERO(MV_PCI_ERR_COMMAND);
3235 }
3236 #undef ZERO
3237
3238 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3239 {
3240 u32 tmp;
3241
3242 mv5_reset_flash(hpriv, mmio);
3243
3244 tmp = readl(mmio + GPIO_PORT_CTL);
3245 tmp &= 0x3;
3246 tmp |= (1 << 5) | (1 << 6);
3247 writel(tmp, mmio + GPIO_PORT_CTL);
3248 }
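/*
 * mv6_reset_hc - chip-wide soft reset for Gen-II/IIE PCI controllers
 * @host: ATA host to reset
 * @mmio: base address of the host controller registers
 * @n_hc: unused; the reset is global
 *
 * Stops the PCI master and waits for it to drain, asserts the global
 * soft reset bit, then de-asserts it again, polling after each step.
 * Returns 0 on success or 1 if any of the steps fails to complete.
 */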
3259 static int mv6_reset_hc(struct ata_host *host, void __iomem *mmio,
3260 unsigned int n_hc)
3261 {
3262 void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
3263 int i, rc = 0;
3264 u32 t;
3265
3266
3267
3268
3269 t = readl(reg);
3270 writel(t | STOP_PCI_MASTER, reg);
3271
3272 for (i = 0; i < 1000; i++) {
3273 udelay(1);
3274 t = readl(reg);
3275 if (PCI_MASTER_EMPTY & t)
3276 break;
3277 }
3278 if (!(PCI_MASTER_EMPTY & t)) {
3279 dev_err(host->dev, "PCI master won't flush\n");
3280 rc = 1;
3281 goto done;
3282 }
3283
3284
3285 i = 5;
3286 do {
3287 writel(t | GLOB_SFT_RST, reg);
3288 t = readl(reg);
3289 udelay(1);
3290 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
3291
3292 if (!(GLOB_SFT_RST & t)) {
3293 dev_err(host->dev, "can't set global reset\n");
3294 rc = 1;
3295 goto done;
3296 }
3297
3298
3299 i = 5;
3300 do {
3301 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
3302 t = readl(reg);
3303 udelay(1);
3304 } while ((GLOB_SFT_RST & t) && (i-- > 0));
3305
3306 if (GLOB_SFT_RST & t) {
3307 dev_err(host->dev, "can't clear global reset\n");
3308 rc = 1;
3309 }
3310 done:
3311 return rc;
3312 }
3313
3314 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
3315 void __iomem *mmio)
3316 {
3317 void __iomem *port_mmio;
3318 u32 tmp;
3319
3320 tmp = readl(mmio + RESET_CFG);
3321 if ((tmp & (1 << 0)) == 0) {
3322 hpriv->signal[idx].amps = 0x7 << 8;
3323 hpriv->signal[idx].pre = 0x1 << 5;
3324 return;
3325 }
3326
3327 port_mmio = mv_port_base(mmio, idx);
3328 tmp = readl(port_mmio + PHY_MODE2);
3329
3330 hpriv->signal[idx].amps = tmp & 0x700;
3331 hpriv->signal[idx].pre = tmp & 0xe0;
3332 }
3333
3334 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3335 {
3336 writel(0x00000060, mmio + GPIO_PORT_CTL);
3337 }
3338
3339 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3340 unsigned int port)
3341 {
3342 void __iomem *port_mmio = mv_port_base(mmio, port);
3343
3344 u32 hp_flags = hpriv->hp_flags;
3345 int fix_phy_mode2 =
3346 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3347 int fix_phy_mode4 =
3348 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3349 u32 m2, m3;
3350
3351 if (fix_phy_mode2) {
3352 m2 = readl(port_mmio + PHY_MODE2);
3353 m2 &= ~(1 << 16);
3354 m2 |= (1 << 31);
3355 writel(m2, port_mmio + PHY_MODE2);
3356
3357 udelay(200);
3358
3359 m2 = readl(port_mmio + PHY_MODE2);
3360 m2 &= ~((1 << 16) | (1 << 31));
3361 writel(m2, port_mmio + PHY_MODE2);
3362
3363 udelay(200);
3364 }
3365
3366
3367
3368
3369
3370 m3 = readl(port_mmio + PHY_MODE3);
3371 m3 = (m3 & 0x1f) | (0x5555601 << 5);
3372
3373
3374 if (IS_SOC(hpriv))
3375 m3 &= ~0x1c;
3376
3377 if (fix_phy_mode4) {
3378 u32 m4 = readl(port_mmio + PHY_MODE4);
3379
3380
3381
3382
3383
3384 if (IS_GEN_IIE(hpriv))
3385 m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
3386 else
3387 m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
3388 writel(m4, port_mmio + PHY_MODE4);
3389 }
3390
3391
3392
3393
3394
3395
3396 writel(m3, port_mmio + PHY_MODE3);
3397
3398
3399 m2 = readl(port_mmio + PHY_MODE2);
3400
3401 m2 &= ~MV_M2_PREAMP_MASK;
3402 m2 |= hpriv->signal[port].amps;
3403 m2 |= hpriv->signal[port].pre;
3404 m2 &= ~(1 << 16);
3405
3406
3407 if (IS_GEN_IIE(hpriv)) {
3408 m2 &= ~0xC30FF01F;
3409 m2 |= 0x0000900F;
3410 }
3411
3412 writel(m2, port_mmio + PHY_MODE2);
3413 }
3414
3415
3416
3417 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
3418 void __iomem *mmio)
3419 {
3420 return;
3421 }
3422
3423 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
3424 void __iomem *mmio)
3425 {
3426 void __iomem *port_mmio;
3427 u32 tmp;
3428
3429 port_mmio = mv_port_base(mmio, idx);
3430 tmp = readl(port_mmio + PHY_MODE2);
3431
3432 hpriv->signal[idx].amps = tmp & 0x700;
3433 hpriv->signal[idx].pre = tmp & 0xe0;
3434 }
3435
3436 #undef ZERO
3437 #define ZERO(reg) writel(0, port_mmio + (reg))
3438 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
3439 void __iomem *mmio, unsigned int port)
3440 {
3441 void __iomem *port_mmio = mv_port_base(mmio, port);
3442
3443 mv_reset_channel(hpriv, mmio, port);
3444
3445 ZERO(0x028);
3446 writel(0x101f, port_mmio + EDMA_CFG);
3447 ZERO(0x004);
3448 ZERO(0x008);
3449 ZERO(0x00c);
3450 ZERO(0x010);
3451 ZERO(0x014);
3452 ZERO(0x018);
3453 ZERO(0x01c);
3454 ZERO(0x024);
3455 ZERO(0x020);
3456 ZERO(0x02c);
3457 writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
3458 }
3459
3460 #undef ZERO
3461
3462 #define ZERO(reg) writel(0, hc_mmio + (reg))
3463 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
3464 void __iomem *mmio)
3465 {
3466 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
3467
3468 ZERO(0x00c);
3469 ZERO(0x010);
3470 ZERO(0x014);
3471
3472 }
3473
3474 #undef ZERO
3475
3476 static int mv_soc_reset_hc(struct ata_host *host,
3477 void __iomem *mmio, unsigned int n_hc)
3478 {
3479 struct mv_host_priv *hpriv = host->private_data;
3480 unsigned int port;
3481
3482 for (port = 0; port < hpriv->n_ports; port++)
3483 mv_soc_reset_hc_port(hpriv, mmio, port);
3484
3485 mv_soc_reset_one_hc(hpriv, mmio);
3486
3487 return 0;
3488 }
3489
3490 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
3491 void __iomem *mmio)
3492 {
3493 return;
3494 }
3495
3496 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
3497 {
3498 return;
3499 }
3500
3501 static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
3502 void __iomem *mmio, unsigned int port)
3503 {
3504 void __iomem *port_mmio = mv_port_base(mmio, port);
3505 u32 reg;
3506
3507 reg = readl(port_mmio + PHY_MODE3);
3508 reg &= ~(0x3 << 27);
3509 reg |= (0x1 << 27);
3510 reg &= ~(0x3 << 29);
3511 reg |= (0x1 << 29);
3512 writel(reg, port_mmio + PHY_MODE3);
3513
3514 reg = readl(port_mmio + PHY_MODE4);
3515 reg &= ~0x1;
3516 reg |= (0x1 << 16);
3517 writel(reg, port_mmio + PHY_MODE4);
3518
3519 reg = readl(port_mmio + PHY_MODE9_GEN2);
3520 reg &= ~0xf;
3521 reg |= 0x8;
3522 reg &= ~(0x1 << 14);
3523 writel(reg, port_mmio + PHY_MODE9_GEN2);
3524
3525 reg = readl(port_mmio + PHY_MODE9_GEN1);
3526 reg &= ~0xf;
3527 reg |= 0x8;
3528 reg &= ~(0x1 << 14);
3529 writel(reg, port_mmio + PHY_MODE9_GEN1);
3530 }
3531
3532
3533
3534
3535
3536
3537
3538
3539 static bool soc_is_65n(struct mv_host_priv *hpriv)
3540 {
3541 void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);
3542
3543 if (readl(port0_mmio + PHYCFG_OFS))
3544 return true;
3545 return false;
3546 }
3547
3548 static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
3549 {
3550 u32 ifcfg = readl(port_mmio + SATA_IFCFG);
3551
3552 ifcfg = (ifcfg & 0xf7f) | 0x9b1000;
3553 if (want_gen2i)
3554 ifcfg |= (1 << 7);
3555 writelfl(ifcfg, port_mmio + SATA_IFCFG);
3556 }
3557
3558 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
3559 unsigned int port_no)
3560 {
3561 void __iomem *port_mmio = mv_port_base(mmio, port_no);
3562
3563
3564
3565
3566
3567
3568 mv_stop_edma_engine(port_mmio);
3569 writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3570
3571 if (!IS_GEN_I(hpriv)) {
3572
3573 mv_setup_ifcfg(port_mmio, 1);
3574 }
3575
3576
3577
3578
3579
3580 writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3581 udelay(25);
3582 writelfl(0, port_mmio + EDMA_CMD);
3583
3584 hpriv->ops->phy_errata(hpriv, mmio, port_no);
3585
3586 if (IS_GEN_I(hpriv))
3587 usleep_range(500, 1000);
3588 }
3589
3590 static void mv_pmp_select(struct ata_port *ap, int pmp)
3591 {
3592 if (sata_pmp_supported(ap)) {
3593 void __iomem *port_mmio = mv_ap_base(ap);
3594 u32 reg = readl(port_mmio + SATA_IFCTL);
3595 int old = reg & 0xf;
3596
3597 if (old != pmp) {
3598 reg = (reg & ~0xf) | pmp;
3599 writelfl(reg, port_mmio + SATA_IFCTL);
3600 }
3601 }
3602 }
3603
3604 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
3605 unsigned long deadline)
3606 {
3607 mv_pmp_select(link->ap, sata_srst_pmp(link));
3608 return sata_std_hardreset(link, class, deadline);
3609 }
3610
3611 static int mv_softreset(struct ata_link *link, unsigned int *class,
3612 unsigned long deadline)
3613 {
3614 mv_pmp_select(link->ap, sata_srst_pmp(link));
3615 return ata_sff_softreset(link, class, deadline);
3616 }
3617
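/*
 * mv_hardreset - channel-level hard reset
 *
 * Resets the EDMA channel, clears the cached per-port flags, and then
 * hard-resets the link.  If the link comes up, -EAGAIN is returned
 * right away; otherwise the reset is retried until SStatus settles on
 * a recognised value.  On non-Gen-I chips, repeated attempts ending in
 * SStatus == 0x121 drop the interface to 1.5 Gbps via mv_setup_ifcfg()
 * and extend the deadline once.  Cached registers and the EDMA
 * configuration are refreshed before the final return.
 */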
3618 static int mv_hardreset(struct ata_link *link, unsigned int *class,
3619 unsigned long deadline)
3620 {
3621 struct ata_port *ap = link->ap;
3622 struct mv_host_priv *hpriv = ap->host->private_data;
3623 struct mv_port_priv *pp = ap->private_data;
3624 void __iomem *mmio = hpriv->base;
3625 int rc, attempts = 0, extra = 0;
3626 u32 sstatus;
3627 bool online;
3628
3629 mv_reset_channel(hpriv, mmio, ap->port_no);
3630 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
3631 pp->pp_flags &=
3632 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
3633
3634
3635 do {
3636 const unsigned long *timing =
3637 sata_ehc_deb_timing(&link->eh_context);
3638
3639 rc = sata_link_hardreset(link, timing, deadline + extra,
3640 &online, NULL);
3641 rc = online ? -EAGAIN : rc;
3642 if (rc)
3643 return rc;
3644 sata_scr_read(link, SCR_STATUS, &sstatus);
3645 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
3646
3647 mv_setup_ifcfg(mv_ap_base(ap), 0);
3648 if (time_after(jiffies + HZ, deadline))
3649 extra = HZ;
3650 }
3651 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
3652 mv_save_cached_regs(ap);
3653 mv_edma_cfg(ap, 0, 0);
3654
3655 return rc;
3656 }
3657
3658 static void mv_eh_freeze(struct ata_port *ap)
3659 {
3660 mv_stop_edma(ap);
3661 mv_enable_port_irqs(ap, 0);
3662 }
3663
3664 static void mv_eh_thaw(struct ata_port *ap)
3665 {
3666 struct mv_host_priv *hpriv = ap->host->private_data;
3667 unsigned int port = ap->port_no;
3668 unsigned int hardport = mv_hardport_from_port(port);
3669 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
3670 void __iomem *port_mmio = mv_ap_base(ap);
3671 u32 hc_irq_cause;
3672
3673
3674 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3675
3676
3677 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
3678 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
3679
3680 mv_enable_port_irqs(ap, ERR_IRQ);
3681 }
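/*
 * mv_port_init - set up the shadow register addresses for one port
 * @port: libata I/O address block to fill in
 * @port_mmio: base address of the port registers
 *
 * Points the SFF taskfile, status and control addresses at the port's
 * shadow register block, clears any latched SError and EDMA error
 * causes, and unmasks all non-transient EDMA error interrupts.
 */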
3695 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
3696 {
3697 void __iomem *serr, *shd_base = port_mmio + SHD_BLK;
3698
3699
3700
3701 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
3702 port->error_addr =
3703 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
3704 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
3705 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
3706 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
3707 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
3708 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
3709 port->status_addr =
3710 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
3711
3712 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
3713
3714
3715 serr = port_mmio + mv_scr_offset(SCR_ERROR);
3716 writelfl(readl(serr), serr);
3717 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3718
3719
3720 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
3721 }
3722
3723 static unsigned int mv_in_pcix_mode(struct ata_host *host)
3724 {
3725 struct mv_host_priv *hpriv = host->private_data;
3726 void __iomem *mmio = hpriv->base;
3727 u32 reg;
3728
3729 if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
3730 return 0;
3731 reg = readl(mmio + MV_PCI_MODE);
3732 if ((reg & MV_PCI_MODE_MASK) == 0)
3733 return 0;
3734 return 1;
3735 }
3736
3737 static int mv_pci_cut_through_okay(struct ata_host *host)
3738 {
3739 struct mv_host_priv *hpriv = host->private_data;
3740 void __iomem *mmio = hpriv->base;
3741 u32 reg;
3742
3743 if (!mv_in_pcix_mode(host)) {
3744 reg = readl(mmio + MV_PCI_COMMAND);
3745 if (reg & MV_PCI_COMMAND_MRDTRIG)
3746 return 0;
3747 }
3748 return 1;
3749 }
3750
3751 static void mv_60x1b2_errata_pci7(struct ata_host *host)
3752 {
3753 struct mv_host_priv *hpriv = host->private_data;
3754 void __iomem *mmio = hpriv->base;
3755
3756
3757 if (mv_in_pcix_mode(host)) {
3758 u32 reg = readl(mmio + MV_PCI_COMMAND);
3759 writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
3760 }
3761 }
3762
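/*
 * mv_chip_id - select chip-specific operations and errata flags
 * @host: ATA host being initialised
 * @board_idx: index into mv_port_info for this board
 *
 * Picks the Gen-I/II/IIE ops vector and errata workarounds from the
 * board index and PCI revision, warns loudly about the data-corruption
 * behaviour of Highpoint RocketRAID 2300/2310 BIOSes on 7042-based
 * boards, and chooses the PCI vs. PCIe interrupt cause/mask register
 * offsets.  Returns -EINVAL for an unknown board index.
 */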
3763 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
3764 {
3765 struct pci_dev *pdev = to_pci_dev(host->dev);
3766 struct mv_host_priv *hpriv = host->private_data;
3767 u32 hp_flags = hpriv->hp_flags;
3768
3769 switch (board_idx) {
3770 case chip_5080:
3771 hpriv->ops = &mv5xxx_ops;
3772 hp_flags |= MV_HP_GEN_I;
3773
3774 switch (pdev->revision) {
3775 case 0x1:
3776 hp_flags |= MV_HP_ERRATA_50XXB0;
3777 break;
3778 case 0x3:
3779 hp_flags |= MV_HP_ERRATA_50XXB2;
3780 break;
3781 default:
3782 dev_warn(&pdev->dev,
3783 "Applying 50XXB2 workarounds to unknown rev\n");
3784 hp_flags |= MV_HP_ERRATA_50XXB2;
3785 break;
3786 }
3787 break;
3788
3789 case chip_504x:
3790 case chip_508x:
3791 hpriv->ops = &mv5xxx_ops;
3792 hp_flags |= MV_HP_GEN_I;
3793
3794 switch (pdev->revision) {
3795 case 0x0:
3796 hp_flags |= MV_HP_ERRATA_50XXB0;
3797 break;
3798 case 0x3:
3799 hp_flags |= MV_HP_ERRATA_50XXB2;
3800 break;
3801 default:
3802 dev_warn(&pdev->dev,
3803 "Applying B2 workarounds to unknown rev\n");
3804 hp_flags |= MV_HP_ERRATA_50XXB2;
3805 break;
3806 }
3807 break;
3808
3809 case chip_604x:
3810 case chip_608x:
3811 hpriv->ops = &mv6xxx_ops;
3812 hp_flags |= MV_HP_GEN_II;
3813
3814 switch (pdev->revision) {
3815 case 0x7:
3816 mv_60x1b2_errata_pci7(host);
3817 hp_flags |= MV_HP_ERRATA_60X1B2;
3818 break;
3819 case 0x9:
3820 hp_flags |= MV_HP_ERRATA_60X1C0;
3821 break;
3822 default:
3823 dev_warn(&pdev->dev,
3824 "Applying B2 workarounds to unknown rev\n");
3825 hp_flags |= MV_HP_ERRATA_60X1B2;
3826 break;
3827 }
3828 break;
3829
3830 case chip_7042:
3831 hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
3832 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
3833 (pdev->device == 0x2300 || pdev->device == 0x2310))
3834 {
3835
3836
3837
3838
3839
3840
3841
3842
3843
3844
3845
3846
3847
3848
3849
3850
3851
3852 dev_warn(&pdev->dev, "Highpoint RocketRAID"
3853 " BIOS CORRUPTS DATA on all attached drives,"
3854 " regardless of if/how they are configured."
3855 " BEWARE!\n");
3856 dev_warn(&pdev->dev, "For data safety, do not"
3857 " use sectors 8-9 on \"Legacy\" drives,"
3858 " and avoid the final two gigabytes on"
3859 " all RocketRAID BIOS initialized drives.\n");
3860 }
3861 fallthrough;
3862 case chip_6042:
3863 hpriv->ops = &mv6xxx_ops;
3864 hp_flags |= MV_HP_GEN_IIE;
3865 if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
3866 hp_flags |= MV_HP_CUT_THROUGH;
3867
3868 switch (pdev->revision) {
3869 case 0x2:
3870 hp_flags |= MV_HP_ERRATA_60X1C0;
3871 break;
3872 default:
3873 dev_warn(&pdev->dev,
3874 "Applying 60X1C0 workarounds to unknown rev\n");
3875 hp_flags |= MV_HP_ERRATA_60X1C0;
3876 break;
3877 }
3878 break;
3879 case chip_soc:
3880 if (soc_is_65n(hpriv))
3881 hpriv->ops = &mv_soc_65n_ops;
3882 else
3883 hpriv->ops = &mv_soc_ops;
3884 hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
3885 MV_HP_ERRATA_60X1C0;
3886 break;
3887
3888 default:
3889 dev_alert(host->dev, "BUG: invalid board index %u\n", board_idx);
3890 return -EINVAL;
3891 }
3892
3893 hpriv->hp_flags = hp_flags;
3894 if (hp_flags & MV_HP_PCIE) {
3895 hpriv->irq_cause_offset = PCIE_IRQ_CAUSE;
3896 hpriv->irq_mask_offset = PCIE_IRQ_MASK;
3897 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
3898 } else {
3899 hpriv->irq_cause_offset = PCI_IRQ_CAUSE;
3900 hpriv->irq_mask_offset = PCI_IRQ_MASK;
3901 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
3902 }
3903
3904 return 0;
3905 }
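/*
 * mv_init_host - perform chip-wide initialisation
 * @host: ATA host to initialise
 *
 * Identifies the chip variant (mv_chip_id), locates the main IRQ
 * cause/mask registers (SoC vs. PCI), masks all main interrupts, and
 * then runs the per-chip hooks: preamp read-out, host controller reset,
 * flash/bus reset, LED setup and per-port shadow register setup.
 * Finally pending causes are cleared, the PCI error interrupt is
 * unmasked, and IRQ coalescing is configured from the module parameters.
 */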
3917 static int mv_init_host(struct ata_host *host)
3918 {
3919 int rc = 0, n_hc, port, hc;
3920 struct mv_host_priv *hpriv = host->private_data;
3921 void __iomem *mmio = hpriv->base;
3922
3923 rc = mv_chip_id(host, hpriv->board_idx);
3924 if (rc)
3925 goto done;
3926
3927 if (IS_SOC(hpriv)) {
3928 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
3929 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK;
3930 } else {
3931 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
3932 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK;
3933 }
3934
3935
3936 hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
3937
3938
3939 mv_set_main_irq_mask(host, ~0, 0);
3940
3941 n_hc = mv_get_hc_count(host->ports[0]->flags);
3942
3943 for (port = 0; port < host->n_ports; port++)
3944 if (hpriv->ops->read_preamp)
3945 hpriv->ops->read_preamp(hpriv, port, mmio);
3946
3947 rc = hpriv->ops->reset_hc(host, mmio, n_hc);
3948 if (rc)
3949 goto done;
3950
3951 hpriv->ops->reset_flash(hpriv, mmio);
3952 hpriv->ops->reset_bus(host, mmio);
3953 hpriv->ops->enable_leds(hpriv, mmio);
3954
3955 for (port = 0; port < host->n_ports; port++) {
3956 struct ata_port *ap = host->ports[port];
3957 void __iomem *port_mmio = mv_port_base(mmio, port);
3958
3959 mv_port_init(&ap->ioaddr, port_mmio);
3960 }
3961
3962 for (hc = 0; hc < n_hc; hc++) {
3963 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3964
3965 dev_dbg(host->dev, "HC%i: HC config=0x%08x HC IRQ cause "
3966 "(before clear)=0x%08x\n", hc,
3967 readl(hc_mmio + HC_CFG),
3968 readl(hc_mmio + HC_IRQ_CAUSE));
3969
3970
3971 writelfl(0, hc_mmio + HC_IRQ_CAUSE);
3972 }
3973
3974 if (!IS_SOC(hpriv)) {
3975
3976 writelfl(0, mmio + hpriv->irq_cause_offset);
3977
3978
3979 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
3980 }
3981
3982
3983
3984
3985
3986 mv_set_main_irq_mask(host, 0, PCI_ERR);
3987 mv_set_irq_coalescing(host, irq_coalescing_io_count,
3988 irq_coalescing_usecs);
3989 done:
3990 return rc;
3991 }
3992
3993 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
3994 {
3995 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
3996 MV_CRQB_Q_SZ, 0);
3997 if (!hpriv->crqb_pool)
3998 return -ENOMEM;
3999
4000 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
4001 MV_CRPB_Q_SZ, 0);
4002 if (!hpriv->crpb_pool)
4003 return -ENOMEM;
4004
4005 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
4006 MV_SG_TBL_SZ, 0);
4007 if (!hpriv->sg_tbl_pool)
4008 return -ENOMEM;
4009
4010 return 0;
4011 }
4012
4013 static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
4014 const struct mbus_dram_target_info *dram)
4015 {
4016 int i;
4017
4018 for (i = 0; i < 4; i++) {
4019 writel(0, hpriv->base + WINDOW_CTRL(i));
4020 writel(0, hpriv->base + WINDOW_BASE(i));
4021 }
4022
4023 for (i = 0; i < dram->num_cs; i++) {
4024 const struct mbus_dram_window *cs = dram->cs + i;
4025
4026 writel(((cs->size - 1) & 0xffff0000) |
4027 (cs->mbus_attr << 8) |
4028 (dram->mbus_dram_target_id << 4) | 1,
4029 hpriv->base + WINDOW_CTRL(i));
4030 writel(cs->base, hpriv->base + WINDOW_BASE(i));
4031 }
4032 }
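/*
 * mv_platform_probe - probe the SoC (platform device) variant
 * @pdev: platform device to probe
 *
 * Expects a single memory resource plus an interrupt, with the port
 * count taken from the "nr-ports" DT property or from platform data.
 * Maps the registers, acquires the optional core and per-port clocks
 * and PHYs, programs the MBus DRAM windows when available, creates the
 * DMA pools, runs mv_init_host() and activates the host with
 * mv_interrupt as a shared handler.  On failure, the clocks and PHYs
 * acquired so far are released again.
 */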
4042 static int mv_platform_probe(struct platform_device *pdev)
4043 {
4044 const struct mv_sata_platform_data *mv_platform_data;
4045 const struct mbus_dram_target_info *dram;
4046 const struct ata_port_info *ppi[] =
4047 { &mv_port_info[chip_soc], NULL };
4048 struct ata_host *host;
4049 struct mv_host_priv *hpriv;
4050 struct resource *res;
4051 int n_ports = 0, irq = 0;
4052 int rc;
4053 int port;
4054
4055 ata_print_version_once(&pdev->dev, DRV_VERSION);
4056
4057
4058
4059
4060 if (unlikely(pdev->num_resources != 1)) {
4061 dev_err(&pdev->dev, "invalid number of resources\n");
4062 return -EINVAL;
4063 }
4064
4065
4066
4067
4068 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4069 if (res == NULL)
4070 return -EINVAL;
4071
4072
4073 if (pdev->dev.of_node) {
4074 rc = of_property_read_u32(pdev->dev.of_node, "nr-ports",
4075 &n_ports);
4076 if (rc) {
4077 dev_err(&pdev->dev,
4078 "error parsing nr-ports property: %d\n", rc);
4079 return rc;
4080 }
4081
4082 if (n_ports <= 0) {
4083 dev_err(&pdev->dev, "nr-ports must be positive: %d\n",
4084 n_ports);
4085 return -EINVAL;
4086 }
4087
4088 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
4089 } else {
4090 mv_platform_data = dev_get_platdata(&pdev->dev);
4091 n_ports = mv_platform_data->n_ports;
4092 irq = platform_get_irq(pdev, 0);
4093 }
4094 if (irq < 0)
4095 return irq;
4096 if (!irq)
4097 return -EINVAL;
4098
4099 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4100 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4101
4102 if (!host || !hpriv)
4103 return -ENOMEM;
4104 hpriv->port_clks = devm_kcalloc(&pdev->dev,
4105 n_ports, sizeof(struct clk *),
4106 GFP_KERNEL);
4107 if (!hpriv->port_clks)
4108 return -ENOMEM;
4109 hpriv->port_phys = devm_kcalloc(&pdev->dev,
4110 n_ports, sizeof(struct phy *),
4111 GFP_KERNEL);
4112 if (!hpriv->port_phys)
4113 return -ENOMEM;
4114 host->private_data = hpriv;
4115 hpriv->board_idx = chip_soc;
4116
4117 host->iomap = NULL;
4118 hpriv->base = devm_ioremap(&pdev->dev, res->start,
4119 resource_size(res));
4120 if (!hpriv->base)
4121 return -ENOMEM;
4122
4123 hpriv->base -= SATAHC0_REG_BASE;
4124
4125 hpriv->clk = clk_get(&pdev->dev, NULL);
4126 if (IS_ERR(hpriv->clk))
4127 dev_notice(&pdev->dev, "cannot get optional clkdev\n");
4128 else
4129 clk_prepare_enable(hpriv->clk);
4130
4131 for (port = 0; port < n_ports; port++) {
4132 char port_number[16];
4133 sprintf(port_number, "%d", port);
4134 hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
4135 if (!IS_ERR(hpriv->port_clks[port]))
4136 clk_prepare_enable(hpriv->port_clks[port]);
4137
4138 sprintf(port_number, "port%d", port);
4139 hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
4140 port_number);
4141 if (IS_ERR(hpriv->port_phys[port])) {
4142 rc = PTR_ERR(hpriv->port_phys[port]);
4143 hpriv->port_phys[port] = NULL;
4144 if (rc != -EPROBE_DEFER)
dev_warn(&pdev->dev, "error getting phy %d\n", rc);
4146
4147
4148 hpriv->n_ports = port;
4149 goto err;
4150 } else
4151 phy_power_on(hpriv->port_phys[port]);
4152 }
4153
4154
4155 hpriv->n_ports = n_ports;
4156
4157
4158
4159
4160 dram = mv_mbus_dram_info();
4161 if (dram)
4162 mv_conf_mbus_windows(hpriv, dram);
4163
4164 rc = mv_create_dma_pools(hpriv, &pdev->dev);
4165 if (rc)
4166 goto err;
4167
4168
4169
4170
4171
4172 if (pdev->dev.of_node &&
4173 of_device_is_compatible(pdev->dev.of_node,
4174 "marvell,armada-370-sata"))
4175 hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL;
4176
4177
4178 rc = mv_init_host(host);
4179 if (rc)
4180 goto err;
4181
4182 dev_info(&pdev->dev, "slots %u ports %d\n",
4183 (unsigned)MV_MAX_Q_DEPTH, host->n_ports);
4184
4185 rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht);
4186 if (!rc)
4187 return 0;
4188
4189 err:
4190 if (!IS_ERR(hpriv->clk)) {
4191 clk_disable_unprepare(hpriv->clk);
4192 clk_put(hpriv->clk);
4193 }
4194 for (port = 0; port < hpriv->n_ports; port++) {
4195 if (!IS_ERR(hpriv->port_clks[port])) {
4196 clk_disable_unprepare(hpriv->port_clks[port]);
4197 clk_put(hpriv->port_clks[port]);
4198 }
4199 phy_power_off(hpriv->port_phys[port]);
4200 }
4201
4202 return rc;
4203 }
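/*
 * mv_platform_remove - platform device removal / module unload
 * @pdev: platform device being removed
 *
 * Detaches the ATA host and releases the core clock as well as the
 * per-port clocks and PHYs.
 */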
4213 static int mv_platform_remove(struct platform_device *pdev)
4214 {
4215 struct ata_host *host = platform_get_drvdata(pdev);
4216 struct mv_host_priv *hpriv = host->private_data;
4217 int port;
4218 ata_host_detach(host);
4219
4220 if (!IS_ERR(hpriv->clk)) {
4221 clk_disable_unprepare(hpriv->clk);
4222 clk_put(hpriv->clk);
4223 }
4224 for (port = 0; port < host->n_ports; port++) {
4225 if (!IS_ERR(hpriv->port_clks[port])) {
4226 clk_disable_unprepare(hpriv->port_clks[port]);
4227 clk_put(hpriv->port_clks[port]);
4228 }
4229 phy_power_off(hpriv->port_phys[port]);
4230 }
4231 return 0;
4232 }
4233
4234 #ifdef CONFIG_PM_SLEEP
4235 static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
4236 {
4237 struct ata_host *host = platform_get_drvdata(pdev);
4238
4239 if (host)
4240 ata_host_suspend(host, state);
4241 return 0;
4242 }
4243
4244 static int mv_platform_resume(struct platform_device *pdev)
4245 {
4246 struct ata_host *host = platform_get_drvdata(pdev);
4247 const struct mbus_dram_target_info *dram;
4248 int ret;
4249
4250 if (host) {
4251 struct mv_host_priv *hpriv = host->private_data;
4252
4253
4254
4255
4256 dram = mv_mbus_dram_info();
4257 if (dram)
4258 mv_conf_mbus_windows(hpriv, dram);
4259
4260
4261 ret = mv_init_host(host);
4262 if (ret) {
4263 dev_err(&pdev->dev, "Error during HW init\n");
4264 return ret;
4265 }
4266 ata_host_resume(host);
4267 }
4268
4269 return 0;
4270 }
4271 #else
4272 #define mv_platform_suspend NULL
4273 #define mv_platform_resume NULL
4274 #endif
4275
4276 #ifdef CONFIG_OF
4277 static const struct of_device_id mv_sata_dt_ids[] = {
4278 { .compatible = "marvell,armada-370-sata", },
4279 { .compatible = "marvell,orion-sata", },
4280 { }
4281 };
4282 MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
4283 #endif
4284
4285 static struct platform_driver mv_platform_driver = {
4286 .probe = mv_platform_probe,
4287 .remove = mv_platform_remove,
4288 .suspend = mv_platform_suspend,
4289 .resume = mv_platform_resume,
4290 .driver = {
4291 .name = DRV_NAME,
4292 .of_match_table = of_match_ptr(mv_sata_dt_ids),
4293 },
4294 };
4295
4296
4297 #ifdef CONFIG_PCI
4298 static int mv_pci_init_one(struct pci_dev *pdev,
4299 const struct pci_device_id *ent);
4300 #ifdef CONFIG_PM_SLEEP
4301 static int mv_pci_device_resume(struct pci_dev *pdev);
4302 #endif
4303
4304
4305 static struct pci_driver mv_pci_driver = {
4306 .name = DRV_NAME,
4307 .id_table = mv_pci_tbl,
4308 .probe = mv_pci_init_one,
4309 .remove = ata_pci_remove_one,
4310 #ifdef CONFIG_PM_SLEEP
4311 .suspend = ata_pci_device_suspend,
4312 .resume = mv_pci_device_resume,
4313 #endif
4314
4315 };
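/*
 * mv_print_info - log a one-line summary of the probed PCI adapter
 * @host: ATA host that was set up
 *
 * Reports the chip generation, queue depth, port count, SCSI/RAID
 * class byte and whether MSI or INTx interrupts are in use.
 */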
4326 static void mv_print_info(struct ata_host *host)
4327 {
4328 struct pci_dev *pdev = to_pci_dev(host->dev);
4329 struct mv_host_priv *hpriv = host->private_data;
4330 u8 scc;
4331 const char *scc_s, *gen;
4332
4333
4334
4335
4336 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
4337 if (scc == 0)
4338 scc_s = "SCSI";
4339 else if (scc == 0x01)
4340 scc_s = "RAID";
4341 else
4342 scc_s = "?";
4343
4344 if (IS_GEN_I(hpriv))
4345 gen = "I";
4346 else if (IS_GEN_II(hpriv))
4347 gen = "II";
4348 else if (IS_GEN_IIE(hpriv))
4349 gen = "IIE";
4350 else
4351 gen = "?";
4352
4353 dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
4354 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
4355 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
4356 }
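/*
 * mv_pci_init_one - probe one PCI flavour of the controller
 * @pdev: PCI device found
 * @ent: matching entry in mv_pci_tbl
 *
 * Allocates the host and host-private data, enables the device, maps
 * the primary BAR, sets a 64-bit DMA mask, creates the DMA pools and
 * runs mv_init_host().  MSI is enabled when the "msi" module parameter
 * is set, and the host is activated with the Gen-I or Gen-II/IIE
 * scsi_host_template as appropriate.
 */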
4366 static int mv_pci_init_one(struct pci_dev *pdev,
4367 const struct pci_device_id *ent)
4368 {
4369 unsigned int board_idx = (unsigned int)ent->driver_data;
4370 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
4371 struct ata_host *host;
4372 struct mv_host_priv *hpriv;
4373 int n_ports, port, rc;
4374
4375 ata_print_version_once(&pdev->dev, DRV_VERSION);
4376
4377
4378 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
4379
4380 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4381 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4382 if (!host || !hpriv)
4383 return -ENOMEM;
4384 host->private_data = hpriv;
4385 hpriv->n_ports = n_ports;
4386 hpriv->board_idx = board_idx;
4387
4388
4389 rc = pcim_enable_device(pdev);
4390 if (rc)
4391 return rc;
4392
4393 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
4394 if (rc == -EBUSY)
4395 pcim_pin_device(pdev);
4396 if (rc)
4397 return rc;
4398 host->iomap = pcim_iomap_table(pdev);
4399 hpriv->base = host->iomap[MV_PRIMARY_BAR];
4400
4401 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4402 if (rc) {
4403 dev_err(&pdev->dev, "DMA enable failed\n");
4404 return rc;
4405 }
4406
4407 rc = mv_create_dma_pools(hpriv, &pdev->dev);
4408 if (rc)
4409 return rc;
4410
4411 for (port = 0; port < host->n_ports; port++) {
4412 struct ata_port *ap = host->ports[port];
4413 void __iomem *port_mmio = mv_port_base(hpriv->base, port);
4414 unsigned int offset = port_mmio - hpriv->base;
4415
4416 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
4417 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
4418 }
4419
4420
4421 rc = mv_init_host(host);
4422 if (rc)
4423 return rc;
4424
4425
4426 if (msi && pci_enable_msi(pdev) == 0)
4427 hpriv->hp_flags |= MV_HP_FLAG_MSI;
4428
4429 mv_dump_pci_cfg(pdev, 0x68);
4430 mv_print_info(host);
4431
4432 pci_set_master(pdev);
4433 pci_try_set_mwi(pdev);
4434 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
4435 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
4436 }
4437
4438 #ifdef CONFIG_PM_SLEEP
4439 static int mv_pci_device_resume(struct pci_dev *pdev)
4440 {
4441 struct ata_host *host = pci_get_drvdata(pdev);
4442 int rc;
4443
4444 rc = ata_pci_device_do_resume(pdev);
4445 if (rc)
4446 return rc;
4447
4448
4449 rc = mv_init_host(host);
4450 if (rc)
4451 return rc;
4452
4453 ata_host_resume(host);
4454
4455 return 0;
4456 }
4457 #endif
4458 #endif
4459
4460 static int __init mv_init(void)
4461 {
4462 int rc = -ENODEV;
4463 #ifdef CONFIG_PCI
4464 rc = pci_register_driver(&mv_pci_driver);
4465 if (rc < 0)
4466 return rc;
4467 #endif
4468 rc = platform_driver_register(&mv_platform_driver);
4469
4470 #ifdef CONFIG_PCI
4471 if (rc < 0)
4472 pci_unregister_driver(&mv_pci_driver);
4473 #endif
4474 return rc;
4475 }
4476
4477 static void __exit mv_exit(void)
4478 {
4479 #ifdef CONFIG_PCI
4480 pci_unregister_driver(&mv_pci_driver);
4481 #endif
4482 platform_driver_unregister(&mv_platform_driver);
4483 }
4484
4485 MODULE_AUTHOR("Brett Russ");
4486 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
4487 MODULE_LICENSE("GPL v2");
4488 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
4489 MODULE_VERSION(DRV_VERSION);
4490 MODULE_ALIAS("platform:" DRV_NAME);
4491
4492 module_init(mv_init);
4493 module_exit(mv_exit);