0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017 #include <linux/kernel.h>
0018 #include <linux/module.h>
0019 #include <linux/device.h>
0020 #include <linux/dmaengine.h>
0021 #include <linux/of_address.h>
0022 #include <linux/of_irq.h>
0023 #include <linux/of_platform.h>
0024 #include <linux/platform_device.h>
0025 #include <linux/phy/phy.h>
0026 #include <linux/libata.h>
0027 #include <linux/slab.h>
0028 #include <trace/events/libata.h>
0029
0030 #include "libata.h"
0031
0032 #include <scsi/scsi_host.h>
0033 #include <scsi/scsi_cmnd.h>
0034
0035
/* Driver identity; undef first in case a shared header already defined them. */
#undef DRV_NAME
#undef DRV_VERSION

#define DRV_NAME	"sata-dwc"
#define DRV_VERSION	"1.3"

/* MMIO accessors: relaxed variants, no implicit memory barriers. */
#define sata_dwc_writel(a, v)	writel_relaxed(v, a)
#define sata_dwc_readl(a)	readl_relaxed(a)

#ifndef NO_IRQ
#define NO_IRQ		0
#endif

/* Default AHB DMA burst size, in bytes. */
#define AHB_DMA_BRST_DFLT	64
0050
enum {
	SATA_DWC_MAX_PORTS = 1,		/* controller exposes a single port */

	SATA_DWC_SCR_OFFSET = 0x24,	/* SCR registers, offset from MMIO base */
	SATA_DWC_REG_OFFSET = 0x64,	/* DWC-specific registers, offset from base */
};
0057
0058
/*
 * Synopsys DWC SATA controller register block, mapped at
 * SATA_DWC_REG_OFFSET past the MMIO base.  Layout must match hardware.
 */
struct sata_dwc_regs {
	u32 fptagr;		/* 1st party DMA tag */
	u32 fpbor;		/* 1st party DMA buffer offset */
	u32 fptcr;		/* 1st party DMA xfer count */
	u32 dmacr;		/* DMA control */
	u32 dbtsr;		/* DMA burst transaction size */
	u32 intpr;		/* interrupt pending */
	u32 intmr;		/* interrupt mask */
	u32 errmr;		/* error mask */
	u32 llcr;		/* link layer control */
	u32 phycr;		/* PHY control */
	u32 physr;		/* PHY status */
	u32 rxbistpd;		/* recvd BIST pattern def register */
	u32 rxbistpd1;		/* recvd BIST data dword1 */
	u32 rxbistpd2;		/* recvd BIST pattern data dword2 */
	u32 txbistpd;		/* trans BIST pattern def register */
	u32 txbistpd1;		/* trans BIST data dword1 */
	u32 txbistpd2;		/* trans BIST data dword2 */
	u32 bistcr;		/* BIST control register */
	u32 bistfctr;		/* BIST FIS count register */
	u32 bistsr;		/* BIST status register */
	u32 bistdecr;		/* BIST dword error count register */
	u32 res[15];		/* Reserved */
	u32 testr;		/* Test register */
	u32 versionr;		/* Version register */
	u32 idr;		/* ID register */
	u32 unimpl[192];	/* Unimplemented */
	u32 dmadr[256];		/* FIFO locations in DMA mode */
};
0088
enum {
	/* SCR register bits used by this driver */
	SCR_SCONTROL_DET_ENABLE		= 0x00000001,
	SCR_SSTATUS_DET_PRESENT		= 0x00000001,
	SCR_SERROR_DIAG_X		= 0x04000000,	/* exchanged (COMINIT) */

	/* DWC SATA register bits */
	SATA_DWC_TXFIFO_DEPTH		= 0x01FF,
	SATA_DWC_RXFIFO_DEPTH		= 0x01FF,
	SATA_DWC_DMACR_TMOD_TXCHEN	= 0x00000004,
	SATA_DWC_DMACR_TXCHEN		= (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN),
	SATA_DWC_DMACR_RXCHEN		= (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN),
	SATA_DWC_DMACR_TXRXCH_CLEAR	= SATA_DWC_DMACR_TMOD_TXCHEN,
	/* interrupt pending register bits */
	SATA_DWC_INTPR_DMAT		= 0x00000001,
	SATA_DWC_INTPR_NEWFP		= 0x00000002,	/* new first-party DMA setup FIS */
	SATA_DWC_INTPR_PMABRT		= 0x00000004,
	SATA_DWC_INTPR_ERR		= 0x00000008,
	SATA_DWC_INTPR_NEWBIST		= 0x00000010,
	SATA_DWC_INTPR_IPF		= 0x10000000,
	/* interrupt mask register bits (enable when set) */
	SATA_DWC_INTMR_DMATM		= 0x00000001,
	SATA_DWC_INTMR_NEWFPM		= 0x00000002,
	SATA_DWC_INTMR_PMABRTM		= 0x00000004,
	SATA_DWC_INTMR_ERRM		= 0x00000008,
	SATA_DWC_INTMR_NEWBISTM		= 0x00000010,
	/* link layer control register bits */
	SATA_DWC_LLCR_SCRAMEN		= 0x00000001,
	SATA_DWC_LLCR_DESCRAMEN		= 0x00000002,
	SATA_DWC_LLCR_RPDEN		= 0x00000004,

	/* SError bits that raise an error interrupt when unmasked */
	SATA_DWC_SERROR_ERR_BITS	= 0x0FFF0F03
};
0117
/* Extract the SPD (negotiated speed) field from an SCR0 value. */
#define SATA_DWC_SCR0_SPD_GET(v)	(((v) >> 4) & 0x0000000F)
/* Clear one DMA channel enable while keeping the TMOD bit set. */
#define SATA_DWC_DMACR_TX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_TXCHEN) |\
					 SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DMACR_RX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_RXCHEN) |\
					 SATA_DWC_DMACR_TMOD_TXCHEN)
/* Burst sizes are programmed in words (bytes/4); the read size sits at bit 16. */
#define SATA_DWC_DBTSR_MWR(size)	(((size)/4) & SATA_DWC_TXFIFO_DEPTH)
#define SATA_DWC_DBTSR_MRD(size)	((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\
					 << 16)
/* Per-controller (host) private data. */
struct sata_dwc_device {
	struct device		*dev;		/* generic device struct */
	struct ata_probe_ent	*pe;		/* ptr to probe-ent */
	struct ata_host		*host;
	struct sata_dwc_regs __iomem *sata_dwc_regs;	/* DWC-specific registers */
	u32			sactive_issued;	/* issued queued ops */
	u32			sactive_queued;	/* queued ops */
	struct phy		*phy;
	phys_addr_t		dmadr;		/* physical address of DMA FIFO */
#ifdef CONFIG_SATA_DWC_OLD_DMA
	struct dw_dma_chip	*dma;		/* embedded DW DMA controller */
#endif
};
0139
0140
0141
0142
0143
/* One slot beyond the NCQ queue depth, for internal/non-NCQ commands. */
#define SATA_DWC_QCMD_MAX	(ATA_MAX_QUEUE + 1)

/*
 * Per-port private data: per-tag command and DMA state, plus the
 * dmaengine channel used for transfers on this port.
 */
struct sata_dwc_device_port {
	struct sata_dwc_device	*hsdev;
	int			cmd_issued[SATA_DWC_QCMD_MAX];	/* SATA_DWC_CMD_ISSUED_* */
	int			dma_pending[SATA_DWC_QCMD_MAX];	/* SATA_DWC_DMA_PENDING_* */

	/* DMA info */
	struct dma_chan		*chan;
	struct dma_async_tx_descriptor *desc[SATA_DWC_QCMD_MAX];
	u32			dma_interrupt_count;
};
0156
0157
0158
0159
/* Convenience accessors from libata objects to driver private data. */
#define HSDEV_FROM_HOST(host)	((struct sata_dwc_device *)(host)->private_data)
#define HSDEV_FROM_AP(ap)	((struct sata_dwc_device *)(ap)->host->private_data)
#define HSDEVP_FROM_AP(ap)	((struct sata_dwc_device_port *)(ap)->private_data)
#define HSDEV_FROM_QC(qc)	((struct sata_dwc_device *)(qc)->ap->host->private_data)
#define HSDEV_FROM_HSDEVP(p)	((struct sata_dwc_device *)(p)->hsdev)
0165
enum {
	/* cmd_issued[] states, tracked per tag */
	SATA_DWC_CMD_ISSUED_NOT		= 0,
	SATA_DWC_CMD_ISSUED_PEND	= 1,
	SATA_DWC_CMD_ISSUED_EXEC	= 2,
	SATA_DWC_CMD_ISSUED_NODATA	= 3,

	/* dma_pending[] states, tracked per tag */
	SATA_DWC_DMA_PENDING_NONE	= 0,
	SATA_DWC_DMA_PENDING_TX		= 1,
	SATA_DWC_DMA_PENDING_RX		= 2,
};
0176
0177
0178
0179
/* Prototypes (used before their definitions below) */
static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc);
static void sata_dwc_dma_xfer_complete(struct ata_port *ap);
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
0184
0185 #ifdef CONFIG_SATA_DWC_OLD_DMA
0186
0187 #include <linux/platform_data/dma-dw.h>
0188 #include <linux/dma/dw.h>
0189
/*
 * Slave configuration for the DW DMA controller embedded in the SATA core.
 * m_master/p_master select the AHB masters used for memory vs. peripheral
 * accesses — presumably fixed by hardware wiring; confirm against the SoC
 * datasheet before changing.
 */
static struct dw_dma_slave sata_dwc_dma_dws = {
	.src_id = 0,
	.dst_id = 0,
	.m_master = 1,
	.p_master = 0,
};
0196
0197 static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
0198 {
0199 struct dw_dma_slave *dws = &sata_dwc_dma_dws;
0200
0201 if (dws->dma_dev != chan->device->dev)
0202 return false;
0203
0204 chan->private = dws;
0205 return true;
0206 }
0207
0208 static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
0209 {
0210 struct sata_dwc_device *hsdev = hsdevp->hsdev;
0211 struct dw_dma_slave *dws = &sata_dwc_dma_dws;
0212 struct device *dev = hsdev->dev;
0213 dma_cap_mask_t mask;
0214
0215 dws->dma_dev = dev;
0216
0217 dma_cap_zero(mask);
0218 dma_cap_set(DMA_SLAVE, mask);
0219
0220
0221 hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
0222 if (!hsdevp->chan) {
0223 dev_err(dev, "%s: dma channel unavailable\n", __func__);
0224 return -EAGAIN;
0225 }
0226
0227 return 0;
0228 }
0229
/*
 * Initialize the embedded DW DMA controller (legacy path used when the
 * device tree has no "dmas" property).  All resources are devm-managed
 * except the dw_dma_probe() itself, undone by sata_dwc_dma_exit_old().
 */
static int sata_dwc_dma_init_old(struct platform_device *pdev,
				 struct sata_dwc_device *hsdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	hsdev->dma = devm_kzalloc(dev, sizeof(*hsdev->dma), GFP_KERNEL);
	if (!hsdev->dma)
		return -ENOMEM;

	hsdev->dma->dev = dev;
	hsdev->dma->id = pdev->id;

	/* Get SATA DMA interrupt number (second interrupt in the node) */
	hsdev->dma->irq = irq_of_parse_and_map(np, 1);
	if (hsdev->dma->irq == NO_IRQ) {
		dev_err(dev, "no SATA DMA irq\n");
		return -ENODEV;
	}

	/* Map the SATA DMA register block (second memory resource) */
	hsdev->dma->regs = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(hsdev->dma->regs))
		return PTR_ERR(hsdev->dma->regs);

	/* Initialize the AHB DMA controller */
	return dw_dma_probe(hsdev->dma);
}
0258
0259 static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev)
0260 {
0261 if (!hsdev->dma)
0262 return;
0263
0264 dw_dma_remove(hsdev->dma);
0265 }
0266
0267 #endif
0268
0269 static const char *get_prot_descript(u8 protocol)
0270 {
0271 switch (protocol) {
0272 case ATA_PROT_NODATA:
0273 return "ATA no data";
0274 case ATA_PROT_PIO:
0275 return "ATA PIO";
0276 case ATA_PROT_DMA:
0277 return "ATA DMA";
0278 case ATA_PROT_NCQ:
0279 return "ATA NCQ";
0280 case ATA_PROT_NCQ_NODATA:
0281 return "ATA NCQ no data";
0282 case ATAPI_PROT_NODATA:
0283 return "ATAPI no data";
0284 case ATAPI_PROT_PIO:
0285 return "ATAPI PIO";
0286 case ATAPI_PROT_DMA:
0287 return "ATAPI DMA";
0288 default:
0289 return "unknown";
0290 }
0291 }
0292
/*
 * dmaengine completion callback.  Takes the host lock because it updates
 * per-port state shared with sata_dwc_isr().
 */
static void dma_dwc_xfer_done(void *hsdev_instance)
{
	unsigned long flags;
	struct sata_dwc_device *hsdev = hsdev_instance;
	struct ata_host *host = (struct ata_host *)hsdev->host;
	struct ata_port *ap;
	struct sata_dwc_device_port *hsdevp;
	u8 tag = 0;
	unsigned int port = 0;	/* single-port controller */

	spin_lock_irqsave(&host->lock, flags);
	ap = host->ports[port];
	hsdevp = HSDEVP_FROM_AP(ap);
	tag = ap->link.active_tag;

	/*
	 * Each DMA command produces 2 interrupts: the DMA transfer-complete
	 * callback (here) and the SATA controller operation-done interrupt
	 * (in sata_dwc_isr()).  Complete the command only once both have
	 * been seen, hence the even-count check below.
	 */
	hsdevp->dma_interrupt_count++;
	sata_dwc_clear_dmacr(hsdevp, tag);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
		dev_err(ap->dev, "DMA not pending tag=0x%02x pending=%d\n",
			tag, hsdevp->dma_pending[tag]);
	}

	if ((hsdevp->dma_interrupt_count % 2) == 0)
		sata_dwc_dma_xfer_complete(ap);

	spin_unlock_irqrestore(&host->lock, flags);
}
0326
0327 static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd *qc)
0328 {
0329 struct ata_port *ap = qc->ap;
0330 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
0331 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
0332 struct dma_slave_config sconf;
0333 struct dma_async_tx_descriptor *desc;
0334
0335 if (qc->dma_dir == DMA_DEV_TO_MEM) {
0336 sconf.src_addr = hsdev->dmadr;
0337 sconf.device_fc = false;
0338 } else {
0339 sconf.dst_addr = hsdev->dmadr;
0340 sconf.device_fc = false;
0341 }
0342
0343 sconf.direction = qc->dma_dir;
0344 sconf.src_maxburst = AHB_DMA_BRST_DFLT / 4;
0345 sconf.dst_maxburst = AHB_DMA_BRST_DFLT / 4;
0346 sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
0347 sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
0348
0349 dmaengine_slave_config(hsdevp->chan, &sconf);
0350
0351
0352 desc = dmaengine_prep_slave_sg(hsdevp->chan, qc->sg, qc->n_elem,
0353 qc->dma_dir,
0354 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
0355
0356 if (!desc)
0357 return NULL;
0358
0359 desc->callback = dma_dwc_xfer_done;
0360 desc->callback_param = hsdev;
0361
0362 dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pa\n", __func__,
0363 qc->sg, qc->n_elem, &hsdev->dmadr);
0364
0365 return desc;
0366 }
0367
0368 static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
0369 {
0370 if (scr > SCR_NOTIFICATION) {
0371 dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
0372 __func__, scr);
0373 return -EINVAL;
0374 }
0375
0376 *val = sata_dwc_readl(link->ap->ioaddr.scr_addr + (scr * 4));
0377 dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
0378 link->ap->print_id, scr, *val);
0379
0380 return 0;
0381 }
0382
0383 static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
0384 {
0385 dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
0386 link->ap->print_id, scr, val);
0387 if (scr > SCR_NOTIFICATION) {
0388 dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
0389 __func__, scr);
0390 return -EINVAL;
0391 }
0392 sata_dwc_writel(link->ap->ioaddr.scr_addr + (scr * 4), val);
0393
0394 return 0;
0395 }
0396
0397 static void clear_serror(struct ata_port *ap)
0398 {
0399 u32 val;
0400 sata_dwc_scr_read(&ap->link, SCR_ERROR, &val);
0401 sata_dwc_scr_write(&ap->link, SCR_ERROR, val);
0402 }
0403
/*
 * Acknowledge pending controller interrupts by writing intpr back to
 * itself (presumably write-1-to-clear — confirm against the DWC manual).
 * NOTE(review): the 'bit' argument is ignored; ALL currently pending
 * bits are cleared, not just the one the caller names.
 */
static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
{
	sata_dwc_writel(&hsdev->sata_dwc_regs->intpr,
			sata_dwc_readl(&hsdev->sata_dwc_regs->intpr));
}
0409
0410 static u32 qcmd_tag_to_mask(u8 tag)
0411 {
0412 return 0x00000001 << (tag & 0x1f);
0413 }
0414
0415
/*
 * Handle an error interrupt: log controller state, clear SError and the
 * pending error interrupt, then hand the failure to libata EH via
 * ata_port_abort().
 */
static void sata_dwc_error_intr(struct ata_port *ap,
				struct sata_dwc_device *hsdev, uint intpr)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned int err_mask = 0, action = 0;
	struct ata_queued_cmd *qc;
	u32 serror;
	u8 status, tag;

	ata_ehi_clear_desc(ehi);

	sata_dwc_scr_read(&ap->link, SCR_ERROR, &serror);
	status = ap->ops->sff_check_status(ap);

	tag = ap->link.active_tag;

	dev_err(ap->dev,
		"%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x dma_intp=%d pending=%d issued=%d",
		__func__, serror, intpr, status, hsdevp->dma_interrupt_count,
		hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]);

	/* Clear error register and interrupt bit */
	clear_serror(ap);
	clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);

	/* This is the only error happening now.  TODO check for exact error */
	err_mask |= AC_ERR_HOST_BUS;
	action |= ATA_EH_RESET;

	/* Pass this on to EH */
	ehi->serror |= serror;
	ehi->action |= action;

	/* Attribute the error to the active command if there is one. */
	qc = ata_qc_from_tag(ap, tag);
	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	ata_port_abort(ap);
}
0459
0460
0461
0462
0463
0464
0465
0466
/*
 * Main interrupt handler for the controller (single port).
 *
 * Dispatch order on the interrupt pending register (intpr):
 *  1. error interrupt        -> sata_dwc_error_intr()
 *  2. NEWFP (new first-party DMA setup FIS for an NCQ command)
 *                            -> start the prepared DMA for that tag
 *  3. otherwise              -> command-completion handling, first for
 *     the non-NCQ case, then walking the tag mask for NCQ completions.
 *
 * Runs entirely under host->lock.
 */
static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
{
	struct ata_host *host = (struct ata_host *)dev_instance;
	struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	unsigned long flags;
	u8 status, tag;
	int handled, num_processed, port = 0;
	uint intpr, sactive, sactive2, tag_mask;
	struct sata_dwc_device_port *hsdevp;
	hsdev->sactive_issued = 0;

	spin_lock_irqsave(&host->lock, flags);

	/* Read the interrupt register */
	intpr = sata_dwc_readl(&hsdev->sata_dwc_regs->intpr);

	ap = host->ports[port];
	hsdevp = HSDEVP_FROM_AP(ap);

	dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr,
		ap->link.active_tag);

	/* Check for error interrupt */
	if (intpr & SATA_DWC_INTPR_ERR) {
		sata_dwc_error_intr(ap, hsdev, intpr);
		handled = 1;
		goto DONE;
	}

	/* Check for DMA SETUP FIS (FP DMA) interrupt */
	if (intpr & SATA_DWC_INTPR_NEWFP) {
		clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);

		/* The device told us which queued command it is starting. */
		tag = (u8)(sata_dwc_readl(&hsdev->sata_dwc_regs->fptagr));
		dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
		if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
			dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);

		hsdev->sactive_issued |= qcmd_tag_to_mask(tag);

		qc = ata_qc_from_tag(ap, tag);
		if (unlikely(!qc)) {
			dev_err(ap->dev, "failed to get qc");
			handled = 1;
			goto DONE;
		}
		/*
		 * Start FP DMA for NCQ command.  At this point the tag is the
		 * active tag: it matches the command about to start.
		 */
		trace_ata_bmdma_start(ap, &qc->tf, tag);
		qc->ap->link.active_tag = tag;
		sata_dwc_bmdma_start_by_tag(qc, tag);

		handled = 1;
		goto DONE;
	}
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
	/* Tags we issued that are no longer active = completed. */
	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;

	/* If no sactive issued and tag_mask is zero then this is not NCQ. */
	if (hsdev->sactive_issued == 0 && tag_mask == 0) {
		if (ap->link.active_tag == ATA_TAG_POISON)
			tag = 0;
		else
			tag = ap->link.active_tag;
		qc = ata_qc_from_tag(ap, tag);

		/* Device interrupt with no active qc (or a polled command)? */
		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
			dev_err(ap->dev,
				"%s interrupt with no active qc qc=%p\n",
				__func__, qc);
			ap->ops->sff_check_status(ap);
			handled = 1;
			goto DONE;
		}
		status = ap->ops->sff_check_status(ap);

		qc->ap->link.active_tag = tag;
		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

		if (status & ATA_ERR) {
			dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
			sata_dwc_qc_complete(ap, qc);
			handled = 1;
			goto DONE;
		}

		dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
			__func__, get_prot_descript(qc->tf.protocol));
DRVSTILLBUSY:
		if (ata_is_dma(qc->tf.protocol)) {
			/*
			 * Each DMA transaction produces 2 interrupts: the
			 * DMA-done callback and this SATA controller
			 * interrupt.  Complete the command only after both
			 * have been seen (even interrupt count).
			 */
			hsdevp->dma_interrupt_count++;
			if (hsdevp->dma_pending[tag] == \
					SATA_DWC_DMA_PENDING_NONE) {
				dev_err(ap->dev,
					"%s: DMA not pending intpr=0x%08x status=0x%08x pending=%d\n",
					__func__, intpr, status,
					hsdevp->dma_pending[tag]);
			}

			if ((hsdevp->dma_interrupt_count % 2) == 0)
				sata_dwc_dma_xfer_complete(ap);
		} else if (ata_is_pio(qc->tf.protocol)) {
			/* PIO: drive the SFF host state machine. */
			ata_sff_hsm_move(ap, qc, status, 0);
			handled = 1;
			goto DONE;
		} else {
			if (unlikely(sata_dwc_qc_complete(ap, qc)))
				goto DRVSTILLBUSY;
		}

		handled = 1;
		goto DONE;
	}

	/*
	 * This is an NCQ command.  Figure out for which tags we have received
	 * a completion interrupt: one interrupt may serve as completion for
	 * more than one queued operation, so each completed command must be
	 * processed below.
	 */

	/* process completed commands */
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;

	if (sactive != 0 || hsdev->sactive_issued > 1 || tag_mask > 1) {
		dev_dbg(ap->dev,
			"%s NCQ:sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
			__func__, sactive, hsdev->sactive_issued, tag_mask);
	}

	if ((tag_mask | hsdev->sactive_issued) != hsdev->sactive_issued) {
		dev_warn(ap->dev,
			 "Bad tag mask? sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
			 sactive, hsdev->sactive_issued, tag_mask);
	}

	/* read just to clear ... not bad if currently still busy */
	status = ap->ops->sff_check_status(ap);
	dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status);

	tag = 0;
	num_processed = 0;
	while (tag_mask) {
		num_processed++;
		/* Advance to the next completed tag (lowest set bit). */
		while (!(tag_mask & 0x00000001)) {
			tag++;
			tag_mask <<= 1;
		}

		tag_mask &= (~0x00000001);
		qc = ata_qc_from_tag(ap, tag);
		if (unlikely(!qc)) {
			dev_err(ap->dev, "failed to get qc");
			handled = 1;
			goto DONE;
		}

		/* To be picked up by completion functions */
		qc->ap->link.active_tag = tag;
		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

		/* Let libata/scsi layers handle error */
		if (status & ATA_ERR) {
			dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__,
				status);
			sata_dwc_qc_complete(ap, qc);
			handled = 1;
			goto DONE;
		}

		/* Process completed command */
		dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
			get_prot_descript(qc->tf.protocol));
		if (ata_is_dma(qc->tf.protocol)) {
			hsdevp->dma_interrupt_count++;
			if (hsdevp->dma_pending[tag] == \
					SATA_DWC_DMA_PENDING_NONE)
				dev_warn(ap->dev, "%s: DMA not pending?\n",
					 __func__);
			if ((hsdevp->dma_interrupt_count % 2) == 0)
				sata_dwc_dma_xfer_complete(ap);
		} else {
			if (unlikely(sata_dwc_qc_complete(ap, qc)))
				goto STILLBUSY;
		}
		continue;

STILLBUSY:
		ap->stats.idle_irq++;
		dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
			 ap->print_id);
	}

	/*
	 * Check to see if any commands completed while we were processing our
	 * initial set of completed commands (reading of status clears
	 * interrupts, so we might miss a completed command interrupt if one
	 * came in while we were processing; we read status as part of
	 * processing a completed command).
	 */
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive2);
	if (sactive2 != sactive) {
		dev_dbg(ap->dev,
			"More completed - sactive=0x%x sactive2=0x%x\n",
			sactive, sactive2);
	}
	handled = 1;

DONE:
	spin_unlock_irqrestore(&host->lock, flags);
	return IRQ_RETVAL(handled);
}
0692
0693 static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
0694 {
0695 struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
0696 u32 dmacr = sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr);
0697
0698 if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
0699 dmacr = SATA_DWC_DMACR_RX_CLEAR(dmacr);
0700 sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
0701 } else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
0702 dmacr = SATA_DWC_DMACR_TX_CLEAR(dmacr);
0703 sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
0704 } else {
0705
0706
0707
0708
0709 dev_err(hsdev->dev,
0710 "%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n",
0711 __func__, tag, hsdevp->dma_pending[tag], dmacr);
0712 sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
0713 SATA_DWC_DMACR_TXRXCH_CLEAR);
0714 }
0715 }
0716
0717 static void sata_dwc_dma_xfer_complete(struct ata_port *ap)
0718 {
0719 struct ata_queued_cmd *qc;
0720 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
0721 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
0722 u8 tag = 0;
0723
0724 tag = ap->link.active_tag;
0725 qc = ata_qc_from_tag(ap, tag);
0726 if (!qc) {
0727 dev_err(ap->dev, "failed to get qc");
0728 return;
0729 }
0730
0731 if (ata_is_dma(qc->tf.protocol)) {
0732 if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
0733 dev_err(ap->dev,
0734 "%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n",
0735 __func__,
0736 sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
0737 }
0738
0739 hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
0740 sata_dwc_qc_complete(ap, qc);
0741 ap->link.active_tag = ATA_TAG_POISON;
0742 } else {
0743 sata_dwc_qc_complete(ap, qc);
0744 }
0745 }
0746
0747 static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc)
0748 {
0749 u8 status = 0;
0750 u32 mask = 0x0;
0751 u8 tag = qc->hw_tag;
0752 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
0753 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
0754 hsdev->sactive_queued = 0;
0755
0756 if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
0757 dev_err(ap->dev, "TX DMA PENDING\n");
0758 else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX)
0759 dev_err(ap->dev, "RX DMA PENDING\n");
0760 dev_dbg(ap->dev,
0761 "QC complete cmd=0x%02x status=0x%02x ata%u: protocol=%d\n",
0762 qc->tf.command, status, ap->print_id, qc->tf.protocol);
0763
0764
0765 mask = (~(qcmd_tag_to_mask(tag)));
0766 hsdev->sactive_queued = hsdev->sactive_queued & mask;
0767 hsdev->sactive_issued = hsdev->sactive_issued & mask;
0768 ata_qc_complete(qc);
0769 return 0;
0770 }
0771
0772 static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
0773 {
0774
0775 sata_dwc_writel(&hsdev->sata_dwc_regs->intmr,
0776 SATA_DWC_INTMR_ERRM |
0777 SATA_DWC_INTMR_NEWFPM |
0778 SATA_DWC_INTMR_PMABRTM |
0779 SATA_DWC_INTMR_DMATM);
0780
0781
0782
0783
0784 sata_dwc_writel(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
0785
0786 dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
0787 __func__, sata_dwc_readl(&hsdev->sata_dwc_regs->intmr),
0788 sata_dwc_readl(&hsdev->sata_dwc_regs->errmr));
0789 }
0790
0791 static void sata_dwc_setup_port(struct ata_ioports *port, void __iomem *base)
0792 {
0793 port->cmd_addr = base + 0x00;
0794 port->data_addr = base + 0x00;
0795
0796 port->error_addr = base + 0x04;
0797 port->feature_addr = base + 0x04;
0798
0799 port->nsect_addr = base + 0x08;
0800
0801 port->lbal_addr = base + 0x0c;
0802 port->lbam_addr = base + 0x10;
0803 port->lbah_addr = base + 0x14;
0804
0805 port->device_addr = base + 0x18;
0806 port->command_addr = base + 0x1c;
0807 port->status_addr = base + 0x1c;
0808
0809 port->altstatus_addr = base + 0x20;
0810 port->ctl_addr = base + 0x20;
0811 }
0812
0813 static int sata_dwc_dma_get_channel(struct sata_dwc_device_port *hsdevp)
0814 {
0815 struct sata_dwc_device *hsdev = hsdevp->hsdev;
0816 struct device *dev = hsdev->dev;
0817
0818 #ifdef CONFIG_SATA_DWC_OLD_DMA
0819 if (!of_find_property(dev->of_node, "dmas", NULL))
0820 return sata_dwc_dma_get_channel_old(hsdevp);
0821 #endif
0822
0823 hsdevp->chan = dma_request_chan(dev, "sata-dma");
0824 if (IS_ERR(hsdevp->chan)) {
0825 dev_err(dev, "failed to allocate dma channel: %ld\n",
0826 PTR_ERR(hsdevp->chan));
0827 return PTR_ERR(hsdevp->chan);
0828 }
0829
0830 return 0;
0831 }
0832
0833
0834
0835
0836
0837
0838
0839 static int sata_dwc_port_start(struct ata_port *ap)
0840 {
0841 int err = 0;
0842 struct sata_dwc_device *hsdev;
0843 struct sata_dwc_device_port *hsdevp = NULL;
0844 struct device *pdev;
0845 int i;
0846
0847 hsdev = HSDEV_FROM_AP(ap);
0848
0849 dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);
0850
0851 hsdev->host = ap->host;
0852 pdev = ap->host->dev;
0853 if (!pdev) {
0854 dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
0855 err = -ENODEV;
0856 goto CLEANUP;
0857 }
0858
0859
0860 hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
0861 if (!hsdevp) {
0862 err = -ENOMEM;
0863 goto CLEANUP;
0864 }
0865 hsdevp->hsdev = hsdev;
0866
0867 err = sata_dwc_dma_get_channel(hsdevp);
0868 if (err)
0869 goto CLEANUP_ALLOC;
0870
0871 err = phy_power_on(hsdev->phy);
0872 if (err)
0873 goto CLEANUP_ALLOC;
0874
0875 for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
0876 hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
0877
0878 ap->bmdma_prd = NULL;
0879 ap->bmdma_prd_dma = 0;
0880
0881 if (ap->port_no == 0) {
0882 dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
0883 __func__);
0884 sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
0885 SATA_DWC_DMACR_TXRXCH_CLEAR);
0886
0887 dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
0888 __func__);
0889 sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
0890 (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
0891 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
0892 }
0893
0894
0895 clear_serror(ap);
0896 ap->private_data = hsdevp;
0897 dev_dbg(ap->dev, "%s: done\n", __func__);
0898 return 0;
0899
0900 CLEANUP_ALLOC:
0901 kfree(hsdevp);
0902 CLEANUP:
0903 dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
0904 return err;
0905 }
0906
0907 static void sata_dwc_port_stop(struct ata_port *ap)
0908 {
0909 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
0910 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
0911
0912 dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);
0913
0914 dmaengine_terminate_sync(hsdevp->chan);
0915 dma_release_channel(hsdevp->chan);
0916 phy_power_off(hsdev->phy);
0917
0918 kfree(hsdevp);
0919 ap->private_data = NULL;
0920 }
0921
0922
0923
0924
0925
0926
0927
0928
/*
 * Record the issue state for a tag, then issue the taskfile command via
 * the standard SFF path.  SError is cleared first so stale error bits
 * cannot be attributed to the new command.
 */
static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
					 struct ata_taskfile *tf,
					 u8 tag, u32 cmd_issued)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	hsdevp->cmd_issued[tag] = cmd_issued;

	/*
	 * Clear SError before executing a new command.  The error bits must
	 * be zero or the controller may refuse/misreport the command.
	 */
	clear_serror(ap);
	ata_sff_exec_command(ap, tf);
}
0946
/* Mark the tag pending and issue the command; DMA starts later on NEWFP. */
static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
	sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
				     SATA_DWC_CMD_ISSUED_PEND);
}
0952
0953 static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
0954 {
0955 u8 tag = qc->hw_tag;
0956
0957 if (!ata_is_ncq(qc->tf.protocol))
0958 tag = 0;
0959
0960 sata_dwc_bmdma_setup_by_tag(qc, tag);
0961 }
0962
0963 static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
0964 {
0965 int start_dma;
0966 u32 reg;
0967 struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
0968 struct ata_port *ap = qc->ap;
0969 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
0970 struct dma_async_tx_descriptor *desc = hsdevp->desc[tag];
0971 int dir = qc->dma_dir;
0972
0973 if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
0974 start_dma = 1;
0975 if (dir == DMA_TO_DEVICE)
0976 hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
0977 else
0978 hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
0979 } else {
0980 dev_err(ap->dev,
0981 "%s: Command not pending cmd_issued=%d (tag=%d) DMA NOT started\n",
0982 __func__, hsdevp->cmd_issued[tag], tag);
0983 start_dma = 0;
0984 }
0985
0986 if (start_dma) {
0987 sata_dwc_scr_read(&ap->link, SCR_ERROR, ®);
0988 if (reg & SATA_DWC_SERROR_ERR_BITS) {
0989 dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
0990 __func__, reg);
0991 }
0992
0993 if (dir == DMA_TO_DEVICE)
0994 sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
0995 SATA_DWC_DMACR_TXCHEN);
0996 else
0997 sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
0998 SATA_DWC_DMACR_RXCHEN);
0999
1000
1001 dmaengine_submit(desc);
1002 dma_async_issue_pending(hsdevp->chan);
1003 }
1004 }
1005
1006 static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
1007 {
1008 u8 tag = qc->hw_tag;
1009
1010 if (!ata_is_ncq(qc->tf.protocol))
1011 tag = 0;
1012
1013 sata_dwc_bmdma_start_by_tag(qc, tag);
1014 }
1015
/*
 * libata qc_issue callback.  For DMA protocols a descriptor is prepared
 * up front; NCQ commands are issued directly here (with the tag set in
 * SActive first), everything else goes through the generic BMDMA path.
 */
static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
{
	u32 sactive;
	u8 tag = qc->hw_tag;
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	/* Non-NCQ commands always use tag 0. */
	if (!ata_is_ncq(qc->tf.protocol))
		tag = 0;

	if (ata_is_dma(qc->tf.protocol)) {
		hsdevp->desc[tag] = dma_dwc_xfer_setup(qc);
		if (!hsdevp->desc[tag])
			return AC_ERR_SYSTEM;
	} else {
		hsdevp->desc[tag] = NULL;
	}

	if (ata_is_ncq(qc->tf.protocol)) {
		/* Mark this tag active before issuing the command. */
		sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
		sactive |= (0x00000001 << tag);
		sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive);

		trace_ata_tf_load(ap, &qc->tf);
		ap->ops->sff_tf_load(ap, &qc->tf);
		trace_ata_exec_command(ap, &qc->tf, tag);
		sata_dwc_exec_command_by_tag(ap, &qc->tf, tag,
					     SATA_DWC_CMD_ISSUED_PEND);
	} else {
		return ata_bmdma_qc_issue(qc);
	}
	return 0;
}
1049
/* libata error handler: defer entirely to the generic SFF EH. */
static void sata_dwc_error_handler(struct ata_port *ap)
{
	ata_sff_error_handler(ap);
}
1054
/*
 * Hard reset the link, then re-program the controller state the reset
 * clobbers: interrupt masks, DMA control and burst sizes.
 */
static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
	int ret;

	ret = sata_sff_hardreset(link, class, deadline);

	sata_dwc_enable_interrupts(hsdev);

	/* Reprogram the DMA registers after hardreset */
	sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
			SATA_DWC_DMACR_TXRXCH_CLEAR);

	/* Restore the default DMA burst transaction sizes */
	sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
			SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
			SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));

	return ret;
}
1076
/*
 * Intentionally a no-op: SATA has one device per port, so the legacy SFF
 * master/slave device-select write is not needed on this controller.
 */
static void sata_dwc_dev_select(struct ata_port *ap, unsigned int device)
{

}
1081
1082
1083
1084
/*
 * SCSI mid-layer host template.  NCQ-capable defaults from ATA_NCQ_SHT;
 * only the SG table size and the DMA boundary are overridden.
 */
static struct scsi_host_template sata_dwc_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.sg_tablesize = LIBATA_MAX_PRD,
	/* DMA boundary chosen to match the controller's transfer limits. */
	.dma_boundary = 0x1fff,
};
1102
/* libata port operations: SFF base with DWC-specific overrides. */
static struct ata_port_operations sata_dwc_ops = {
	.inherits		= &ata_sff_port_ops,

	.error_handler		= sata_dwc_error_handler,
	.hardreset		= sata_dwc_hardreset,

	.qc_issue		= sata_dwc_qc_issue,

	.scr_read		= sata_dwc_scr_read,
	.scr_write		= sata_dwc_scr_write,

	.port_start		= sata_dwc_port_start,
	.port_stop		= sata_dwc_port_stop,

	.sff_dev_select		= sata_dwc_dev_select,

	.bmdma_setup		= sata_dwc_bmdma_setup,
	.bmdma_start		= sata_dwc_bmdma_start,
};
1122
/* Capabilities of the single port: NCQ, PIO0-4, UDMA0-6. */
static const struct ata_port_info sata_dwc_port_info[] = {
	{
		.flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &sata_dwc_ops,
	},
};
1131
1132 static int sata_dwc_probe(struct platform_device *ofdev)
1133 {
1134 struct device *dev = &ofdev->dev;
1135 struct device_node *np = dev->of_node;
1136 struct sata_dwc_device *hsdev;
1137 u32 idr, versionr;
1138 char *ver = (char *)&versionr;
1139 void __iomem *base;
1140 int err = 0;
1141 int irq;
1142 struct ata_host *host;
1143 struct ata_port_info pi = sata_dwc_port_info[0];
1144 const struct ata_port_info *ppi[] = { &pi, NULL };
1145 struct resource *res;
1146
1147
1148 host = ata_host_alloc_pinfo(dev, ppi, SATA_DWC_MAX_PORTS);
1149 hsdev = devm_kzalloc(dev, sizeof(*hsdev), GFP_KERNEL);
1150 if (!host || !hsdev)
1151 return -ENOMEM;
1152
1153 host->private_data = hsdev;
1154
1155
1156 base = devm_platform_get_and_ioremap_resource(ofdev, 0, &res);
1157 if (IS_ERR(base))
1158 return PTR_ERR(base);
1159 dev_dbg(dev, "ioremap done for SATA register address\n");
1160
1161
1162 hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET;
1163 hsdev->dmadr = res->start + SATA_DWC_REG_OFFSET + offsetof(struct sata_dwc_regs, dmadr);
1164
1165
1166 host->ports[0]->ioaddr.cmd_addr = base;
1167 host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
1168 sata_dwc_setup_port(&host->ports[0]->ioaddr, base);
1169
1170
1171 idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr);
1172 versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr);
1173 dev_notice(dev, "id %d, controller version %c.%c%c\n", idr, ver[0], ver[1], ver[2]);
1174
1175
1176 hsdev->dev = dev;
1177
1178
1179 sata_dwc_enable_interrupts(hsdev);
1180
1181
1182 irq = irq_of_parse_and_map(np, 0);
1183 if (irq == NO_IRQ) {
1184 dev_err(dev, "no SATA DMA irq\n");
1185 return -ENODEV;
1186 }
1187
1188 #ifdef CONFIG_SATA_DWC_OLD_DMA
1189 if (!of_find_property(np, "dmas", NULL)) {
1190 err = sata_dwc_dma_init_old(ofdev, hsdev);
1191 if (err)
1192 return err;
1193 }
1194 #endif
1195
1196 hsdev->phy = devm_phy_optional_get(dev, "sata-phy");
1197 if (IS_ERR(hsdev->phy))
1198 return PTR_ERR(hsdev->phy);
1199
1200 err = phy_init(hsdev->phy);
1201 if (err)
1202 goto error_out;
1203
1204
1205
1206
1207
1208
1209 err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
1210 if (err)
1211 dev_err(dev, "failed to activate host");
1212
1213 return 0;
1214
1215 error_out:
1216 phy_exit(hsdev->phy);
1217 return err;
1218 }
1219
/*
 * Remove: detach the libata host first (stops all I/O), then shut down
 * the PHY and, for the legacy setup, the embedded DMA controller.
 */
static int sata_dwc_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);
	struct sata_dwc_device *hsdev = host->private_data;

	ata_host_detach(host);

	phy_exit(hsdev->phy);

#ifdef CONFIG_SATA_DWC_OLD_DMA
	/* Free SATA DMA resources allocated by the legacy init path */
	sata_dwc_dma_exit_old(hsdev);
#endif

	dev_dbg(dev, "done\n");
	return 0;
}
1238
/* Device-tree match table (AMCC 460EX integrates this SATA core). */
static const struct of_device_id sata_dwc_match[] = {
	{ .compatible = "amcc,sata-460ex", },
	{}
};
MODULE_DEVICE_TABLE(of, sata_dwc_match);
1244
/* Platform driver glue and module metadata. */
static struct platform_driver sata_dwc_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = sata_dwc_match,
	},
	.probe = sata_dwc_probe,
	.remove = sata_dwc_remove,
};

module_platform_driver(sata_dwc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver");
MODULE_VERSION(DRV_VERSION);