0001 /*
0002  * drivers/ata/pata_arasan_cf.c
0003  *
0004  * Arasan Compact Flash host controller source file
0005  *
0006  * Copyright (C) 2011 ST Microelectronics
0007  * Viresh Kumar <vireshk@kernel.org>
0008  *
0009  * This file is licensed under the terms of the GNU General Public
0010  * License version 2. This program is licensed "as is" without any
0011  * warranty of any kind, whether express or implied.
0012  */
0013 
0014 /*
0015  * The Arasan CompactFlash Device Controller IP core has three basic modes of
0016  * operation: PC card ATA using I/O mode, PC card ATA using memory mode, and
0017  * PC card ATA using True IDE mode. Currently this driver supports only True IDE mode.
0018  *
0019  * The Arasan CF controller shares its global IRQ register with the Arasan XD controller.
0020  *
0021  * Tested on arch/arm/mach-spear13xx
0022  */
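
     /*
      * In True IDE mode the card's task file appears at the fixed register
      * offsets defined below (ATA_DATA_PORT through ATA_ASTS_DCTR); probe()
      * points ap->ioaddr at vbase plus those offsets so that the generic
      * libata SFF helpers can drive the card directly.
      */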
0023 
0024 #include <linux/ata.h>
0025 #include <linux/clk.h>
0026 #include <linux/completion.h>
0027 #include <linux/delay.h>
0028 #include <linux/dmaengine.h>
0029 #include <linux/io.h>
0030 #include <linux/irq.h>
0031 #include <linux/kernel.h>
0032 #include <linux/libata.h>
0033 #include <linux/module.h>
0034 #include <linux/of.h>
0035 #include <linux/pata_arasan_cf_data.h>
0036 #include <linux/platform_device.h>
0037 #include <linux/pm.h>
0038 #include <linux/slab.h>
0039 #include <linux/spinlock.h>
0040 #include <linux/types.h>
0041 #include <linux/workqueue.h>
0042 #include <trace/events/libata.h>
0043 
0044 #define DRIVER_NAME "arasan_cf"
0045 #define TIMEOUT     msecs_to_jiffies(3000)
0046 
0047 /* Registers */
0048 /* CompactFlash Interface Status */
0049 #define CFI_STS         0x000
0050     #define STS_CHG             (1)
0051     #define BIN_AUDIO_OUT           (1 << 1)
0052     #define CARD_DETECT1            (1 << 2)
0053     #define CARD_DETECT2            (1 << 3)
0054     #define INP_ACK             (1 << 4)
0055     #define CARD_READY          (1 << 5)
0056     #define IO_READY            (1 << 6)
0057     #define B16_IO_PORT_SEL         (1 << 7)
0058 /* IRQ */
0059 #define IRQ_STS         0x004
0060 /* Interrupt Enable */
0061 #define IRQ_EN          0x008
0062     #define CARD_DETECT_IRQ         (1)
0063     #define STATUS_CHNG_IRQ         (1 << 1)
0064     #define MEM_MODE_IRQ            (1 << 2)
0065     #define IO_MODE_IRQ         (1 << 3)
0066     #define TRUE_IDE_MODE_IRQ       (1 << 8)
0067     #define PIO_XFER_ERR_IRQ        (1 << 9)
0068     #define BUF_AVAIL_IRQ           (1 << 10)
0069     #define XFER_DONE_IRQ           (1 << 11)
0070     #define IGNORED_IRQS    (STATUS_CHNG_IRQ | MEM_MODE_IRQ | IO_MODE_IRQ |\
0071                     TRUE_IDE_MODE_IRQ)
0072     #define TRUE_IDE_IRQS   (CARD_DETECT_IRQ | PIO_XFER_ERR_IRQ |\
0073                     BUF_AVAIL_IRQ | XFER_DONE_IRQ)
0074 /* Operation Mode */
0075 #define OP_MODE         0x00C
0076     #define CARD_MODE_MASK          (0x3)
0077     #define MEM_MODE            (0x0)
0078     #define IO_MODE             (0x1)
0079     #define TRUE_IDE_MODE           (0x2)
0080 
0081     #define CARD_TYPE_MASK          (1 << 2)
0082     #define CF_CARD             (0)
0083     #define CF_PLUS_CARD            (1 << 2)
0084 
0085     #define CARD_RESET          (1 << 3)
0086     #define CFHOST_ENB          (1 << 4)
0087     #define OUTPUTS_TRISTATE        (1 << 5)
0088     #define ULTRA_DMA_ENB           (1 << 8)
0089     #define MULTI_WORD_DMA_ENB      (1 << 9)
0090     #define DRQ_BLOCK_SIZE_MASK     (0x3 << 11)
0091     #define DRQ_BLOCK_SIZE_512      (0)
0092     #define DRQ_BLOCK_SIZE_1024     (1 << 11)
0093     #define DRQ_BLOCK_SIZE_2048     (2 << 11)
0094     #define DRQ_BLOCK_SIZE_4096     (3 << 11)
0095 /* CF Interface Clock Configuration */
0096 #define CLK_CFG         0x010
0097     #define CF_IF_CLK_MASK          (0XF)
0098 /* CF Timing Mode Configuration */
0099 #define TM_CFG          0x014
0100     #define MEM_MODE_TIMING_MASK        (0x3)
0101     #define MEM_MODE_TIMING_250NS       (0x0)
0102     #define MEM_MODE_TIMING_120NS       (0x1)
0103     #define MEM_MODE_TIMING_100NS       (0x2)
0104     #define MEM_MODE_TIMING_80NS        (0x3)
0105 
0106     #define IO_MODE_TIMING_MASK     (0x3 << 2)
0107     #define IO_MODE_TIMING_250NS        (0x0 << 2)
0108     #define IO_MODE_TIMING_120NS        (0x1 << 2)
0109     #define IO_MODE_TIMING_100NS        (0x2 << 2)
0110     #define IO_MODE_TIMING_80NS     (0x3 << 2)
0111 
0112     #define TRUEIDE_PIO_TIMING_MASK     (0x7 << 4)
0113     #define TRUEIDE_PIO_TIMING_SHIFT    4
0114 
0115     #define TRUEIDE_MWORD_DMA_TIMING_MASK   (0x7 << 7)
0116     #define TRUEIDE_MWORD_DMA_TIMING_SHIFT  7
0117 
0118     #define ULTRA_DMA_TIMING_MASK       (0x7 << 10)
0119     #define ULTRA_DMA_TIMING_SHIFT      10
0120 /* CF Transfer Address */
0121 #define XFER_ADDR       0x014
0122     #define XFER_ADDR_MASK          (0x7FF)
0123     #define MAX_XFER_COUNT          0x20000u
0124 /* Transfer Control */
0125 #define XFER_CTR        0x01C
0126     #define XFER_COUNT_MASK         (0x3FFFF)
0127     #define ADDR_INC_DISABLE        (1 << 24)
0128     #define XFER_WIDTH_MASK         (1 << 25)
0129     #define XFER_WIDTH_8B           (0)
0130     #define XFER_WIDTH_16B          (1 << 25)
0131 
0132     #define MEM_TYPE_MASK           (1 << 26)
0133     #define MEM_TYPE_COMMON         (0)
0134     #define MEM_TYPE_ATTRIBUTE      (1 << 26)
0135 
0136     #define MEM_IO_XFER_MASK        (1 << 27)
0137     #define MEM_XFER            (0)
0138     #define IO_XFER             (1 << 27)
0139 
0140     #define DMA_XFER_MODE           (1 << 28)
0141 
0142     #define AHB_BUS_NORMAL_PIO_OPRTN    (~(1 << 29))
0143     #define XFER_DIR_MASK           (1 << 30)
0144     #define XFER_READ           (0)
0145     #define XFER_WRITE          (1 << 30)
0146 
0147     #define XFER_START          (1 << 31)
0148 /* Write Data Port */
0149 #define WRITE_PORT      0x024
0150 /* Read Data Port */
0151 #define READ_PORT       0x028
0152 /* ATA Data Port */
0153 #define ATA_DATA_PORT       0x030
0154     #define ATA_DATA_PORT_MASK      (0xFFFF)
0155 /* ATA Error/Features */
0156 #define ATA_ERR_FTR     0x034
0157 /* ATA Sector Count */
0158 #define ATA_SC          0x038
0159 /* ATA Sector Number */
0160 #define ATA_SN          0x03C
0161 /* ATA Cylinder Low */
0162 #define ATA_CL          0x040
0163 /* ATA Cylinder High */
0164 #define ATA_CH          0x044
0165 /* ATA Select Card/Head */
0166 #define ATA_SH          0x048
0167 /* ATA Status-Command */
0168 #define ATA_STS_CMD     0x04C
0169 /* ATA Alternate Status/Device Control */
0170 #define ATA_ASTS_DCTR       0x050
0171 /* Extended Write Data Port 0x200-0x3FC */
0172 #define EXT_WRITE_PORT      0x200
0173 /* Extended Read Data Port 0x400-0x5FC */
0174 #define EXT_READ_PORT       0x400
0175     #define FIFO_SIZE   0x200u
0176 /* Global Interrupt Status */
0177 #define GIRQ_STS        0x800
0178 /* Global Interrupt Status enable */
0179 #define GIRQ_STS_EN     0x804
0180 /* Global Interrupt Signal enable */
0181 #define GIRQ_SGN_EN     0x808
0182     #define GIRQ_CF     (1)
0183     #define GIRQ_XD     (1 << 1)
0184 
0185 /* Compact Flash Controller Dev Structure */
0186 struct arasan_cf_dev {
0187     /* pointer to ata_host structure */
0188     struct ata_host *host;
0189     /* clk structure */
0190     struct clk *clk;
0191 
0192     /* physical base address of controller */
0193     dma_addr_t pbase;
0194     /* virtual base address of controller */
0195     void __iomem *vbase;
0196     /* IRQ number */
0197     int irq;
0198 
0199     /* status to be updated to framework regarding DMA transfer */
0200     u8 dma_status;
0201     /* Card is present or not */
0202     u8 card_present;
0203 
0204     /* dma specific */
0205     /* Completion for transfer complete interrupt from controller */
0206     struct completion cf_completion;
0207     /* Completion for DMA transfer complete. */
0208     struct completion dma_completion;
0209     /* DMA channel allocated */
0210     struct dma_chan *dma_chan;
0211     /* Mask for DMA transfers */
0212     dma_cap_mask_t mask;
0213     /* DMA transfer work */
0214     struct work_struct work;
0215     /* DMA delayed finish work */
0216     struct delayed_work dwork;
0217     /* qc to be transferred using DMA */
0218     struct ata_queued_cmd *qc;
0219 };
0220 
0221 static struct scsi_host_template arasan_cf_sht = {
0222     ATA_BASE_SHT(DRIVER_NAME),
0223     .dma_boundary = 0xFFFFFFFFUL,
0224 };
0225 
0226 static void cf_dumpregs(struct arasan_cf_dev *acdev)
0227 {
0228     struct device *dev = acdev->host->dev;
0229 
0230     dev_dbg(dev, ": =========== REGISTER DUMP ===========");
0231     dev_dbg(dev, ": CFI_STS: %x", readl(acdev->vbase + CFI_STS));
0232     dev_dbg(dev, ": IRQ_STS: %x", readl(acdev->vbase + IRQ_STS));
0233     dev_dbg(dev, ": IRQ_EN: %x", readl(acdev->vbase + IRQ_EN));
0234     dev_dbg(dev, ": OP_MODE: %x", readl(acdev->vbase + OP_MODE));
0235     dev_dbg(dev, ": CLK_CFG: %x", readl(acdev->vbase + CLK_CFG));
0236     dev_dbg(dev, ": TM_CFG: %x", readl(acdev->vbase + TM_CFG));
0237     dev_dbg(dev, ": XFER_CTR: %x", readl(acdev->vbase + XFER_CTR));
0238     dev_dbg(dev, ": GIRQ_STS: %x", readl(acdev->vbase + GIRQ_STS));
0239     dev_dbg(dev, ": GIRQ_STS_EN: %x", readl(acdev->vbase + GIRQ_STS_EN));
0240     dev_dbg(dev, ": GIRQ_SGN_EN: %x", readl(acdev->vbase + GIRQ_SGN_EN));
0241     dev_dbg(dev, ": =====================================");
0242 }
0243 
0244 /* Enable/Disable global interrupts shared between CF and XD ctrlr. */
0245 static void cf_ginterrupt_enable(struct arasan_cf_dev *acdev, bool enable)
0246 {
0247     /* enable should be 0 or 1 */
0248     writel(enable, acdev->vbase + GIRQ_STS_EN);
0249     writel(enable, acdev->vbase + GIRQ_SGN_EN);
0250 }
0251 
0252 /* Enable/Disable CF interrupts */
0253 static inline void
0254 cf_interrupt_enable(struct arasan_cf_dev *acdev, u32 mask, bool enable)
0255 {
0256     u32 val = readl(acdev->vbase + IRQ_EN);
0257     /* clear & enable/disable irqs */
0258     if (enable) {
0259         writel(mask, acdev->vbase + IRQ_STS);
0260         writel(val | mask, acdev->vbase + IRQ_EN);
0261     } else
0262         writel(val & ~mask, acdev->vbase + IRQ_EN);
0263 }
0264 
0265 static inline void cf_card_reset(struct arasan_cf_dev *acdev)
0266 {
0267     u32 val = readl(acdev->vbase + OP_MODE);
0268 
0269     writel(val | CARD_RESET, acdev->vbase + OP_MODE);
0270     udelay(200);
0271     writel(val & ~CARD_RESET, acdev->vbase + OP_MODE);
0272 }
0273 
0274 static inline void cf_ctrl_reset(struct arasan_cf_dev *acdev)
0275 {
0276     writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
0277             acdev->vbase + OP_MODE);
0278     writel(readl(acdev->vbase + OP_MODE) | CFHOST_ENB,
0279             acdev->vbase + OP_MODE);
0280 }
0281 
0282 static void cf_card_detect(struct arasan_cf_dev *acdev, bool hotplugged)
0283 {
0284     struct ata_port *ap = acdev->host->ports[0];
0285     struct ata_eh_info *ehi = &ap->link.eh_info;
0286     u32 val = readl(acdev->vbase + CFI_STS);
0287 
0288     /* Both CD1 & CD2 should be low if the card is fully inserted */
0289     if (!(val & (CARD_DETECT1 | CARD_DETECT2))) {
0290         if (acdev->card_present)
0291             return;
0292         acdev->card_present = 1;
0293         cf_card_reset(acdev);
0294     } else {
0295         if (!acdev->card_present)
0296             return;
0297         acdev->card_present = 0;
0298     }
0299 
0300     if (hotplugged) {
0301         ata_ehi_hotplugged(ehi);
0302         ata_port_freeze(ap);
0303     }
0304 }
0305 
0306 static int cf_init(struct arasan_cf_dev *acdev)
0307 {
0308     struct arasan_cf_pdata *pdata = dev_get_platdata(acdev->host->dev);
0309     unsigned int if_clk;
0310     unsigned long flags;
0311     int ret = 0;
0312 
0313     ret = clk_prepare_enable(acdev->clk);
0314     if (ret) {
0315         dev_dbg(acdev->host->dev, "clock enable failed");
0316         return ret;
0317     }
0318 
0319     ret = clk_set_rate(acdev->clk, 166000000);
0320     if (ret) {
0321         dev_warn(acdev->host->dev, "clock set rate failed");
0322         clk_disable_unprepare(acdev->clk);
0323         return ret;
0324     }
0325 
0326     spin_lock_irqsave(&acdev->host->lock, flags);
0327     /* configure CF interface clock */
0328     /* TODO: read from device tree */
0329     if_clk = CF_IF_CLK_166M;
0330     if (pdata && pdata->cf_if_clk <= CF_IF_CLK_200M)
0331         if_clk = pdata->cf_if_clk;
0332 
0333     writel(if_clk, acdev->vbase + CLK_CFG);
0334 
0335     writel(TRUE_IDE_MODE | CFHOST_ENB, acdev->vbase + OP_MODE);
0336     cf_interrupt_enable(acdev, CARD_DETECT_IRQ, 1);
0337     cf_ginterrupt_enable(acdev, 1);
0338     spin_unlock_irqrestore(&acdev->host->lock, flags);
0339 
0340     return ret;
0341 }
0342 
0343 static void cf_exit(struct arasan_cf_dev *acdev)
0344 {
0345     unsigned long flags;
0346 
0347     spin_lock_irqsave(&acdev->host->lock, flags);
0348     cf_ginterrupt_enable(acdev, 0);
0349     cf_interrupt_enable(acdev, TRUE_IDE_IRQS, 0);
0350     cf_card_reset(acdev);
0351     writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
0352             acdev->vbase + OP_MODE);
0353     spin_unlock_irqrestore(&acdev->host->lock, flags);
0354     clk_disable_unprepare(acdev->clk);
0355 }
0356 
0357 static void dma_callback(void *dev)
0358 {
0359     struct arasan_cf_dev *acdev = dev;
0360 
0361     complete(&acdev->dma_completion);
0362 }
0363 
0364 static inline void dma_complete(struct arasan_cf_dev *acdev)
0365 {
0366     struct ata_queued_cmd *qc = acdev->qc;
0367     unsigned long flags;
0368 
0369     acdev->qc = NULL;
0370     ata_sff_interrupt(acdev->irq, acdev->host);
0371 
0372     spin_lock_irqsave(&acdev->host->lock, flags);
0373     if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
0374         ata_ehi_push_desc(&qc->ap->link.eh_info, "DMA Failed: Timeout");
0375     spin_unlock_irqrestore(&acdev->host->lock, flags);
0376 }
0377 
0378 static inline int wait4buf(struct arasan_cf_dev *acdev)
0379 {
0380     if (!wait_for_completion_timeout(&acdev->cf_completion, TIMEOUT)) {
0381         u32 rw = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
0382 
0383         dev_err(acdev->host->dev, "%s TimeOut", rw ? "write" : "read");
0384         return -ETIMEDOUT;
0385     }
0386 
0387     /* Check if PIO Error interrupt has occurred */
0388     if (acdev->dma_status & ATA_DMA_ERR)
0389         return -EAGAIN;
0390 
0391     return 0;
0392 }
0393 
0394 static int
0395 dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
0396 {
0397     struct dma_async_tx_descriptor *tx;
0398     struct dma_chan *chan = acdev->dma_chan;
0399     dma_cookie_t cookie;
0400     unsigned long flags = DMA_PREP_INTERRUPT;
0401     int ret = 0;
0402 
0403     tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);
0404     if (!tx) {
0405         dev_err(acdev->host->dev, "device_prep_dma_memcpy failed\n");
0406         return -EAGAIN;
0407     }
0408 
0409     tx->callback = dma_callback;
0410     tx->callback_param = acdev;
0411     cookie = tx->tx_submit(tx);
0412 
0413     ret = dma_submit_error(cookie);
0414     if (ret) {
0415         dev_err(acdev->host->dev, "dma_submit_error\n");
0416         return ret;
0417     }
0418 
0419     chan->device->device_issue_pending(chan);
0420 
0421     /* Wait for DMA to complete */
0422     if (!wait_for_completion_timeout(&acdev->dma_completion, TIMEOUT)) {
0423         dmaengine_terminate_all(chan);
0424         dev_err(acdev->host->dev, "wait_for_completion_timeout\n");
0425         return -ETIMEDOUT;
0426     }
0427 
0428     return ret;
0429 }
0430 
0431 static int sg_xfer(struct arasan_cf_dev *acdev, struct scatterlist *sg)
0432 {
0433     dma_addr_t dest = 0, src = 0;
0434     u32 xfer_cnt, sglen, dma_len, xfer_ctr;
0435     u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
0436     unsigned long flags;
0437     int ret = 0;
0438 
0439     sglen = sg_dma_len(sg);
0440     if (write) {
0441         src = sg_dma_address(sg);
0442         dest = acdev->pbase + EXT_WRITE_PORT;
0443     } else {
0444         dest = sg_dma_address(sg);
0445         src = acdev->pbase + EXT_READ_PORT;
0446     }
0447 
0448     /*
0449      * For each sg:
0450      * Up to MAX_XFER_COUNT bytes are transferred before the transfer
0451      * complete interrupt is raised. In between, a buffer available
0452      * interrupt is generated after every FIFO_SIZE bytes; at that point
0453      * the FIFO is refilled (or drained) with at most FIFO_SIZE bytes.
0454      */
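         /*
          * Worked example (hypothetical sizes): a 4096 byte sg entry fits in a
          * single xfer_cnt, so the inner loop performs 4096 / FIFO_SIZE = 8 DMA
          * memcpy transfers of 512 bytes each, calling wait4buf() before each
          * chunk for reads and after each chunk for writes.
          */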
0455     while (sglen) {
0456         xfer_cnt = min(sglen, MAX_XFER_COUNT);
0457         spin_lock_irqsave(&acdev->host->lock, flags);
0458         xfer_ctr = readl(acdev->vbase + XFER_CTR) &
0459             ~XFER_COUNT_MASK;
0460         writel(xfer_ctr | xfer_cnt | XFER_START,
0461                 acdev->vbase + XFER_CTR);
0462         spin_unlock_irqrestore(&acdev->host->lock, flags);
0463 
0464         /* continue dma xfers until current sg is completed */
0465         while (xfer_cnt) {
0466             /* wait for read to complete */
0467             if (!write) {
0468                 ret = wait4buf(acdev);
0469                 if (ret)
0470                     goto fail;
0471             }
0472 
0473             /* read/write FIFO in chunks of FIFO_SIZE */
0474             dma_len = min(xfer_cnt, FIFO_SIZE);
0475             ret = dma_xfer(acdev, src, dest, dma_len);
0476             if (ret) {
0477                 dev_err(acdev->host->dev, "dma failed");
0478                 goto fail;
0479             }
0480 
0481             if (write)
0482                 src += dma_len;
0483             else
0484                 dest += dma_len;
0485 
0486             sglen -= dma_len;
0487             xfer_cnt -= dma_len;
0488 
0489             /* wait for write to complete */
0490             if (write) {
0491                 ret = wait4buf(acdev);
0492                 if (ret)
0493                     goto fail;
0494             }
0495         }
0496     }
0497 
0498 fail:
0499     spin_lock_irqsave(&acdev->host->lock, flags);
0500     writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
0501             acdev->vbase + XFER_CTR);
0502     spin_unlock_irqrestore(&acdev->host->lock, flags);
0503 
0504     return ret;
0505 }
0506 
0507 /*
0508  * This routine uses the external DMA controller to read/write data to/from
0509  * the FIFO of the CF controller. Two transfer related interrupts are supported:
0510  * - buf_avail: generated as soon as a 512 byte buffer is available for reading,
0511  *  or an empty buffer is available for writing.
0512  * - xfer_done: generated once "xfer_size" bytes have been transferred to/from
0513  *  the FIFO. xfer_size is programmed in the XFER_CTR register.
0514  *
0515  * Max buffer size = FIFO_SIZE = 512 Bytes.
0516  * Max xfer_size = MAX_XFER_COUNT = 256 KB.
0517  */
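     /*
      * In outline (as implemented below): request a memcpy capable DMA channel,
      * walk qc->sg calling sg_xfer() for each entry, release the channel, and
      * then either poll the ATA status via delayed_finish() while BSY/DRQ is
      * still set or finish the command through dma_complete(); errors also end
      * up in dma_complete() after a controller reset.
      */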
0518 static void data_xfer(struct work_struct *work)
0519 {
0520     struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
0521             work);
0522     struct ata_queued_cmd *qc = acdev->qc;
0523     struct scatterlist *sg;
0524     unsigned long flags;
0525     u32 temp;
0526     int ret = 0;
0527 
0528     /* request dma channels */
0529     /* dma_request_chan() may sleep, so call it from process context */
0530     acdev->dma_chan = dma_request_chan(acdev->host->dev, "data");
0531     if (IS_ERR(acdev->dma_chan)) {
0532         dev_err(acdev->host->dev, "Unable to get dma_chan\n");
0533         acdev->dma_chan = NULL;
0534         goto chan_request_fail;
0535     }
0536 
0537     for_each_sg(qc->sg, sg, qc->n_elem, temp) {
0538         ret = sg_xfer(acdev, sg);
0539         if (ret)
0540             break;
0541     }
0542 
0543     dma_release_channel(acdev->dma_chan);
0544     acdev->dma_chan = NULL;
0545 
0546     /* data xferred successfully */
0547     if (!ret) {
0548         u32 status;
0549 
0550         spin_lock_irqsave(&acdev->host->lock, flags);
0551         status = ioread8(qc->ap->ioaddr.altstatus_addr);
0552         spin_unlock_irqrestore(&acdev->host->lock, flags);
0553         if (status & (ATA_BUSY | ATA_DRQ)) {
0554             ata_sff_queue_delayed_work(&acdev->dwork, 1);
0555             return;
0556         }
0557 
0558         goto sff_intr;
0559     }
0560 
0561     cf_dumpregs(acdev);
0562 
0563 chan_request_fail:
0564     spin_lock_irqsave(&acdev->host->lock, flags);
0565     /* error when transferring data to/from memory */
0566     qc->err_mask |= AC_ERR_HOST_BUS;
0567     qc->ap->hsm_task_state = HSM_ST_ERR;
0568 
0569     cf_ctrl_reset(acdev);
0570     spin_unlock_irqrestore(&acdev->host->lock, flags);
0571 sff_intr:
0572     dma_complete(acdev);
0573 }
0574 
0575 static void delayed_finish(struct work_struct *work)
0576 {
0577     struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
0578             dwork.work);
0579     struct ata_queued_cmd *qc = acdev->qc;
0580     unsigned long flags;
0581     u8 status;
0582 
0583     spin_lock_irqsave(&acdev->host->lock, flags);
0584     status = ioread8(qc->ap->ioaddr.altstatus_addr);
0585     spin_unlock_irqrestore(&acdev->host->lock, flags);
0586 
0587     if (status & (ATA_BUSY | ATA_DRQ))
0588         ata_sff_queue_delayed_work(&acdev->dwork, 1);
0589     else
0590         dma_complete(acdev);
0591 }
0592 
0593 static irqreturn_t arasan_cf_interrupt(int irq, void *dev)
0594 {
0595     struct arasan_cf_dev *acdev = ((struct ata_host *)dev)->private_data;
0596     unsigned long flags;
0597     u32 irqsts;
0598 
0599     irqsts = readl(acdev->vbase + GIRQ_STS);
0600     if (!(irqsts & GIRQ_CF))
0601         return IRQ_NONE;
0602 
0603     spin_lock_irqsave(&acdev->host->lock, flags);
0604     irqsts = readl(acdev->vbase + IRQ_STS);
0605     writel(irqsts, acdev->vbase + IRQ_STS);     /* clear irqs */
0606     writel(GIRQ_CF, acdev->vbase + GIRQ_STS);   /* clear girqs */
0607 
0608     /* handle only relevant interrupts */
0609     irqsts &= ~IGNORED_IRQS;
0610 
0611     if (irqsts & CARD_DETECT_IRQ) {
0612         cf_card_detect(acdev, 1);
0613         spin_unlock_irqrestore(&acdev->host->lock, flags);
0614         return IRQ_HANDLED;
0615     }
0616 
0617     if (irqsts & PIO_XFER_ERR_IRQ) {
0618         acdev->dma_status = ATA_DMA_ERR;
0619         writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
0620                 acdev->vbase + XFER_CTR);
0621         spin_unlock_irqrestore(&acdev->host->lock, flags);
0622         complete(&acdev->cf_completion);
0623         dev_err(acdev->host->dev, "pio xfer err irq\n");
0624         return IRQ_HANDLED;
0625     }
0626 
0627     spin_unlock_irqrestore(&acdev->host->lock, flags);
0628 
0629     if (irqsts & BUF_AVAIL_IRQ) {
0630         complete(&acdev->cf_completion);
0631         return IRQ_HANDLED;
0632     }
0633 
0634     if (irqsts & XFER_DONE_IRQ) {
0635         struct ata_queued_cmd *qc = acdev->qc;
0636 
0637         /* Send Complete only for write */
0638         if (qc->tf.flags & ATA_TFLAG_WRITE)
0639             complete(&acdev->cf_completion);
0640     }
0641 
0642     return IRQ_HANDLED;
0643 }
0644 
0645 static void arasan_cf_freeze(struct ata_port *ap)
0646 {
0647     struct arasan_cf_dev *acdev = ap->host->private_data;
0648 
0649     /* stop transfer and reset controller */
0650     writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
0651             acdev->vbase + XFER_CTR);
0652     cf_ctrl_reset(acdev);
0653     acdev->dma_status = ATA_DMA_ERR;
0654 
0655     ata_sff_dma_pause(ap);
0656     ata_sff_freeze(ap);
0657 }
0658 
0659 static void arasan_cf_error_handler(struct ata_port *ap)
0660 {
0661     struct arasan_cf_dev *acdev = ap->host->private_data;
0662 
0663     /*
0664      * DMA transfers using the external DMA controller may still be scheduled.
0665      * Abort them before handling the error. Refer to data_xfer() for further
0666      * details.
0667      */
0668     cancel_work_sync(&acdev->work);
0669     cancel_delayed_work_sync(&acdev->dwork);
0670     return ata_sff_error_handler(ap);
0671 }
0672 
0673 static void arasan_cf_dma_start(struct arasan_cf_dev *acdev)
0674 {
0675     struct ata_queued_cmd *qc = acdev->qc;
0676     struct ata_port *ap = qc->ap;
0677     struct ata_taskfile *tf = &qc->tf;
0678     u32 xfer_ctr = readl(acdev->vbase + XFER_CTR) & ~XFER_DIR_MASK;
0679     u32 write = tf->flags & ATA_TFLAG_WRITE;
0680 
0681     xfer_ctr |= write ? XFER_WRITE : XFER_READ;
0682     writel(xfer_ctr, acdev->vbase + XFER_CTR);
0683 
0684     ap->ops->sff_exec_command(ap, tf);
0685     ata_sff_queue_work(&acdev->work);
0686 }
0687 
0688 static unsigned int arasan_cf_qc_issue(struct ata_queued_cmd *qc)
0689 {
0690     struct ata_port *ap = qc->ap;
0691     struct arasan_cf_dev *acdev = ap->host->private_data;
0692 
0693     /* defer PIO handling to sff_qc_issue */
0694     if (!ata_is_dma(qc->tf.protocol))
0695         return ata_sff_qc_issue(qc);
0696 
0697     /* select the device */
0698     ata_wait_idle(ap);
0699     ata_sff_dev_select(ap, qc->dev->devno);
0700     ata_wait_idle(ap);
0701 
0702     /* start the command */
0703     switch (qc->tf.protocol) {
0704     case ATA_PROT_DMA:
0705         WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
0706 
0707         trace_ata_tf_load(ap, &qc->tf);
0708         ap->ops->sff_tf_load(ap, &qc->tf);
0709         acdev->dma_status = 0;
0710         acdev->qc = qc;
0711         trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
0712         arasan_cf_dma_start(acdev);
0713         ap->hsm_task_state = HSM_ST_LAST;
0714         break;
0715 
0716     default:
0717         WARN_ON(1);
0718         return AC_ERR_SYSTEM;
0719     }
0720 
0721     return 0;
0722 }
0723 
0724 static void arasan_cf_set_piomode(struct ata_port *ap, struct ata_device *adev)
0725 {
0726     struct arasan_cf_dev *acdev = ap->host->private_data;
0727     u8 pio = adev->pio_mode - XFER_PIO_0;
0728     unsigned long flags;
0729     u32 val;
0730 
0731     /* Arasan ctrl supports Mode0 -> Mode6 */
0732     if (pio > 6) {
0733         dev_err(ap->dev, "Unknown PIO mode\n");
0734         return;
0735     }
0736 
0737     spin_lock_irqsave(&acdev->host->lock, flags);
0738     val = readl(acdev->vbase + OP_MODE) &
0739         ~(ULTRA_DMA_ENB | MULTI_WORD_DMA_ENB | DRQ_BLOCK_SIZE_MASK);
0740     writel(val, acdev->vbase + OP_MODE);
0741     val = readl(acdev->vbase + TM_CFG) & ~TRUEIDE_PIO_TIMING_MASK;
0742     val |= pio << TRUEIDE_PIO_TIMING_SHIFT;
0743     writel(val, acdev->vbase + TM_CFG);
0744 
0745     cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 0);
0746     cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 1);
0747     spin_unlock_irqrestore(&acdev->host->lock, flags);
0748 }
0749 
0750 static void arasan_cf_set_dmamode(struct ata_port *ap, struct ata_device *adev)
0751 {
0752     struct arasan_cf_dev *acdev = ap->host->private_data;
0753     u32 opmode, tmcfg, dma_mode = adev->dma_mode;
0754     unsigned long flags;
0755 
0756     spin_lock_irqsave(&acdev->host->lock, flags);
0757     opmode = readl(acdev->vbase + OP_MODE) &
0758         ~(MULTI_WORD_DMA_ENB | ULTRA_DMA_ENB);
0759     tmcfg = readl(acdev->vbase + TM_CFG);
0760 
0761     if ((dma_mode >= XFER_UDMA_0) && (dma_mode <= XFER_UDMA_6)) {
0762         opmode |= ULTRA_DMA_ENB;
0763         tmcfg &= ~ULTRA_DMA_TIMING_MASK;
0764         tmcfg |= (dma_mode - XFER_UDMA_0) << ULTRA_DMA_TIMING_SHIFT;
0765     } else if ((dma_mode >= XFER_MW_DMA_0) && (dma_mode <= XFER_MW_DMA_4)) {
0766         opmode |= MULTI_WORD_DMA_ENB;
0767         tmcfg &= ~TRUEIDE_MWORD_DMA_TIMING_MASK;
0768         tmcfg |= (dma_mode - XFER_MW_DMA_0) <<
0769             TRUEIDE_MWORD_DMA_TIMING_SHIFT;
0770     } else {
0771         dev_err(ap->dev, "Unknown DMA mode\n");
0772         spin_unlock_irqrestore(&acdev->host->lock, flags);
0773         return;
0774     }
0775 
0776     writel(opmode, acdev->vbase + OP_MODE);
0777     writel(tmcfg, acdev->vbase + TM_CFG);
0778     writel(DMA_XFER_MODE, acdev->vbase + XFER_CTR);
0779 
0780     cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 0);
0781     cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 1);
0782     spin_unlock_irqrestore(&acdev->host->lock, flags);
0783 }
0784 
0785 static struct ata_port_operations arasan_cf_ops = {
0786     .inherits = &ata_sff_port_ops,
0787     .freeze = arasan_cf_freeze,
0788     .error_handler = arasan_cf_error_handler,
0789     .qc_issue = arasan_cf_qc_issue,
0790     .set_piomode = arasan_cf_set_piomode,
0791     .set_dmamode = arasan_cf_set_dmamode,
0792 };
0793 
0794 static int arasan_cf_probe(struct platform_device *pdev)
0795 {
0796     struct arasan_cf_dev *acdev;
0797     struct arasan_cf_pdata *pdata = dev_get_platdata(&pdev->dev);
0798     struct ata_host *host;
0799     struct ata_port *ap;
0800     struct resource *res;
0801     u32 quirk;
0802     irq_handler_t irq_handler = NULL;
0803     int ret;
0804 
0805     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
0806     if (!res)
0807         return -EINVAL;
0808 
0809     if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
0810                 DRIVER_NAME)) {
0811         dev_warn(&pdev->dev, "Failed to get memory region resource\n");
0812         return -ENOENT;
0813     }
0814 
0815     acdev = devm_kzalloc(&pdev->dev, sizeof(*acdev), GFP_KERNEL);
0816     if (!acdev)
0817         return -ENOMEM;
0818 
0819     if (pdata)
0820         quirk = pdata->quirk;
0821     else
0822         quirk = CF_BROKEN_UDMA; /* as it is on spear1340 */
0823 
0824     /*
0825      * If there's an error getting IRQ (or we do get IRQ0),
0826      * support only PIO
0827      */
0828     ret = platform_get_irq(pdev, 0);
0829     if (ret > 0) {
0830         acdev->irq = ret;
0831         irq_handler = arasan_cf_interrupt;
0832     } else if (ret == -EPROBE_DEFER) {
0833         return ret;
0834     } else {
0835         quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
0836     }
0837 
0838     acdev->pbase = res->start;
0839     acdev->vbase = devm_ioremap(&pdev->dev, res->start,
0840             resource_size(res));
0841     if (!acdev->vbase) {
0842         dev_warn(&pdev->dev, "ioremap fail\n");
0843         return -ENOMEM;
0844     }
0845 
0846     acdev->clk = devm_clk_get(&pdev->dev, NULL);
0847     if (IS_ERR(acdev->clk)) {
0848         dev_warn(&pdev->dev, "Clock not found\n");
0849         return PTR_ERR(acdev->clk);
0850     }
0851 
0852     /* allocate host */
0853     host = ata_host_alloc(&pdev->dev, 1);
0854     if (!host) {
0855         dev_warn(&pdev->dev, "alloc host fail\n");
0856         return -ENOMEM;
0857     }
0858 
0859     ap = host->ports[0];
0860     host->private_data = acdev;
0861     acdev->host = host;
0862     ap->ops = &arasan_cf_ops;
0863     ap->pio_mask = ATA_PIO6;
0864     ap->mwdma_mask = ATA_MWDMA4;
0865     ap->udma_mask = ATA_UDMA6;
0866 
0867     init_completion(&acdev->cf_completion);
0868     init_completion(&acdev->dma_completion);
0869     INIT_WORK(&acdev->work, data_xfer);
0870     INIT_DELAYED_WORK(&acdev->dwork, delayed_finish);
0871     dma_cap_set(DMA_MEMCPY, acdev->mask);
0872 
0873     /* Handle platform specific quirks */
0874     if (quirk) {
0875         if (quirk & CF_BROKEN_PIO) {
0876             ap->ops->set_piomode = NULL;
0877             ap->pio_mask = 0;
0878         }
0879         if (quirk & CF_BROKEN_MWDMA)
0880             ap->mwdma_mask = 0;
0881         if (quirk & CF_BROKEN_UDMA)
0882             ap->udma_mask = 0;
0883     }
0884     ap->flags |= ATA_FLAG_PIO_POLLING | ATA_FLAG_NO_ATAPI;
0885 
0886     ap->ioaddr.cmd_addr = acdev->vbase + ATA_DATA_PORT;
0887     ap->ioaddr.data_addr = acdev->vbase + ATA_DATA_PORT;
0888     ap->ioaddr.error_addr = acdev->vbase + ATA_ERR_FTR;
0889     ap->ioaddr.feature_addr = acdev->vbase + ATA_ERR_FTR;
0890     ap->ioaddr.nsect_addr = acdev->vbase + ATA_SC;
0891     ap->ioaddr.lbal_addr = acdev->vbase + ATA_SN;
0892     ap->ioaddr.lbam_addr = acdev->vbase + ATA_CL;
0893     ap->ioaddr.lbah_addr = acdev->vbase + ATA_CH;
0894     ap->ioaddr.device_addr = acdev->vbase + ATA_SH;
0895     ap->ioaddr.status_addr = acdev->vbase + ATA_STS_CMD;
0896     ap->ioaddr.command_addr = acdev->vbase + ATA_STS_CMD;
0897     ap->ioaddr.altstatus_addr = acdev->vbase + ATA_ASTS_DCTR;
0898     ap->ioaddr.ctl_addr = acdev->vbase + ATA_ASTS_DCTR;
0899 
0900     ata_port_desc(ap, "phy_addr %llx virt_addr %p",
0901               (unsigned long long) res->start, acdev->vbase);
0902 
0903     ret = cf_init(acdev);
0904     if (ret)
0905         return ret;
0906 
0907     cf_card_detect(acdev, 0);
0908 
0909     ret = ata_host_activate(host, acdev->irq, irq_handler, 0,
0910                 &arasan_cf_sht);
0911     if (!ret)
0912         return 0;
0913 
0914     cf_exit(acdev);
0915 
0916     return ret;
0917 }
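
     /*
      * Illustrative only: a board file might pass platform data along these
      * lines (the particular values chosen here are arbitrary; when no platform
      * data is supplied, probe() falls back to CF_BROKEN_UDMA and cf_init() to
      * CF_IF_CLK_166M):
      *
      *	static struct arasan_cf_pdata example_cf_pdata = {
      *		.cf_if_clk	= CF_IF_CLK_166M,
      *		.quirk		= CF_BROKEN_UDMA,
      *	};
      */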
0918 
0919 static int arasan_cf_remove(struct platform_device *pdev)
0920 {
0921     struct ata_host *host = platform_get_drvdata(pdev);
0922     struct arasan_cf_dev *acdev = host->ports[0]->private_data;
0923 
0924     ata_host_detach(host);
0925     cf_exit(acdev);
0926 
0927     return 0;
0928 }
0929 
0930 #ifdef CONFIG_PM_SLEEP
0931 static int arasan_cf_suspend(struct device *dev)
0932 {
0933     struct ata_host *host = dev_get_drvdata(dev);
0934     struct arasan_cf_dev *acdev = host->ports[0]->private_data;
0935 
0936     if (acdev->dma_chan)
0937         dmaengine_terminate_all(acdev->dma_chan);
0938 
0939     cf_exit(acdev);
0940     ata_host_suspend(host, PMSG_SUSPEND);
0941     return 0;
0942 }
0943 
0944 static int arasan_cf_resume(struct device *dev)
0945 {
0946     struct ata_host *host = dev_get_drvdata(dev);
0947     struct arasan_cf_dev *acdev = host->ports[0]->private_data;
0948 
0949     cf_init(acdev);
0950     ata_host_resume(host);
0951 
0952     return 0;
0953 }
0954 #endif
0955 
0956 static SIMPLE_DEV_PM_OPS(arasan_cf_pm_ops, arasan_cf_suspend, arasan_cf_resume);
0957 
0958 #ifdef CONFIG_OF
0959 static const struct of_device_id arasan_cf_id_table[] = {
0960     { .compatible = "arasan,cf-spear1340" },
0961     {}
0962 };
0963 MODULE_DEVICE_TABLE(of, arasan_cf_id_table);
0964 #endif
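
     /*
      * A matching device tree node might look roughly like the following sketch
      * (unit address, register range, interrupt and DMA specifiers are
      * hypothetical; only the compatible string and the "data" dma-names entry
      * come from this driver):
      *
      *	cf@b2800000 {
      *		compatible = "arasan,cf-spear1340";
      *		reg = <0xb2800000 0x1000>;
      *		interrupts = <85>;
      *		dmas = <&dwdma 0>;
      *		dma-names = "data";
      *	};
      */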
0965 
0966 static struct platform_driver arasan_cf_driver = {
0967     .probe      = arasan_cf_probe,
0968     .remove     = arasan_cf_remove,
0969     .driver     = {
0970         .name   = DRIVER_NAME,
0971         .pm = &arasan_cf_pm_ops,
0972         .of_match_table = of_match_ptr(arasan_cf_id_table),
0973     },
0974 };
0975 
0976 module_platform_driver(arasan_cf_driver);
0977 
0978 MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
0979 MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
0980 MODULE_LICENSE("GPL");
0981 MODULE_ALIAS("platform:" DRIVER_NAME);