0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/mman.h>
0010 #include <linux/init.h>
0011 #include <linux/interrupt.h>
0012 #include <linux/dma-mapping.h>
0013 #include <linux/io.h>
0014
0015 #include <asm/page.h>
0016 #include <asm/dma.h>
0017 #include <asm/fiq.h>
0018 #include <asm/irq.h>
0019 #include <mach/hardware.h>
0020 #include <linux/uaccess.h>
0021
0022 #include <asm/mach/dma.h>
0023 #include <asm/hardware/iomd.h>
0024
/*
 * Per-channel state for one IOMD I/O DMA channel.
 */
struct iomd_dma {
	struct dma_struct dma;	/* generic ARM dma_t, embedded so container_of() recovers us */
	void __iomem *base;	/* channel register base (CURA lives at offset 0) */
	int irq;		/* interrupt line for this channel */
	unsigned int state;	/* DMA_ST_AB tracking; ~DMA_ST_AB once the channel is idle */
	dma_addr_t cur_addr;	/* bus address to program into CURA/CURB next */
	unsigned int cur_len;	/* length word for ENDA/ENDB (DMA_END_* flags ORed in) */
	dma_addr_t dma_addr;	/* next unconsumed bus address in the current sg entry */
	unsigned int dma_len;	/* bytes remaining in the current sg entry */
};
0035
/* Dead code kept for reference: possible IOMD transfer-size encodings. */
#if 0
typedef enum {
	dma_size_8 = 1,
	dma_size_16 = 2,
	dma_size_32 = 4,
	dma_size_128 = 16
} dma_size_t;
#endif

/* Size code programmed into the control register (16-bit transfers). */
#define TRANSFER_SIZE	2

/*
 * Channel register offsets, expressed relative to the channel's CURA
 * register so a single per-channel base pointer reaches them all.
 */
#define CURA	(0)
#define ENDA	(IOMD_IO0ENDA - IOMD_IO0CURA)
#define CURB	(IOMD_IO0CURB - IOMD_IO0CURA)
#define ENDB	(IOMD_IO0ENDB - IOMD_IO0CURA)
#define CR	(IOMD_IO0CR - IOMD_IO0CURA)
#define ST	(IOMD_IO0ST - IOMD_IO0CURA)
0053
/*
 * Compute the next chunk of the current scatter-gather list, leaving
 * the values to program into the channel's CUR/END registers in
 * idma->cur_addr and idma->cur_len.  Runs are split on page
 * boundaries, and end-of-transfer flags are encoded into the length
 * word: DMA_END_L when this chunk is the last of its run, DMA_END_S
 * once the whole scatterlist has been consumed.
 */
static void iomd_get_next_sg(struct iomd_dma *idma)
{
	unsigned long end, offset, flags = 0;

	if (idma->dma.sg) {
		idma->cur_addr = idma->dma_addr;
		offset = idma->cur_addr & ~PAGE_MASK;

		end = offset + idma->dma_len;

		/* Never let one programmed run cross a page boundary. */
		if (end > PAGE_SIZE)
			end = PAGE_SIZE;

		if (offset + TRANSFER_SIZE >= end)
			flags |= DMA_END_L;

		/*
		 * NOTE(review): programmed length is end - TRANSFER_SIZE,
		 * not end - offset — presumably the END register takes the
		 * in-page offset of the final transfer; confirm against the
		 * IOMD documentation.
		 */
		idma->cur_len = end - TRANSFER_SIZE;

		/* Consume this chunk from the current sg entry. */
		idma->dma_len -= end - offset;
		idma->dma_addr += end - offset;

		if (idma->dma_len == 0) {
			if (idma->dma.sgcount > 1) {
				/* Advance to the next scatterlist entry. */
				idma->dma.sg = sg_next(idma->dma.sg);
				idma->dma_addr = idma->dma.sg->dma_address;
				idma->dma_len = idma->dma.sg->length;
				idma->dma.sgcount--;
			} else {
				/* List exhausted: mark the stream finished. */
				idma->dma.sg = NULL;
				flags |= DMA_END_S;
			}
		}
	} else {
		/* No scatterlist at all: program an all-stop length word. */
		flags = DMA_END_S | DMA_END_L;
		idma->cur_addr = 0;
		idma->cur_len = 0;
	}

	/* The flag bits travel in the length word written to END. */
	idma->cur_len |= flags;
}
0094
/*
 * Interrupt handler for one IOMD DMA channel.  The hardware
 * double-buffers transfers through the A/B register pairs: whenever
 * DMA_ST_INT is raised we refill whichever pair just completed with
 * the next sg chunk, looping until either no interrupt is pending or
 * the final all-stop length word has been consumed, at which point the
 * channel is marked idle and its interrupt masked.
 */
static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
{
	struct iomd_dma *idma = dev_id;
	void __iomem *base = idma->base;
	unsigned int state = idma->state;
	unsigned int status, cur, end;

	do {
		status = readb(base + ST);
		if (!(status & DMA_ST_INT))
			goto out;	/* nothing (more) pending */

		/* Active buffer flipped since last time: fetch next chunk. */
		if ((state ^ status) & DMA_ST_AB)
			iomd_get_next_sg(idma);

		/* Select which buffer pair (A or B) to reprogram. */
		state = ((status >> 2) ^ status) & DMA_ST_AB;
		if (state) {
			cur = CURA;
			end = ENDA;
		} else {
			cur = CURB;
			end = ENDB;
		}
		writel(idma->cur_addr, base + cur);
		writel(idma->cur_len, base + end);

		/*
		 * DMA_ST_OFL with an all-stop length word means the whole
		 * transfer has drained — stop servicing the channel.
		 */
		if (status & DMA_ST_OFL &&
		    idma->cur_len == (DMA_END_S|DMA_END_L))
			break;
	} while (1);

	/* Transfer complete: mark idle and silence until re-enabled. */
	state = ~DMA_ST_AB;
	disable_irq_nosync(irq);
out:
	idma->state = state;
	return IRQ_HANDLED;
}
0133
0134 static int iomd_request_dma(unsigned int chan, dma_t *dma)
0135 {
0136 struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
0137
0138 return request_irq(idma->irq, iomd_dma_handle,
0139 0, idma->dma.device_id, idma);
0140 }
0141
0142 static void iomd_free_dma(unsigned int chan, dma_t *dma)
0143 {
0144 struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
0145
0146 free_irq(idma->irq, idma);
0147 }
0148
/*
 * Fallback device for dma_map_single() when a driver hands us a bare
 * virtual address/count instead of a scatterlist (see
 * iomd_enable_dma()).  The all-ones mask accepts any DMA address.
 */
static struct device isa_dma_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ~(dma_addr_t)0,
	.dma_mask = &isa_dma_dev.coherent_dma_mask,
};
0154
/*
 * Start (or resume) a transfer on an IOMD DMA channel.  A new request
 * (dma.invalid set) first gets its buffer mapped — building a
 * single-entry scatterlist from dma.buf when the caller supplied only
 * addr/count — then the controller is cleared and the channel state
 * primed; finally the direction bit is chosen, the controller enabled
 * and the channel interrupt unmasked.
 */
static void iomd_enable_dma(unsigned int chan, dma_t *dma)
{
	struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
	void __iomem *base = idma->base;
	unsigned int ctrl = TRANSFER_SIZE | DMA_CR_E;

	if (idma->dma.invalid) {
		idma->dma.invalid = 0;

		/*
		 * ISA-style callers pass a virtual address and count
		 * rather than a scatterlist: wrap them in the embedded
		 * single-entry list and map via the fallback device.
		 */
		if (!idma->dma.sg) {
			idma->dma.sg = &idma->dma.buf;
			idma->dma.sgcount = 1;
			idma->dma.buf.length = idma->dma.count;
			idma->dma.buf.dma_address = dma_map_single(&isa_dma_dev,
				idma->dma.addr, idma->dma.count,
				idma->dma.dma_mode == DMA_MODE_READ ?
				DMA_FROM_DEVICE : DMA_TO_DEVICE);
		}

		idma->dma_addr = idma->dma.sg->dma_address;
		idma->dma_len = idma->dma.sg->length;

		/* DMA_CR_C presumably resets the channel — confirm vs IOMD docs. */
		writeb(DMA_CR_C, base + CR);
		idma->state = DMA_ST_AB;
	}

	/* DMA_CR_D is set only for DMA_MODE_READ transfers. */
	if (idma->dma.dma_mode == DMA_MODE_READ)
		ctrl |= DMA_CR_D;

	writeb(ctrl, base + CR);
	enable_irq(idma->irq);
}
0191
/*
 * Stop a transfer on an IOMD DMA channel.  The channel interrupt is
 * masked only if still active (state != ~DMA_ST_AB, i.e. the IRQ
 * handler has not already disabled it), then the control register is
 * cleared to halt the controller.  Done with local interrupts off so
 * the handler cannot race the state test.
 */
static void iomd_disable_dma(unsigned int chan, dma_t *dma)
{
	struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
	void __iomem *base = idma->base;
	unsigned long flags;

	local_irq_save(flags);
	if (idma->state != ~DMA_ST_AB)
		disable_irq(idma->irq);
	writeb(0, base + CR);
	local_irq_restore(flags);
}
0204
0205 static int iomd_set_dma_speed(unsigned int chan, dma_t *dma, int cycle)
0206 {
0207 int tcr, speed;
0208
0209 if (cycle < 188)
0210 speed = 3;
0211 else if (cycle <= 250)
0212 speed = 2;
0213 else if (cycle < 438)
0214 speed = 1;
0215 else
0216 speed = 0;
0217
0218 tcr = iomd_readb(IOMD_DMATCR);
0219 speed &= 3;
0220
0221 switch (chan) {
0222 case DMA_0:
0223 tcr = (tcr & ~0x03) | speed;
0224 break;
0225
0226 case DMA_1:
0227 tcr = (tcr & ~0x0c) | (speed << 2);
0228 break;
0229
0230 case DMA_2:
0231 tcr = (tcr & ~0x30) | (speed << 4);
0232 break;
0233
0234 case DMA_3:
0235 tcr = (tcr & ~0xc0) | (speed << 6);
0236 break;
0237
0238 default:
0239 break;
0240 }
0241
0242 iomd_writeb(tcr, IOMD_DMATCR);
0243
0244 return speed;
0245 }
0246
/* Operations for the six IOMD-driven hardware DMA channels. */
static struct dma_ops iomd_dma_ops = {
	.type = "IOMD",
	.request = iomd_request_dma,
	.free = iomd_free_dma,
	.enable = iomd_enable_dma,
	.disable = iomd_disable_dma,
	.setspeed = iomd_set_dma_speed,
};
0255
/* Ownership token for claiming the FIQ vector for floppy transfers. */
static struct fiq_handler fh = {
	.name = "floppydma"
};

/* Floppy "DMA" channel: data is moved by a FIQ handler, not the IOMD. */
struct floppy_dma {
	struct dma_struct dma;	/* generic dma_t, embedded for container_of() */
	unsigned int fiq;	/* FIQ number to enable/disable */
};
0264
0265 static void floppy_enable_dma(unsigned int chan, dma_t *dma)
0266 {
0267 struct floppy_dma *fdma = container_of(dma, struct floppy_dma, dma);
0268 void *fiqhandler_start;
0269 unsigned int fiqhandler_length;
0270 struct pt_regs regs;
0271
0272 if (fdma->dma.sg)
0273 BUG();
0274
0275 if (fdma->dma.dma_mode == DMA_MODE_READ) {
0276 extern unsigned char floppy_fiqin_start, floppy_fiqin_end;
0277 fiqhandler_start = &floppy_fiqin_start;
0278 fiqhandler_length = &floppy_fiqin_end - &floppy_fiqin_start;
0279 } else {
0280 extern unsigned char floppy_fiqout_start, floppy_fiqout_end;
0281 fiqhandler_start = &floppy_fiqout_start;
0282 fiqhandler_length = &floppy_fiqout_end - &floppy_fiqout_start;
0283 }
0284
0285 regs.ARM_r9 = fdma->dma.count;
0286 regs.ARM_r10 = (unsigned long)fdma->dma.addr;
0287 regs.ARM_fp = (unsigned long)FLOPPYDMA_BASE;
0288
0289 if (claim_fiq(&fh)) {
0290 printk("floppydma: couldn't claim FIQ.\n");
0291 return;
0292 }
0293
0294 set_fiq_handler(fiqhandler_start, fiqhandler_length);
0295 set_fiq_regs(®s);
0296 enable_fiq(fdma->fiq);
0297 }
0298
0299 static void floppy_disable_dma(unsigned int chan, dma_t *dma)
0300 {
0301 struct floppy_dma *fdma = container_of(dma, struct floppy_dma, dma);
0302 disable_fiq(fdma->fiq);
0303 release_fiq(&fh);
0304 }
0305
0306 static int floppy_get_residue(unsigned int chan, dma_t *dma)
0307 {
0308 struct pt_regs regs;
0309 get_fiq_regs(®s);
0310 return regs.ARM_r9;
0311 }
0312
/* Operations for the FIQ-driven floppy pseudo-DMA channel. */
static struct dma_ops floppy_dma_ops = {
	.type = "FIQDMA",
	.enable = floppy_enable_dma,
	.disable = floppy_disable_dma,
	.residue = floppy_get_residue,
};
0319
0320
0321
0322
/*
 * The "virtual" sound channel needs no work here — NOTE(review):
 * presumably the sound driver programs the IOMD sound DMA registers
 * itself; this stub only satisfies the dma_ops interface.
 */
static void sound_enable_disable_dma(unsigned int chan, dma_t *dma)
{
}

static struct dma_ops sound_dma_ops = {
	.type = "VIRTUAL",
	.enable = sound_enable_disable_dma,
	.disable = sound_enable_disable_dma,
};
0332
/* One instance per hardware channel: IO0-IO3 plus sound SD0/SD1. */
static struct iomd_dma iomd_dma[6];

/* Floppy pseudo-DMA channel, driven from the floppy-data FIQ. */
static struct floppy_dma floppy_dma = {
	.dma = {
		.d_ops = &floppy_dma_ops,
	},
	.fiq = FIQ_FLOPPYDATA,
};

/* Virtual sound channel; its ops are no-ops (see sound_dma_ops). */
static dma_t sound_dma = {
	.d_ops = &sound_dma_ops,
};
0345
/*
 * Boot-time setup: quiesce the four I/O DMA channels, program the DMA
 * timing and external-request routing registers, fill in each
 * channel's register base and IRQ, then register every channel (IOMD
 * hardware, floppy FIQ and virtual sound) with the ARM ISA DMA core.
 * Registration failures are logged but not fatal.
 */
static int __init rpc_dma_init(void)
{
	unsigned int i;
	int ret;

	/* Disable all four IOMD I/O DMA channels. */
	iomd_writeb(0, IOMD_IO0CR);
	iomd_writeb(0, IOMD_IO1CR);
	iomd_writeb(0, IOMD_IO2CR);
	iomd_writeb(0, IOMD_IO3CR);

	/* NOTE(review): 0xa0 timing value taken on trust — confirm vs IOMD docs. */
	iomd_writeb(0xa0, IOMD_DMATCR);

	/*
	 * Route IO2/IO3 DMA requests externally — presumably so podule
	 * (expansion card) DMA uses channels 2 and 3; confirm.
	 */
	iomd_writeb(DMA_EXT_IO3|DMA_EXT_IO2, IOMD_DMAEXT);

	iomd_dma[DMA_0].base = IOMD_BASE + IOMD_IO0CURA;
	iomd_dma[DMA_0].irq = IRQ_DMA0;
	iomd_dma[DMA_1].base = IOMD_BASE + IOMD_IO1CURA;
	iomd_dma[DMA_1].irq = IRQ_DMA1;
	iomd_dma[DMA_2].base = IOMD_BASE + IOMD_IO2CURA;
	iomd_dma[DMA_2].irq = IRQ_DMA2;
	iomd_dma[DMA_3].base = IOMD_BASE + IOMD_IO3CURA;
	iomd_dma[DMA_3].irq = IRQ_DMA3;
	iomd_dma[DMA_S0].base = IOMD_BASE + IOMD_SD0CURA;
	iomd_dma[DMA_S0].irq = IRQ_DMAS0;
	iomd_dma[DMA_S1].base = IOMD_BASE + IOMD_SD1CURA;
	iomd_dma[DMA_S1].irq = IRQ_DMAS1;

	for (i = DMA_0; i <= DMA_S1; i++) {
		iomd_dma[i].dma.d_ops = &iomd_dma_ops;

		ret = isa_dma_add(i, &iomd_dma[i].dma);
		if (ret)
			printk("IOMDDMA%u: unable to register: %d\n", i, ret);
	}

	ret = isa_dma_add(DMA_VIRTUAL_FLOPPY, &floppy_dma.dma);
	if (ret)
		printk("IOMDFLOPPY: unable to register: %d\n", ret);
	ret = isa_dma_add(DMA_VIRTUAL_SOUND, &sound_dma);
	if (ret)
		printk("IOMDSOUND: unable to register: %d\n", ret);
	return 0;
}
core_initcall(rpc_dma_init);