Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Mips Jazz DMA controller support
0004  * Copyright (C) 1995, 1996 by Andreas Busse
0005  *
0006  * NOTE: Some of the argument checking could be removed when
0007  * things have settled down. Also, instead of returning 0xffffffff
0008  * on failure of vdma_alloc() one could leave page #0 unused
0009  * and return the more usual NULL pointer as logical address.
0010  */
0011 #include <linux/kernel.h>
0012 #include <linux/init.h>
0013 #include <linux/export.h>
0014 #include <linux/errno.h>
0015 #include <linux/mm.h>
0016 #include <linux/memblock.h>
0017 #include <linux/spinlock.h>
0018 #include <linux/gfp.h>
0019 #include <linux/dma-map-ops.h>
0020 #include <asm/mipsregs.h>
0021 #include <asm/jazz.h>
0022 #include <asm/io.h>
0023 #include <linux/uaccess.h>
0024 #include <asm/dma.h>
0025 #include <asm/jazzdma.h>
0026 
/*
 * Set this to one to enable additional vdma debug code.
 */
#define CONF_DEBUG_VDMA 0

/* R4030 DMA translation table; one entry maps one VDMA page. */
static VDMA_PGTBL_ENTRY *pgtbl;

/* Serializes allocation/free of translation-table entries. */
static DEFINE_SPINLOCK(vdma_lock);

/*
 * Debug stuff
 */
#define vdma_debug     ((CONF_DEBUG_VDMA) ? debuglvl : 0)

/* Debug verbosity level; only consulted when CONF_DEBUG_VDMA is set. */
static int debuglvl = 3;
0042 
0043 /*
0044  * Initialize the pagetable with a one-to-one mapping of
0045  * the first 16 Mbytes of main memory and declare all
0046  * entries to be unused. Using this method will at least
0047  * allow some early device driver operations to work.
0048  */
0049 static inline void vdma_pgtbl_init(void)
0050 {
0051     unsigned long paddr = 0;
0052     int i;
0053 
0054     for (i = 0; i < VDMA_PGTBL_ENTRIES; i++) {
0055         pgtbl[i].frame = paddr;
0056         pgtbl[i].owner = VDMA_PAGE_EMPTY;
0057         paddr += VDMA_PAGESIZE;
0058     }
0059 }
0060 
/*
 * Initialize the Jazz R4030 dma controller.
 *
 * Allocates the VDMA translation table, switches to its uncached
 * alias, fills in the identity mapping and programs the R4030 with
 * the table's location and size.  Always returns 0.
 */
static int __init vdma_init(void)
{
    /*
     * Allocate 32k of memory for DMA page tables.  This needs to be page
     * aligned and should be uncached to avoid cache flushing after every
     * update.
     */
    pgtbl = (VDMA_PGTBL_ENTRY *)__get_free_pages(GFP_KERNEL | GFP_DMA,
                            get_order(VDMA_PGTBL_SIZE));
    BUG_ON(!pgtbl);
    /* Write back and invalidate: from here on the table is accessed
     * only through its uncached (KSEG1) alias. */
    dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
    pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);

    /*
     * Clear the R4030 translation table
     */
    vdma_pgtbl_init();

    /* Tell the R4030 where the table lives and how large it is, then
     * invalidate any translations it may have cached. */
    r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
              CPHYSADDR((unsigned long)pgtbl));
    r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
    r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);

    printk(KERN_INFO "VDMA: R4030 DMA pagetables initialized.\n");
    return 0;
}
arch_initcall(vdma_init);
0091 
0092 /*
0093  * Allocate DMA pagetables using a simple first-fit algorithm
0094  */
0095 unsigned long vdma_alloc(unsigned long paddr, unsigned long size)
0096 {
0097     int first, last, pages, frame, i;
0098     unsigned long laddr, flags;
0099 
0100     /* check arguments */
0101 
0102     if (paddr > 0x1fffffff) {
0103         if (vdma_debug)
0104             printk("vdma_alloc: Invalid physical address: %08lx\n",
0105                    paddr);
0106         return DMA_MAPPING_ERROR;   /* invalid physical address */
0107     }
0108     if (size > 0x400000 || size == 0) {
0109         if (vdma_debug)
0110             printk("vdma_alloc: Invalid size: %08lx\n", size);
0111         return DMA_MAPPING_ERROR;   /* invalid physical address */
0112     }
0113 
0114     spin_lock_irqsave(&vdma_lock, flags);
0115     /*
0116      * Find free chunk
0117      */
0118     pages = VDMA_PAGE(paddr + size) - VDMA_PAGE(paddr) + 1;
0119     first = 0;
0120     while (1) {
0121         while (pgtbl[first].owner != VDMA_PAGE_EMPTY &&
0122                first < VDMA_PGTBL_ENTRIES) first++;
0123         if (first + pages > VDMA_PGTBL_ENTRIES) {   /* nothing free */
0124             spin_unlock_irqrestore(&vdma_lock, flags);
0125             return DMA_MAPPING_ERROR;
0126         }
0127 
0128         last = first + 1;
0129         while (pgtbl[last].owner == VDMA_PAGE_EMPTY
0130                && last - first < pages)
0131             last++;
0132 
0133         if (last - first == pages)
0134             break;  /* found */
0135         first = last + 1;
0136     }
0137 
0138     /*
0139      * Mark pages as allocated
0140      */
0141     laddr = (first << 12) + (paddr & (VDMA_PAGESIZE - 1));
0142     frame = paddr & ~(VDMA_PAGESIZE - 1);
0143 
0144     for (i = first; i < last; i++) {
0145         pgtbl[i].frame = frame;
0146         pgtbl[i].owner = laddr;
0147         frame += VDMA_PAGESIZE;
0148     }
0149 
0150     /*
0151      * Update translation table and return logical start address
0152      */
0153     r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
0154 
0155     if (vdma_debug > 1)
0156         printk("vdma_alloc: Allocated %d pages starting from %08lx\n",
0157              pages, laddr);
0158 
0159     if (vdma_debug > 2) {
0160         printk("LADDR: ");
0161         for (i = first; i < last; i++)
0162             printk("%08x ", i << 12);
0163         printk("\nPADDR: ");
0164         for (i = first; i < last; i++)
0165             printk("%08x ", pgtbl[i].frame);
0166         printk("\nOWNER: ");
0167         for (i = first; i < last; i++)
0168             printk("%08x ", pgtbl[i].owner);
0169         printk("\n");
0170     }
0171 
0172     spin_unlock_irqrestore(&vdma_lock, flags);
0173 
0174     return laddr;
0175 }
0176 
0177 EXPORT_SYMBOL(vdma_alloc);
0178 
0179 /*
0180  * Free previously allocated dma translation pages
0181  * Note that this does NOT change the translation table,
0182  * it just marks the free'd pages as unused!
0183  */
0184 int vdma_free(unsigned long laddr)
0185 {
0186     int i;
0187 
0188     i = laddr >> 12;
0189 
0190     if (pgtbl[i].owner != laddr) {
0191         printk
0192             ("vdma_free: trying to free other's dma pages, laddr=%8lx\n",
0193              laddr);
0194         return -1;
0195     }
0196 
0197     while (i < VDMA_PGTBL_ENTRIES && pgtbl[i].owner == laddr) {
0198         pgtbl[i].owner = VDMA_PAGE_EMPTY;
0199         i++;
0200     }
0201 
0202     if (vdma_debug > 1)
0203         printk("vdma_free: freed %ld pages starting from %08lx\n",
0204                i - (laddr >> 12), laddr);
0205 
0206     return 0;
0207 }
0208 
0209 EXPORT_SYMBOL(vdma_free);
0210 
0211 /*
0212  * Translate a physical address to a logical address.
0213  * This will return the logical address of the first
0214  * match.
0215  */
0216 unsigned long vdma_phys2log(unsigned long paddr)
0217 {
0218     int i;
0219     int frame;
0220 
0221     frame = paddr & ~(VDMA_PAGESIZE - 1);
0222 
0223     for (i = 0; i < VDMA_PGTBL_ENTRIES; i++) {
0224         if (pgtbl[i].frame == frame)
0225             break;
0226     }
0227 
0228     if (i == VDMA_PGTBL_ENTRIES)
0229         return ~0UL;
0230 
0231     return (i << 12) + (paddr & (VDMA_PAGESIZE - 1));
0232 }
0233 
0234 EXPORT_SYMBOL(vdma_phys2log);
0235 
0236 /*
0237  * Translate a logical DMA address to a physical address
0238  */
0239 unsigned long vdma_log2phys(unsigned long laddr)
0240 {
0241     return pgtbl[laddr >> 12].frame + (laddr & (VDMA_PAGESIZE - 1));
0242 }
0243 
0244 EXPORT_SYMBOL(vdma_log2phys);
0245 
/*
 * Print DMA statistics: dump the R4030 configuration, translation
 * table and error registers, plus the mode and enable registers of
 * all eight DMA channels.  Debug aid only; reads hardware registers
 * but changes no state.
 */
void vdma_stats(void)
{
    int i;

    printk("vdma_stats: CONFIG: %08x\n",
           r4030_read_reg32(JAZZ_R4030_CONFIG));
    printk("R4030 translation table base: %08x\n",
           r4030_read_reg32(JAZZ_R4030_TRSTBL_BASE));
    printk("R4030 translation table limit: %08x\n",
           r4030_read_reg32(JAZZ_R4030_TRSTBL_LIM));
    printk("vdma_stats: INV_ADDR: %08x\n",
           r4030_read_reg32(JAZZ_R4030_INV_ADDR));
    printk("vdma_stats: R_FAIL_ADDR: %08x\n",
           r4030_read_reg32(JAZZ_R4030_R_FAIL_ADDR));
    printk("vdma_stats: M_FAIL_ADDR: %08x\n",
           r4030_read_reg32(JAZZ_R4030_M_FAIL_ADDR));
    printk("vdma_stats: IRQ_SOURCE: %08x\n",
           r4030_read_reg32(JAZZ_R4030_IRQ_SOURCE));
    printk("vdma_stats: I386_ERROR: %08x\n",
           r4030_read_reg32(JAZZ_R4030_I386_ERROR));
    /* Per-channel registers are spaced 32 bytes apart (i << 5). */
    printk("vdma_chnl_modes:   ");
    for (i = 0; i < 8; i++)
        printk("%04x ",
               (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_MODE +
                           (i << 5)));
    printk("\n");
    printk("vdma_chnl_enables: ");
    for (i = 0; i < 8; i++)
        printk("%04x ",
               (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
                           (i << 5)));
    printk("\n");
}
0282 
0283 /*
0284  * DMA transfer functions
0285  */
0286 
0287 /*
0288  * Enable a DMA channel. Also clear any error conditions.
0289  */
0290 void vdma_enable(int channel)
0291 {
0292     int status;
0293 
0294     if (vdma_debug)
0295         printk("vdma_enable: channel %d\n", channel);
0296 
0297     /*
0298      * Check error conditions first
0299      */
0300     status = r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5));
0301     if (status & 0x400)
0302         printk("VDMA: Channel %d: Address error!\n", channel);
0303     if (status & 0x200)
0304         printk("VDMA: Channel %d: Memory error!\n", channel);
0305 
0306     /*
0307      * Clear all interrupt flags
0308      */
0309     r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
0310               r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
0311                        (channel << 5)) | R4030_TC_INTR
0312               | R4030_MEM_INTR | R4030_ADDR_INTR);
0313 
0314     /*
0315      * Enable the desired channel
0316      */
0317     r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
0318               r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
0319                        (channel << 5)) |
0320               R4030_CHNL_ENABLE);
0321 }
0322 
0323 EXPORT_SYMBOL(vdma_enable);
0324 
0325 /*
0326  * Disable a DMA channel
0327  */
0328 void vdma_disable(int channel)
0329 {
0330     if (vdma_debug) {
0331         int status =
0332             r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
0333                      (channel << 5));
0334 
0335         printk("vdma_disable: channel %d\n", channel);
0336         printk("VDMA: channel %d status: %04x (%s) mode: "
0337                "%02x addr: %06x count: %06x\n",
0338                channel, status,
0339                ((status & 0x600) ? "ERROR" : "OK"),
0340                (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_MODE +
0341                            (channel << 5)),
0342                (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_ADDR +
0343                            (channel << 5)),
0344                (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_COUNT +
0345                            (channel << 5)));
0346     }
0347 
0348     r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
0349               r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
0350                        (channel << 5)) &
0351               ~R4030_CHNL_ENABLE);
0352 
0353     /*
0354      * After disabling a DMA channel a remote bus register should be
0355      * read to ensure that the current DMA acknowledge cycle is completed.
0356      */
0357     *((volatile unsigned int *) JAZZ_DUMMY_DEVICE);
0358 }
0359 
0360 EXPORT_SYMBOL(vdma_disable);
0361 
/*
 * Set DMA mode. This function accepts the mode values used
 * to set a PC-style DMA controller. For the SCSI and FDC
 * channels, we also set the default modes each time we're
 * called.
 * NOTE: The FAST and BURST dma modes are supported by the
 * R4030 Rev. 2 and PICA chipsets only. I leave them disabled
 * for now.
 */
void vdma_set_mode(int channel, int mode)
{
    if (vdma_debug)
        printk("vdma_set_mode: channel %d, mode 0x%x\n", channel,
               mode);

    /* First program the channel's fixed transfer characteristics. */
    switch (channel) {
    case JAZZ_SCSI_DMA: /* scsi */
        r4030_write_reg32(JAZZ_R4030_CHNL_MODE + (channel << 5),
/*            R4030_MODE_FAST | */
/*            R4030_MODE_BURST | */
                  R4030_MODE_INTR_EN |
                  R4030_MODE_WIDTH_16 |
                  R4030_MODE_ATIME_80);
        break;

    case JAZZ_FLOPPY_DMA:   /* floppy */
        r4030_write_reg32(JAZZ_R4030_CHNL_MODE + (channel << 5),
/*            R4030_MODE_FAST | */
/*            R4030_MODE_BURST | */
                  R4030_MODE_INTR_EN |
                  R4030_MODE_WIDTH_8 |
                  R4030_MODE_ATIME_120);
        break;

    case JAZZ_AUDIOL_DMA:
    case JAZZ_AUDIOR_DMA:
        printk("VDMA: Audio DMA not supported yet.\n");
        break;

    default:
        printk
            ("VDMA: vdma_set_mode() called with unsupported channel %d!\n",
             channel);
    }

    /* Then set the transfer direction via the channel's WRITE bit. */
    switch (mode) {
    case DMA_MODE_READ:
        r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
                  r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
                           (channel << 5)) &
                  ~R4030_CHNL_WRITE);
        break;

    case DMA_MODE_WRITE:
        r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
                  r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
                           (channel << 5)) |
                  R4030_CHNL_WRITE);
        break;

    default:
        printk
            ("VDMA: vdma_set_mode() called with unknown dma mode 0x%x\n",
             mode);
    }
}

EXPORT_SYMBOL(vdma_set_mode);
0430 
0431 /*
0432  * Set Transfer Address
0433  */
0434 void vdma_set_addr(int channel, long addr)
0435 {
0436     if (vdma_debug)
0437         printk("vdma_set_addr: channel %d, addr %lx\n", channel,
0438                addr);
0439 
0440     r4030_write_reg32(JAZZ_R4030_CHNL_ADDR + (channel << 5), addr);
0441 }
0442 
0443 EXPORT_SYMBOL(vdma_set_addr);
0444 
0445 /*
0446  * Set Transfer Count
0447  */
0448 void vdma_set_count(int channel, int count)
0449 {
0450     if (vdma_debug)
0451         printk("vdma_set_count: channel %d, count %08x\n", channel,
0452                (unsigned) count);
0453 
0454     r4030_write_reg32(JAZZ_R4030_CHNL_COUNT + (channel << 5), count);
0455 }
0456 
0457 EXPORT_SYMBOL(vdma_set_count);
0458 
0459 /*
0460  * Get Residual
0461  */
0462 int vdma_get_residue(int channel)
0463 {
0464     int residual;
0465 
0466     residual = r4030_read_reg32(JAZZ_R4030_CHNL_COUNT + (channel << 5));
0467 
0468     if (vdma_debug)
0469         printk("vdma_get_residual: channel %d: residual=%d\n",
0470                channel, residual);
0471 
0472     return residual;
0473 }
0474 
0475 /*
0476  * Get DMA channel enable register
0477  */
0478 int vdma_get_enable(int channel)
0479 {
0480     int enable;
0481 
0482     enable = r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5));
0483 
0484     if (vdma_debug)
0485         printk("vdma_get_enable: channel %d: enable=%d\n", channel,
0486                enable);
0487 
0488     return enable;
0489 }
0490 
/*
 * Allocate a coherent DMA buffer: grab zeroed pages, establish a VDMA
 * mapping for them, and hand back an uncached alias of the buffer.
 * Returns NULL if either the page or VDMA allocation fails.
 */
static void *jazz_dma_alloc(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
    struct page *page;
    void *ret;

    if (attrs & DMA_ATTR_NO_WARN)
        gfp |= __GFP_NOWARN;

    size = PAGE_ALIGN(size);
    page = alloc_pages(gfp, get_order(size));
    if (!page)
        return NULL;
    ret = page_address(page);
    memset(ret, 0, size);
    /* The device sees the buffer through a VDMA window, not its
     * physical address. */
    *dma_handle = vdma_alloc(virt_to_phys(ret), size);
    if (*dma_handle == DMA_MAPPING_ERROR)
        goto out_free_pages;
    arch_dma_prep_coherent(page, size);
    /* Return the uncached alias so CPU accesses bypass the caches. */
    return (void *)(UNCAC_BASE + __pa(ret));

out_free_pages:
    __free_pages(page, get_order(size));
    return NULL;
}
0516 
/*
 * Free a buffer obtained from jazz_dma_alloc(): release the VDMA
 * translation entries, then return the pages to the page allocator.
 *
 * NOTE(review): @vaddr is the uncached alias (UNCAC_BASE + phys) that
 * jazz_dma_alloc() returned, and virt_to_page() is applied to it
 * directly — verify that this translation is valid for uncached
 * addresses on this platform.
 */
static void jazz_dma_free(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle, unsigned long attrs)
{
    vdma_free(dma_handle);
    __free_pages(virt_to_page(vaddr), get_order(size));
}
0523 
0524 static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
0525         unsigned long offset, size_t size, enum dma_data_direction dir,
0526         unsigned long attrs)
0527 {
0528     phys_addr_t phys = page_to_phys(page) + offset;
0529 
0530     if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
0531         arch_sync_dma_for_device(phys, size, dir);
0532     return vdma_alloc(phys, size);
0533 }
0534 
0535 static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
0536         size_t size, enum dma_data_direction dir, unsigned long attrs)
0537 {
0538     if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
0539         arch_sync_dma_for_cpu(vdma_log2phys(dma_addr), size, dir);
0540     vdma_free(dma_addr);
0541 }
0542 
0543 static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
0544         int nents, enum dma_data_direction dir, unsigned long attrs)
0545 {
0546     int i;
0547     struct scatterlist *sg;
0548 
0549     for_each_sg(sglist, sg, nents, i) {
0550         if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
0551             arch_sync_dma_for_device(sg_phys(sg), sg->length,
0552                 dir);
0553         sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
0554         if (sg->dma_address == DMA_MAPPING_ERROR)
0555             return -EIO;
0556         sg_dma_len(sg) = sg->length;
0557     }
0558 
0559     return nents;
0560 }
0561 
0562 static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
0563         int nents, enum dma_data_direction dir, unsigned long attrs)
0564 {
0565     int i;
0566     struct scatterlist *sg;
0567 
0568     for_each_sg(sglist, sg, nents, i) {
0569         if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
0570             arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
0571         vdma_free(sg->dma_address);
0572     }
0573 }
0574 
0575 static void jazz_dma_sync_single_for_device(struct device *dev,
0576         dma_addr_t addr, size_t size, enum dma_data_direction dir)
0577 {
0578     arch_sync_dma_for_device(vdma_log2phys(addr), size, dir);
0579 }
0580 
0581 static void jazz_dma_sync_single_for_cpu(struct device *dev,
0582         dma_addr_t addr, size_t size, enum dma_data_direction dir)
0583 {
0584     arch_sync_dma_for_cpu(vdma_log2phys(addr), size, dir);
0585 }
0586 
0587 static void jazz_dma_sync_sg_for_device(struct device *dev,
0588         struct scatterlist *sgl, int nents, enum dma_data_direction dir)
0589 {
0590     struct scatterlist *sg;
0591     int i;
0592 
0593     for_each_sg(sgl, sg, nents, i)
0594         arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
0595 }
0596 
0597 static void jazz_dma_sync_sg_for_cpu(struct device *dev,
0598         struct scatterlist *sgl, int nents, enum dma_data_direction dir)
0599 {
0600     struct scatterlist *sg;
0601     int i;
0602 
0603     for_each_sg(sgl, sg, nents, i)
0604         arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
0605 }
0606 
/*
 * Jazz DMA operations: all streaming and coherent mappings go through
 * the R4030 VDMA translation table (vdma_alloc/vdma_free) rather than
 * handing the device raw physical addresses.  mmap/get_sgtable and the
 * page allocation hooks fall back to the generic implementations.
 */
const struct dma_map_ops jazz_dma_ops = {
    .alloc          = jazz_dma_alloc,
    .free           = jazz_dma_free,
    .map_page       = jazz_dma_map_page,
    .unmap_page     = jazz_dma_unmap_page,
    .map_sg         = jazz_dma_map_sg,
    .unmap_sg       = jazz_dma_unmap_sg,
    .sync_single_for_cpu    = jazz_dma_sync_single_for_cpu,
    .sync_single_for_device = jazz_dma_sync_single_for_device,
    .sync_sg_for_cpu    = jazz_dma_sync_sg_for_cpu,
    .sync_sg_for_device = jazz_dma_sync_sg_for_device,
    .mmap           = dma_common_mmap,
    .get_sgtable        = dma_common_get_sgtable,
    .alloc_pages        = dma_common_alloc_pages,
    .free_pages     = dma_common_free_pages,
};
EXPORT_SYMBOL(jazz_dma_ops);