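/*
 * iommu.c: IOMMU routines for the sparc32 (SRMMU/SBus) DVMA window.
 */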
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

#include "mm_32.h"
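/*
 * The IOMMU translates a 256 MB DVMA window starting at bus address
 * 0xF0000000.  With 4 KB pages that is 64K IOPTEs, i.e. a 256 KB page
 * table, allocated below as a block of 2^IOMMU_ORDER pages.
 */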
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)
#define IOMMU_ORDER	6

static int viking_flush;

extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
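/*
 * IOPTE permission bits and kernel pte protection used for consistent
 * (DMA-coherent) mappings; chosen per CPU type in ld_mmu_iommu().
 */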
static unsigned int ioperm_noc;
static pgprot_t dvma_prot;

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)

static const struct dma_map_ops sbus_iommu_dma_gflush_ops;
static const struct dma_map_ops sbus_iommu_dma_pflush_ops;
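/*
 * Probe one "iommu" node: map its registers, enable a 256 MB range,
 * allocate and install the IOPTE table and allocation bitmap, and pick
 * the dma_map_ops variant that matches the CPU's DMA flushing behavior.
 */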
static void __init sbus_iommu_init(struct platform_device *op)
{
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long control;
	unsigned long base;
	unsigned long tmp;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}

	control = sbus_readl(&iommu->regs->control);
	impl = (control & IOMMU_CTRL_IMPL) >> 28;
	vers = (control & IOMMU_CTRL_VERS) >> 24;
	control &= ~(IOMMU_CTRL_RNGE);
	control |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	sbus_writel(control, &iommu->regs->control);

	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;
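	/*
	 * The IOPTE table must be physically contiguous and naturally
	 * aligned; a 2^IOMMU_ORDER page allocation (256 KB) satisfies
	 * the hardware's alignment requirement.
	 */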
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%lx]\n",
			    IOMMU_NPTES * sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	memset(iommu->page_table, 0, IOMMU_NPTES * sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();

	base = __pa((unsigned long)iommu->page_table) >> 4;
	sbus_writel(base, &iommu->regs->base);
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES >> 3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES >> 3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
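	/*
	 * HyperSparc has a virtually indexed cache, so the DVMA address
	 * must share the cache color of the underlying physical page;
	 * track one allocator color per cache page.
	 */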
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;

	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES * sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;

	if (flush_page_for_dma_global)
		op->dev.dma_ops = &sbus_iommu_dma_gflush_ops;
	else
		op->dev.dma_ops = &sbus_iommu_dma_pflush_ops;
}
static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct platform_device *op = of_find_device_by_node(dp);

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}

subsys_initcall(iommu_init);
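/*
 * The IOMMU fetches IOPTEs from physical memory, so any CPU cache lines
 * covering freshly written entries must be flushed before the hardware
 * can be trusted to see them.
 */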
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte * sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}
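/*
 * Map one physical range into the DVMA window: reserve a run of IOPTEs
 * from the bitmap, fill them in, and return the resulting bus address.
 * Requests larger than 256 KB are rejected.
 */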
static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t len, bool per_page_flush)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	phys_addr_t paddr = page_to_phys(page) + offset;
	unsigned long off = paddr & ~PAGE_MASK;
	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long pfn = __phys_to_pfn(paddr);
	unsigned int busa, busa0;
	iopte_t *iopte, *iopte0;
	int ioptex, i;

	if (!len || len > 256 * 1024)
		return DMA_MAPPING_ERROR;
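	/*
	 * With per-page flushing, push any dirty cache lines for the
	 * buffer out to memory before the device reads it.  Highmem
	 * pages are skipped on the assumption that unmapped highmem is
	 * not present in the cache.
	 */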
	if (per_page_flush && !PageHighMem(page)) {
		unsigned long vaddr, p;

		vaddr = (unsigned long)page_address(page) + offset;
		for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
			flush_page_for_dma(p);
	}

	ioptex = bit_map_string_get(&iommu->usemap, npages, pfn);
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(pfn, IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		pfn++;
	}

	iommu_flush_iotlb(iopte0, npages);
	return busa0 + off;
}
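/*
 * The "gflush" variants flush the entire cache once per mapping call
 * (used when flush_page_for_dma() acts as a global flush on this CPU);
 * the "pflush" variants flush each page of the buffer individually.
 */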
static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	flush_page_for_dma(0);
	return __sbus_iommu_map_page(dev, page, offset, len, false);
}

static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	return __sbus_iommu_map_page(dev, page, offset, len, true);
}
static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs,
		bool per_page_flush)
{
	struct scatterlist *sg;
	int j;

	for_each_sg(sgl, sg, nents, j) {
		sg->dma_address = __sbus_iommu_map_page(dev, sg_page(sg),
				sg->offset, sg->length, per_page_flush);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			return -EIO;
		sg->dma_length = sg->length;
	}

	return nents;
}
static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	flush_page_for_dma(0);
	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, false);
}

static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, true);
}
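/*
 * Tear down a streaming mapping: clear the IOPTEs, invalidate the IOMMU
 * TLB entry for each bus page, and release the range in the bitmap.
 */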
static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
		size_t len, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned int busa = dma_addr & PAGE_MASK;
	unsigned long off = dma_addr & ~PAGE_MASK;
	unsigned int npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	unsigned int i;

	BUG_ON(busa < iommu->start);
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}
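/*
 * Unmap every entry of a scatterlist; dma_address is poisoned with a
 * recognizable pattern to help catch use after unmap.
 */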
static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sbus_iommu_unmap_page(dev, sg->dma_address, sg->length, dir,
				attrs);
		sg->dma_address = 0x21212121;
	}
}
#ifdef CONFIG_SBUS
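/*
 * Allocate a DMA-coherent buffer: grab physically contiguous pages, remap
 * them into the kernel DVMA virtual area with dvma_prot (non-cacheable on
 * CPUs that need it), and install matching IOPTEs so the device sees the
 * same buffer through the IOMMU window.
 */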
static void *sbus_iommu_alloc(struct device *dev, size_t len,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long va, addr, page, end, ret;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	if (!len || len > 256 * 1024)
		return NULL;
	len = PAGE_ALIGN(len);
	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
	if (va == 0)
		return NULL;

	addr = ret = sparc_dma_alloc_resource(dev, len);
	if (!addr)
		goto out_free_pages;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
				    addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while (addr < end) {
		page = va;
		{
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pmdp = pmd_off_k(addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
			MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
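	/*
	 * Make the new kernel ptes and IOPTEs visible everywhere: flush
	 * the CPU caches and TLBs for the remapped range and invalidate
	 * the IOMMU's own TLB before handing out the bus address.
	 */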
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*dma_handle = iommu->start + (ioptex << PAGE_SHIFT);
	return (void *)ret;

out_free_pages:
	free_pages(va, get_order(len));
	return NULL;
}
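/*
 * Undo sbus_iommu_alloc(): release the DVMA virtual area, clear the
 * IOPTEs, and free the backing pages.
 */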
static void sbus_iommu_free(struct device *dev, size_t len, void *cpu_addr,
		dma_addr_t busa, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	struct page *page = virt_to_page(cpu_addr);
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	unsigned long end;

	if (!sparc_dma_free_resource(cpu_addr, len))
		return;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);

	__free_pages(page, get_order(len));
}
#endif
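/*
 * Two dma_map_ops tables, identical except for the streaming-mapping
 * entry points: the gflush variants rely on a single global cache flush
 * per call, the pflush variants flush page by page.
 */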
static const struct dma_map_ops sbus_iommu_dma_gflush_ops = {
#ifdef CONFIG_SBUS
	.alloc		= sbus_iommu_alloc,
	.free		= sbus_iommu_free,
#endif
	.map_page	= sbus_iommu_map_page_gflush,
	.unmap_page	= sbus_iommu_unmap_page,
	.map_sg		= sbus_iommu_map_sg_gflush,
	.unmap_sg	= sbus_iommu_unmap_sg,
};

static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
#ifdef CONFIG_SBUS
	.alloc		= sbus_iommu_alloc,
	.free		= sbus_iommu_free,
#endif
	.map_page	= sbus_iommu_map_page_pflush,
	.unmap_page	= sbus_iommu_unmap_page,
	.map_sg		= sbus_iommu_map_sg_pflush,
	.unmap_sg	= sbus_iommu_unmap_sg,
};
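/*
 * Select the DVMA pte and IOPTE flags for this CPU: cacheable mappings on
 * Viking with MXCC and on HyperSparc, non-cacheable mappings on all other
 * SRMMU CPUs.
 */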
void __init ld_mmu_iommu(void)
{
	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}