/* iommu.c: Generic sparc64 IOMMU support. */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>
#include <asm/iommu-common.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"
#include "kernel.h"

#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

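/*
 * Register accessors.  The IOMMU and streaming-cache control registers
 * are referenced by physical address, so the ldxa/stxa wrappers below go
 * through the ASI_PHYS_BYPASS_EC_E address space.
 */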
#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})

#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu_map_table *iommu_map_table)
{
	struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}

#define IOPTE_CONSISTENT(CTX)	(IOPTE_VALID | IOPTE_CACHE | \
				 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX)	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

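/*
 * Illustrative example only (mirroring dma_4u_map_page() below): a
 * streaming, device-writable mapping of physical address "paddr" under
 * context "ctx" is installed as
 *
 *	iopte_val(*iopte) = IOPTE_STREAMING(ctx) | IOPTE_WRITE |
 *			    (paddr & IOPTE_PAGE);
 *
 * Consistent (coherent) mappings use IOPTE_CONSISTENT(ctx) instead and
 * bypass the streaming cache.
 */
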
/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

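/*
 * iommu_table_init - set up one sun4u IOMMU instance.
 *
 * Hedged usage sketch only: a bus controller (PBM) probe routine is
 * expected to call this once per IOMMU.  The "pbm", "tsbsize",
 * "dvma_offset" and "dma_mask" names below are illustrative, not part of
 * this file:
 *
 *	err = iommu_table_init(&pbm->iommu, tsbsize,
 *			       dvma_offset, dma_mask, pbm->numa_node);
 *	if (err)
 *		return err;
 */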
int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask,
		     int numa_node)
{
	unsigned long i, order, sz, num_tsb_entries;
	struct page *page;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->tbl.table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);
	if (!iommu->tbl.map)
		return -ENOMEM;

	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
			    (tlb_type != hypervisor ? iommu_flushall : NULL),
			    false, 1, false);

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page = (unsigned long) page_address(page);
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself. */
	order = get_order(tsbsize);
	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)page_address(page);

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->tbl.map);
	iommu->tbl.map = NULL;

	return -ENOMEM;
}

static inline iopte_t *alloc_npages(struct device *dev,
				    struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
				      (unsigned long)(-1), 0);
	if (unlikely(entry == IOMMU_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}

static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);

	if (unlikely(n == IOMMU_NUM_CTXS)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}

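/*
 * The allocator below backs dma_alloc_coherent()/dma_free_coherent() for
 * sun4u.  A hedged, driver-side sketch (buffer and device names are
 * illustrative):
 *
 *	dma_addr_t dvma;
 *	void *buf = dma_alloc_coherent(&pdev->dev, len, &dvma, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, len, buf, dvma);
 */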
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   unsigned long attrs)
{
	unsigned long order, first_page;
	struct iommu *iommu;
	struct page *page;
	int npages, nid;
	iopte_t *iopte;
	void *ret;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->tbl.table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}

static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma,
				 unsigned long attrs)
{
	struct iommu *iommu;
	unsigned long order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;

	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  unsigned long attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	base = alloc_npages(dev, iommu, npages);
	spin_lock_irqsave(&iommu->lock, flags);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->tbl.table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_MAPPING_ERROR;
}

static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%llx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      unsigned long attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_free_ctx(iommu, ctx);
	spin_unlock_irqrestore(&iommu->lock, flags);

	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}

static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 unsigned long attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;
	unsigned long base_shift;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return -EINVAL;

	spin_lock_irqsave(&iommu->lock, flags);

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);
	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->tbl.table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);

			entry = (vaddr - iommu->tbl.table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
					     IOMMU_ERROR_CODE);

			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return -EINVAL;
}

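/*
 * dma_4u_map_sg() above and dma_4u_unmap_sg() below are reached through
 * dma_map_sg()/dma_unmap_sg().  A hedged, driver-side sketch (the
 * program_hw_segment() helper is purely illustrative):
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, count, i)
 *		program_hw_segment(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */
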
/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
	unsigned long ctx = 0;

	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;
		struct iommu_map_table *tbl = &iommu->tbl;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);

		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}
	return ctx;
}

static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);

		entry = ((dma_handle - iommu->tbl.table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
				     IOMMU_ERROR_CODE);
		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;
		struct iommu_map_table *tbl = &iommu->tbl;

		iopte = iommu->page_table +
			((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;
		struct iommu_map_table *tbl = &iommu->tbl;

		iopte = iommu->page_table + ((sglist[0].dma_address -
			tbl->table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static int dma_4u_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;

	if (ali_sound_dma_hack(dev, device_mask))
		return 1;

	if (device_mask < iommu->dma_addr_mask)
		return 0;
	return 1;
}

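/*
 * These ops are never called directly by device drivers; they are
 * dispatched through the generic DMA API.  A hedged sketch of a typical
 * streaming mapping from a driver's point of view (start_device_dma() is
 * purely illustrative):
 *
 *	dma_addr_t busa = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, busa))
 *		return -ENOMEM;
 *	start_device_dma(busa, len);
 *	...
 *	dma_unmap_single(dev, busa, len, DMA_TO_DEVICE);
 */
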
static const struct dma_map_ops sun4u_dma_ops = {
	.alloc			= dma_4u_alloc_coherent,
	.free			= dma_4u_free_coherent,
	.map_page		= dma_4u_map_page,
	.unmap_page		= dma_4u_unmap_page,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
	.dma_supported		= dma_4u_supported,
};

const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);