// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;
static u64 s390_iommu_aperture;
static u32 s390_iommu_aperture_factor = 1;

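/* Flush the entire IOTLB of this PCI function, covering the whole aperture. */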
static int zpci_refresh_global(struct zpci_dev *zdev)
{
	return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
				  zdev->iommu_pages * PAGE_SIZE);
}

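/* Allocate a region/segment table with all entries marked invalid. */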
unsigned long *dma_alloc_cpu_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID;
	return table;
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

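/*
 * Get the segment table referenced by a region table entry, allocating
 * and validating a new one if the entry is still invalid.
 */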
static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
	unsigned long *sto;

	if (reg_entry_isvalid(*entry)) {
		sto = get_rt_sto(*entry);
	} else {
		sto = dma_alloc_cpu_table();
		if (!sto)
			return NULL;

		set_rt_sto(entry, virt_to_phys(sto));
		validate_rt_entry(entry);
		entry_clr_protected(entry);
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
	unsigned long *pto;

	if (reg_entry_isvalid(*entry)) {
		pto = get_st_pto(*entry);
	} else {
		pto = dma_alloc_page_table();
		if (!pto)
			return NULL;
		set_st_pto(entry, virt_to_phys(pto));
		validate_st_entry(entry);
		entry_clr_protected(entry);
	}
	return pto;
}

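/*
 * Walk the 3-level translation table for dma_addr, allocating missing
 * segment and page tables on the way, and return a pointer to the
 * page table entry (or NULL on allocation failure).
 */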
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx]);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx]);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

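/* Validate or invalidate a page table entry and set its protection bit. */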
void dma_update_cpu_trans(unsigned long *entry, phys_addr_t page_addr, int flags)
{
	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(entry);
	} else {
		set_pt_pfaa(entry, page_addr);
		validate_pt_entry(entry);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(entry);
	else
		entry_clr_protected(entry);
}

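/*
 * Update the CPU-side translation entries for a range of pages under
 * dma_table_lock, undoing any partial update if a table allocation fails.
 */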
static int __dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa,
			      dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	phys_addr_t page_addr = (pa & PAGE_MASK);
	unsigned long irq_flags;
	unsigned long *entry;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
	if (!zdev->dma_table) {
		rc = -EINVAL;
		goto out_unlock;
	}

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
		if (!entry) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

undo_cpu_trans:
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
		flags = ZPCI_PTE_INVALID;
		while (i-- > 0) {
			page_addr -= PAGE_SIZE;
			dma_addr -= PAGE_SIZE;
			entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
			if (!entry)
				break;
			dma_update_cpu_trans(entry, page_addr, flags);
		}
	}
out_unlock:
	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
	return rc;
}

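/* Flush the IOTLB for a mapped range, subject to the rules described below. */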
static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
			   size_t size, int flags)
{
	unsigned long irqflags;
	int ret;

	/*
	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
	 * translations when previously invalid translation-table entries are
	 * validated. With lazy unmap, rpcit is skipped for previously valid
	 * entries, but a global rpcit is then required before any address can
	 * be re-used, i.e. after each iommu bitmap wrap-around.
	 */
	if ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID) {
		if (!zdev->tlb_refresh)
			return 0;
	} else {
		if (!s390_iommu_strict)
			return 0;
	}

	ret = zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
				 PAGE_ALIGN(size));
	if (ret == -ENOMEM && !s390_iommu_strict) {
		/* enable the hypervisor to free some resources */
		if (zpci_refresh_global(zdev))
			goto out;

		spin_lock_irqsave(&zdev->iommu_bitmap_lock, irqflags);
		bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
			      zdev->lazy_bitmap, zdev->iommu_pages);
		bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
		spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, irqflags);
		ret = 0;
	}
out:
	return ret;
}

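/*
 * Update the translation tables and flush the IOTLB; if the flush fails
 * for a new mapping, tear the CPU-side translations down again.
 */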
static int dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	int rc;

	rc = __dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (rc)
		return rc;

	rc = __dma_purge_tlb(zdev, dma_addr, size, flags);
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
		__dma_update_trans(zdev, pa, dma_addr, size, ZPCI_PTE_INVALID);

	return rc;
}

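/* Free a segment table and all page tables it references. */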
void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

void dma_cleanup_tables(unsigned long *table)
{
	int rtx;

	if (!table)
		return;

	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
}

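/*
 * Search the allocation bitmap for a free IOVA range of @size pages,
 * starting at @start and honouring the device's segment boundary.
 */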
static unsigned long __dma_alloc_iommu(struct device *dev,
				       unsigned long start, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, zdev->start_dma >> PAGE_SHIFT,
				dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT),
				0);
}

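/*
 * Allocate a DMA address range, first continuing after the previous
 * allocation; on wrap-around in lazy mode, flush the IOTLB globally and
 * reclaim lazily freed addresses before retrying from the start.
 */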
static dma_addr_t dma_alloc_address(struct device *dev, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long offset, flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
	if (offset == -1) {
		if (!s390_iommu_strict) {
			/* global flush before DMA addresses are reused */
			if (zpci_refresh_global(zdev))
				goto out_error;

			bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
				      zdev->lazy_bitmap, zdev->iommu_pages);
			bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
		}

		offset = __dma_alloc_iommu(dev, 0, size);
		if (offset == -1)
			goto out_error;
	}
	zdev->next_bit = offset + size;
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);

	return zdev->start_dma + offset * PAGE_SIZE;

out_error:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return DMA_MAPPING_ERROR;
}

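/*
 * Release a DMA address range: freed immediately in strict mode, or
 * recorded in lazy_bitmap for reuse after the next global IOTLB flush.
 */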
static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long flags, offset;

	offset = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;

	if (s390_iommu_strict)
		bitmap_clear(zdev->iommu_bitmap, offset, size);
	else
		bitmap_set(zdev->lazy_bitmap, offset, size);

out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
{
	struct {
		unsigned long rc;
		unsigned long addr;
	} __packed data = {rc, addr};

	zpci_err_hex(&data, sizeof(data));
}

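/*
 * Map a physically contiguous range: allocate an IOVA range, install
 * the translations and return the bus address including the in-page
 * offset.
 */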
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	unsigned long nr_pages;
	dma_addr_t dma_addr;
	int ret;

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	dma_addr = dma_alloc_address(dev, nr_pages);
	if (dma_addr == DMA_MAPPING_ERROR) {
		ret = -ENOSPC;
		goto out_err;
	}

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (ret)
		goto out_free;

	atomic64_add(nr_pages, &zdev->mapped_pages);
	return dma_addr + (offset & ~PAGE_MASK);

out_free:
	dma_free_address(dev, dma_addr, nr_pages);
out_err:
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return DMA_MAPPING_ERROR;
}

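/* Invalidate the translations of a mapped range and release its IOVA range. */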
static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	int npages, ret;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			       ZPCI_PTE_INVALID);
	if (ret) {
		zpci_err("unmap error:\n");
		zpci_err_dma(ret, dma_addr);
		return;
	}

	atomic64_add(npages, &zdev->unmapped_pages);
	dma_free_address(dev, dma_addr, npages);
}

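/* Allocate zeroed, page-aligned memory mapped for bidirectional DMA. */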
static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	struct page *page;
	phys_addr_t pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag | __GFP_ZERO, get_order(size));
	if (!page)
		return NULL;

	pa = page_to_phys(page);
	map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, map)) {
		__free_pages(page, get_order(size));
		return NULL;
	}

	atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
	if (dma_handle)
		*dma_handle = map;
	return phys_to_virt(pa);
}

static void s390_dma_free(struct device *dev, size_t size,
			  void *vaddr, dma_addr_t dma_handle,
			  unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

	size = PAGE_ALIGN(size);
	atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
	s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
	free_pages((unsigned long)vaddr, get_order(size));
}

/* Map a segment into a contiguous dma address area */
static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			     size_t size, dma_addr_t *handle,
			     enum dma_data_direction dir)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	dma_addr_t dma_addr_base, dma_addr;
	int flags = ZPCI_PTE_VALID;
	struct scatterlist *s;
	phys_addr_t pa = 0;
	int ret;

	dma_addr_base = dma_alloc_address(dev, nr_pages);
	if (dma_addr_base == DMA_MAPPING_ERROR)
		return -ENOMEM;

	dma_addr = dma_addr_base;
	if (dir == DMA_NONE || dir == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
		pa = page_to_phys(sg_page(s));
		ret = __dma_update_trans(zdev, pa, dma_addr,
					 s->offset + s->length, flags);
		if (ret)
			goto unmap;

		dma_addr += s->offset + s->length;
	}
	ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
	if (ret)
		goto unmap;

	*handle = dma_addr_base;
	atomic64_add(nr_pages, &zdev->mapped_pages);

	return ret;

unmap:
	dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
			 ZPCI_PTE_INVALID);
	dma_free_address(dev, dma_addr_base, nr_pages);
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return ret;
}

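/*
 * Map a scatterlist, merging consecutive elements into a single DMA
 * segment as long as the combined segment stays internally page-aligned
 * and below the device's maximum segment size.
 */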
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct scatterlist *s = sg, *start = sg, *dma = sg;
	unsigned int max = dma_get_max_seg_size(dev);
	unsigned int size = s->offset + s->length;
	unsigned int offset = s->offset;
	int count = 0, i, ret;

	for (i = 1; i < nr_elements; i++) {
		s = sg_next(s);

		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) ||
		    size + s->length > max) {
			ret = __s390_dma_map_sg(dev, start, size,
						&dma->dma_address, dir);
			if (ret)
				goto unmap;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count++;
		}
		size += s->length;
	}
	ret = __s390_dma_map_sg(dev, start, size, &dma->dma_address, dir);
	if (ret)
		goto unmap;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count + 1;
unmap:
	for_each_sg(sg, s, count, i)
		s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
				     dir, attrs);

	return ret;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		if (s->dma_length)
			s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
					     dir, attrs);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}

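/*
 * Set up the translation table, IOVA bitmaps and DMA range for a zPCI
 * function and register the table with the hardware.
 */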
int zpci_dma_init_device(struct zpci_dev *zdev)
{
	int rc;

	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous invocation of IOMMU core and DMA API implementation. So
	 * let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	spin_lock_init(&zdev->iommu_bitmap_lock);
	spin_lock_init(&zdev->dma_table_lock);

	zdev->dma_table = dma_alloc_cpu_table();
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * Restrict the iommu bitmap size to the minimum of the following:
	 * - s390_iommu_aperture which defaults to high_memory
	 * - 3-level pagetable address limit minus start_dma offset
	 * - DMA address range allowed by the hardware (clp query pci fn)
	 *
	 * Also set zdev->end_dma to the actual end address of the usable
	 * range, instead of the theoretical maximum as reported by hardware.
	 *
	 * This limits the number of concurrently usable DMA mappings since
	 * for each DMA mapped memory address we need a DMA address including
	 * extra DMA addresses for multiple mappings of the same memory address.
	 */
	zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
	zdev->iommu_size = min3(s390_iommu_aperture,
				ZPCI_TABLE_SIZE_RT - zdev->start_dma,
				zdev->end_dma - zdev->start_dma + 1);
	zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto free_dma_table;
	}
	if (!s390_iommu_strict) {
		zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
		if (!zdev->lazy_bitmap) {
			rc = -ENOMEM;
			goto free_bitmap;
		}
	}
	if (zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
			       virt_to_phys(zdev->dma_table))) {
		rc = -EIO;
		goto free_bitmap;
	}

	return 0;
free_bitmap:
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	vfree(zdev->lazy_bitmap);
	zdev->lazy_bitmap = NULL;
free_dma_table:
	dma_free_cpu_table(zdev->dma_table);
	zdev->dma_table = NULL;
out:
	return rc;
}

int zpci_dma_exit_device(struct zpci_dev *zdev)
{
	int cc = 0;

	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous invocation of IOMMU core and DMA API implementation. So
	 * let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);
	if (zdev_enabled(zdev))
		cc = zpci_unregister_ioat(zdev, 0);
	/*
	 * cc == 3 indicates the function is gone already. This can happen
	 * if the function was deconfigured/disabled suddenly and we have not
	 * received a new fh.
	 */
	if (cc && cc != 3)
		return -EIO;

	dma_cleanup_tables(zdev->dma_table);
	zdev->dma_table = NULL;
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	vfree(zdev->lazy_bitmap);
	zdev->lazy_bitmap = NULL;
	zdev->next_bit = 0;
	return 0;
}

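/* Create the kmem caches used for region/segment tables and page tables. */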
static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

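/*
 * The aperture defaults to the amount of usable memory (high_memory)
 * scaled by s390_iommu_aperture_factor; a factor of 0 removes the limit.
 */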
int __init zpci_dma_init(void)
{
	s390_iommu_aperture = (u64)high_memory;
	if (!s390_iommu_aperture_factor)
		s390_iommu_aperture = ULONG_MAX;
	else
		s390_iommu_aperture *= s390_iommu_aperture_factor;

	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}

const struct dma_map_ops s390_pci_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	.mmap		= dma_common_mmap,
	.get_sgtable	= dma_common_get_sgtable,
	.alloc_pages	= dma_common_alloc_pages,
	.free_pages	= dma_common_free_pages,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_pci_dma_ops);

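/* Handle the "s390_iommu=strict" kernel parameter. */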
static int __init s390_iommu_setup(char *str)
{
	if (!strcmp(str, "strict"))
		s390_iommu_strict = 1;
	return 1;
}

__setup("s390_iommu=", s390_iommu_setup);

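/* Handle the "s390_iommu_aperture=" factor; invalid values fall back to 1. */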
static int __init s390_iommu_aperture_setup(char *str)
{
	if (kstrtou32(str, 10, &s390_iommu_aperture_factor))
		s390_iommu_aperture_factor = 1;
	return 1;
}

__setup("s390_iommu_aperture=", s390_iommu_aperture_setup);