// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows to use PCI devices that only support 32bit addresses
 * on systems with more than 4GB.
 *
 * See Documentation/core-api/dma-api-howto.rst for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <asm/mtrr.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/set_memory.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it set the GART is
 * flushed for every mapping. Lazy flushing is faster, but has caused
 * problems on some hardware, so flushing on every mapping is the default.
 */
static int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area, guarded by iommu_bitmap_lock: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
static unsigned long *iommu_gart_bitmap;

/* GATT entry written for unmapped aperture pages (points at a scratch page) */
static u32 gart_unmapped_entry;
#define GPTE_VALID    1
#define GPTE_COHERENT 2

/* A GATT entry keeps physical address bits 31:12 in place and folds bits 39:32 into bits 11:4 */
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* GART can only remap to physical addresses < 1TB */
#define GART_MAX_PHYS_ADDR	(1ULL << 40)

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state, set on each GART wrap */
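
/*
 * Next-fit allocator for the aperture: scan forward from next_bit for a
 * free run of 'size' pages; on wrap-around, restart from 0 and remember
 * that the GART TLB must be flushed before reused entries are handed out.
 */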
static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT);

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset + size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}
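
/*
 * Release a range previously handed out by alloc_iommu(). The stale GART
 * entries are not flushed here; flush_gart() takes care of that lazily.
 */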
static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	bitmap_clear(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		amd_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static void dump_leak(void)
{
	static int dump;

	if (dump)
		return;
	dump = 1;

	show_stack(NULL, NULL, KERN_ERR);
	debug_dma_dump_mappings(NULL);
}
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

/* Does the device need a GART mapping for this address/size pair? */
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu || !dma_capable(dev, addr, size, true);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !dma_capable(dev, addr, size, true);
}

/*
 * Map a single continuous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page;
	int i;

	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
		return DMA_MAPPING_ERROR;

	iommu_page = alloc_iommu(dev, npages, align_mask);
	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return DMA_MAPPING_ERROR;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		phys_mem += PAGE_SIZE;
	}
	/* The in-page offset survives the += PAGE_SIZE increments above. */
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	unsigned long bus;
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    unsigned long attrs)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (WARN_ON_ONCE(dma_addr == DMA_MAPPING_ERROR))
		return;

	/*
	 * This driver will not always use a GART mapping, but it might
	 * have created one sometime earlier.
	 */
	if (dma_addr < iommu_bus_base ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	pr_debug("dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == DMA_MAPPING_ERROR) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, 0);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries continuous into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -ENOMEM;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a continuous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start, ret;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return -EINVAL;

	out		= 0;
	start		= 0;
	start_sg	= sg;
	sgmap		= sg;
	seg_size	= 0;
	max_seg_size	= dma_get_max_seg_size(dev);
	ps		= NULL; /* shut up gcc */

	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				ret = dma_map_cont(dev, start_sg, i - start,
						   sgmap, pages, need);
				if (ret < 0)
					goto error;
				out++;

				seg_size	= 0;
				sgmap		= sg_next(sgmap);
				pages		= 0;
				start		= i;
				start_sg	= s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
	ret = dma_map_cont(dev, start_sg, i - start, sgmap, pages, need);
	if (ret < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir, 0);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	return ret;
}

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag, unsigned long attrs)
{
	void *vaddr;

	vaddr = dma_direct_alloc(dev, size, dma_addr, flag, attrs);
	if (!vaddr ||
	    !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
		return vaddr;

	*dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
				 DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1);
	flush_gart();
	if (unlikely(*dma_addr == DMA_MAPPING_ERROR))
		goto out_free;
	return vaddr;
out_free:
	dma_direct_free(dev, size, vaddr, *dma_addr, attrs);
	return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr, unsigned long attrs)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
	dma_direct_free(dev, size, vaddr, dma_addr, attrs);
}

static int no_agp;
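
/*
 * Decide how much of the aperture the IOMMU may use. When the AGP driver
 * shares the aperture, the IOMMU only gets half of it; the size is then
 * trimmed relative to the next 2MB (PMD) boundary.
 */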
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		pr_warn("PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
			iommu_size >> 20);
	}

	return iommu_size;
}
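
/*
 * Read the GART aperture base and size from the northbridge registers.
 * The base register holds physical address bits 39:25; apertures that
 * end above 4GB (or have zero size) are rejected.
 */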
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

static void enable_gart_translations(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}

	/* Flush the GART-TLB to remove stale entries */
	amd_flush_garts();
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up
 * on resume: the aperture registers are reprogrammed with the values
 * saved here by set_up_gart_resume().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static void gart_fixup_northbridges(void)
{
	int i;

	if (!fix_up_north_bridges)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	pr_info("PCI-DMA: Restoring GART aperture settings\n");

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		/*
		 * Don't enable translations just yet.  That is the next
		 * step.  Restore the pre-suspend aperture settings.
		 */
		gart_set_size_and_enable(dev, aperture_order);
		pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
	}
}

static void gart_resume(void)
{
	pr_info("PCI-DMA: Resuming GART IOMMU\n");

	gart_fixup_northbridges();

	enable_gart_translations();
}

static struct syscore_ops gart_syscore_ops = {
	.resume		= gart_resume,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_amd_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i;

	pr_info("PCI-DMA: Disabling AGP.\n");

	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < amd_nb_num(); i++) {
		dev = node_to_amd_nb(i)->misc;
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;

	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	register_syscore_ops(&gart_syscore_ops);

	flush_gart();

	pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
		aper_base, aper_size>>10);

	return 0;

 nommu:
	/* Should not happen anymore */
	pr_warn("PCI-DMA: More than 4GB of RAM and no IOMMU - falling back to iommu=soft.\n");
	return -1;
}
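
/*
 * The exported DMA mapping operations. Page and scatterlist mappings go
 * through the GART; coherent allocations come from the dma-direct code
 * and are remapped through the GART only when force_iommu requires it.
 */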
static const struct dma_map_ops gart_dma_ops = {
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.map_page			= gart_map_page,
	.unmap_page			= gart_unmap_page,
	.alloc				= gart_alloc_coherent,
	.free				= gart_free_coherent,
	.mmap				= dma_common_mmap,
	.get_sgtable			= dma_common_get_sgtable,
	.dma_supported			= dma_direct_supported,
	.get_required_mask		= dma_direct_get_required_mask,
	.alloc_pages			= dma_direct_alloc_pages,
	.free_pages			= dma_direct_free_pages,
};
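
/* Disable GART translation on all northbridges when shutting down. */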
static void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	/* don't shut down the GART bridges while AGP is still using them */
	if (!no_agp)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		u32 ctl;

		dev = node_to_amd_nb(i)->misc;
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

int __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other AMD AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_amd_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			pr_warn("More than 4GB of memory but GART IOMMU not available.\n");
			pr_warn("falling back to iommu=soft.\n");
		}
		return 0;
	}

	/* need to map that range */
	aper_size	= info.aper_size << 20;
	aper_base	= info.aper_base;
	end_pfn		= (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);

	start_pfn = PFN_DOWN(aper_base);
	if (!pfn_range_is_mapped(start_pfn, end_pfn))
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT,
				    PAGE_KERNEL);

	pr_info("PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						     get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
		iommu_size >> 20);

	agp_memory_reserved	= iommu_size;
	iommu_start		= aper_size - iommu_size;
	iommu_bus_base		= info.aper_base + iommu_start;
	iommu_gatt_base		= agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);

	/*
	 * Tricky. The GART table remaps the physical memory range, so the
	 * CPU won't notice potential aliases and if the memory is remapped
	 * to UC later on, we might surprise the PCI devices with a stray
	 * writeout of a cacheline. So play it safe and do an explicit,
	 * full-scale wbinvd() _after_ having marked all the pages as
	 * Not-Present:
	 */
	wbinvd();

	/*
	 * Now all caches are flushed and we can safely enable GART
	 * hardware. Doing it early leaves the possibility of stale cache
	 * entries that can lead to GART PTE scribbling.
	 */
	enable_gart_translations();

	/*
	 * Try to work around a hardware issue: set unmapped entries to a
	 * scratch page instead of 0, so prefetches that hit unmapped
	 * entries don't cause a bus abort. (A P2P bridge may be
	 * prefetching on DMA reserved regions.)
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));

	flush_gart();
	dma_ops = &gart_dma_ops;
	x86_platform.iommu_shutdown = gart_iommu_shutdown;
	x86_swiotlb_enable = false;

	return 0;
}
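
/* Parse the GART-specific suboptions of the iommu= kernel parameter. */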
void __init gart_parse_options(char *p)
{
	int arg;

	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}