0001
0002
0003
0004
0005
0006 #include "intel_ggtt_gmch.h"
0007
0008 #include <drm/intel-gtt.h>
0009 #include <drm/i915_drm.h>
0010
0011 #include <linux/agp_backend.h>
0012
0013 #include "i915_drv.h"
0014 #include "i915_utils.h"
0015 #include "intel_gtt.h"
0016 #include "intel_gt_regs.h"
0017 #include "intel_gt.h"
0018
0019 static void gmch_ggtt_insert_page(struct i915_address_space *vm,
0020 dma_addr_t addr,
0021 u64 offset,
0022 enum i915_cache_level cache_level,
0023 u32 unused)
0024 {
0025 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
0026 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
0027
0028 intel_gmch_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
0029 }
0030
0031 static void gmch_ggtt_insert_entries(struct i915_address_space *vm,
0032 struct i915_vma_resource *vma_res,
0033 enum i915_cache_level cache_level,
0034 u32 unused)
0035 {
0036 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
0037 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
0038
0039 intel_gmch_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
0040 flags);
0041 }
0042
/* GGTT invalidate hook: flush pending GTT updates through the GMCH. */
static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	intel_gmch_gtt_flush();
}
0047
0048 static void gmch_ggtt_clear_range(struct i915_address_space *vm,
0049 u64 start, u64 length)
0050 {
0051 intel_gmch_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
0052 }
0053
/* Address-space cleanup hook: tear down the GMCH/AGP backend. */
static void gmch_ggtt_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}
0058
0059
0060
0061
0062
0063 static bool needs_idle_maps(struct drm_i915_private *i915)
0064 {
0065
0066
0067
0068
0069 if (!i915_vtd_active(i915))
0070 return false;
0071
0072 if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
0073 return true;
0074
0075 return false;
0076 }
0077
/**
 * intel_ggtt_gmch_probe - set up the global GTT through the GMCH/AGP layer
 * @ggtt: the GGTT to initialise
 *
 * Probes the GMCH via the intel-gtt helper library, records the GTT size
 * and mappable aperture, installs the GMCH-backed PTE vfuncs, and enables
 * the VT-d idle-maps workaround when needs_idle_maps() says it is required.
 *
 * Returns: 0 on success, -EIO if the GMCH could not be probed.
 */
int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	phys_addr_t gmadr_base;
	int ret;

	/* intel_gmch_probe() returns non-zero on success, 0 on failure. */
	ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
	if (!ret) {
		drm_err(&i915->drm, "failed to set up gmch\n");
		return -EIO;
	}

	intel_gmch_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);

	/* Describe the mappable aperture as a struct resource. */
	ggtt->gmadr =
		(struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;
	ggtt->vm.alloc_scratch_dma = alloc_pt_dma;

	if (needs_idle_maps(i915)) {
		drm_notice(&i915->drm,
			   "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
		ggtt->do_idle_maps = true;
	}

	/* Route all GGTT PTE manipulation through the GMCH helpers above. */
	ggtt->vm.insert_page = gmch_ggtt_insert_page;
	ggtt->vm.insert_entries = gmch_ggtt_insert_entries;
	ggtt->vm.clear_range = gmch_ggtt_clear_range;
	ggtt->vm.cleanup = gmch_ggtt_remove;

	ggtt->invalidate = gmch_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;

	if (unlikely(ggtt->do_idle_maps))
		drm_notice(&i915->drm,
			   "Applying Ironlake quirks for intel_iommu\n");

	return 0;
}
0120
0121 int intel_ggtt_gmch_enable_hw(struct drm_i915_private *i915)
0122 {
0123 if (!intel_gmch_enable_gtt())
0124 return -EIO;
0125
0126 return 0;
0127 }
0128
/* Public wrapper: flush pending GTT updates through the GMCH. */
void intel_ggtt_gmch_flush(void)
{
	intel_gmch_gtt_flush();
}