0001
0002
0003
0004
0005
0006
0007 #include <linux/slab.h> /* fault-inject.h is not standalone! */
0008
0009 #include <linux/fault-inject.h>
0010 #include <linux/log2.h>
0011 #include <linux/random.h>
0012 #include <linux/seq_file.h>
0013 #include <linux/stop_machine.h>
0014
0015 #include <asm/set_memory.h>
0016 #include <asm/smp.h>
0017
0018 #include "display/intel_frontbuffer.h"
0019 #include "gt/intel_gt.h"
0020 #include "gt/intel_gt_requests.h"
0021
0022 #include "i915_drv.h"
0023 #include "i915_gem_evict.h"
0024 #include "i915_scatterlist.h"
0025 #include "i915_trace.h"
0026 #include "i915_vgpu.h"
0027
0028 int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
0029 struct sg_table *pages)
0030 {
0031 do {
0032 if (dma_map_sg_attrs(obj->base.dev->dev,
0033 pages->sgl, pages->nents,
0034 DMA_BIDIRECTIONAL,
0035 DMA_ATTR_SKIP_CPU_SYNC |
0036 DMA_ATTR_NO_KERNEL_MAPPING |
0037 DMA_ATTR_NO_WARN))
0038 return 0;
0039
0040
0041
0042
0043
0044
0045
0046
0047 GEM_BUG_ON(obj->mm.pages == pages);
0048 } while (i915_gem_shrink(NULL, to_i915(obj->base.dev),
0049 obj->base.size >> PAGE_SHIFT, NULL,
0050 I915_SHRINK_BOUND |
0051 I915_SHRINK_UNBOUND));
0052
0053 return -ENOSPC;
0054 }
0055
0056 void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
0057 struct sg_table *pages)
0058 {
0059 struct drm_i915_private *i915 = to_i915(obj->base.dev);
0060 struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
0061
0062
0063 if (unlikely(ggtt->do_idle_maps))
0064
0065 usleep_range(100, 250);
0066
0067 dma_unmap_sg(i915->drm.dev, pages->sgl, pages->nents,
0068 DMA_BIDIRECTIONAL);
0069 }
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097 int i915_gem_gtt_reserve(struct i915_address_space *vm,
0098 struct i915_gem_ww_ctx *ww,
0099 struct drm_mm_node *node,
0100 u64 size, u64 offset, unsigned long color,
0101 unsigned int flags)
0102 {
0103 int err;
0104
0105 GEM_BUG_ON(!size);
0106 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
0107 GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
0108 GEM_BUG_ON(range_overflows(offset, size, vm->total));
0109 GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm);
0110 GEM_BUG_ON(drm_mm_node_allocated(node));
0111
0112 node->size = size;
0113 node->start = offset;
0114 node->color = color;
0115
0116 err = drm_mm_reserve_node(&vm->mm, node);
0117 if (err != -ENOSPC)
0118 return err;
0119
0120 if (flags & PIN_NOEVICT)
0121 return -ENOSPC;
0122
0123 err = i915_gem_evict_for_node(vm, ww, node, flags);
0124 if (err == 0)
0125 err = drm_mm_reserve_node(&vm->mm, node);
0126
0127 return err;
0128 }
0129
0130 static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
0131 {
0132 u64 range, addr;
0133
0134 GEM_BUG_ON(range_overflows(start, len, end));
0135 GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
0136
0137 range = round_down(end - len, align) - round_up(start, align);
0138 if (range) {
0139 if (sizeof(unsigned long) == sizeof(u64)) {
0140 addr = get_random_long();
0141 } else {
0142 addr = get_random_int();
0143 if (range > U32_MAX) {
0144 addr <<= 32;
0145 addr |= get_random_int();
0146 }
0147 }
0148 div64_u64_rem(addr, range, &addr);
0149 start += addr;
0150 }
0151
0152 return round_up(start, align);
0153 }
0154
0155
0156
0157
0158
0159
0160
0161
0162
0163
0164
0165
0166
0167
0168
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187
0188
0189
/*
 * Find free space in @vm for @node and insert it there.
 *
 * @size must be a page-aligned, non-zero length; @alignment, if non-zero,
 * must be a power of two; [start, end) bounds the search window. @flags
 * (PIN_HIGH / PIN_MAPPABLE / PIN_NOEVICT / PIN_NOSEARCH) steer placement
 * and whether eviction is allowed. Must be called with vm->mutex held.
 *
 * Allocation strategy, in order:
 *   1. plain drm_mm search in the requested mode;
 *   2. for "once"-style modes, a second exhaustive BEST-fit scan;
 *   3. evict a single randomly chosen slot and reserve it;
 *   4. full eviction scan (i915_gem_evict_something) and insert again.
 *
 * Returns 0 on success, -ENOSPC if no space could be made, or another
 * negative errno from eviction.
 */
int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct i915_gem_ww_ctx *ww,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags)
{
	enum drm_mm_insert_mode mode;
	u64 offset;
	int err;

	lockdep_assert_held(&vm->mutex);

	/* Validate the caller's size/alignment/range contract up front. */
	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(start >= end);
	GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	/* The object cannot possibly fit in the window: fail fast. */
	if (unlikely(range_overflows(start, size, end)))
		return -ENOSPC;

	/* No aligned start offset leaves room for @size before @end. */
	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
		return -ENOSPC;

	/*
	 * Placement policy: PIN_HIGH packs from the top of the window,
	 * PIN_MAPPABLE from the bottom; otherwise best-fit. Note
	 * PIN_MAPPABLE takes precedence when both flags are set.
	 */
	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGHEST;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;

	/*
	 * Since everything in the GTT is at least page-sized, an alignment
	 * no stricter than the page size is automatically satisfied and
	 * can be dropped, letting drm_mm take a faster path.
	 */
	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
	if (alignment <= I915_GTT_MIN_ALIGNMENT)
		alignment = 0;

	err = drm_mm_insert_node_in_range(&vm->mm, node,
					  size, alignment, color,
					  start, end, mode);
	if (err != -ENOSPC)
		return err;

	/*
	 * "ONCE" modes (e.g. HIGHEST) inspect only the most recently freed
	 * hole per size class; before resorting to eviction, rescan every
	 * hole with an exhaustive best-fit search.
	 */
	if (mode & DRM_MM_INSERT_ONCE) {
		err = drm_mm_insert_node_in_range(&vm->mm, node,
						  size, alignment, color,
						  start, end,
						  DRM_MM_INSERT_BEST);
		if (err != -ENOSPC)
			return err;
	}

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	/*
	 * No free space at all: evict something. First try reclaiming a
	 * single randomly chosen slot of the right size — presumably this
	 * randomisation avoids pathological worst-case patterns where a
	 * full eviction scan repeatedly throws out the entire GTT
	 * (NOTE(review): intent inferred; confirm against git history).
	 */
	offset = random_offset(start, end,
			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
	err = i915_gem_gtt_reserve(vm, ww, node, size, offset, color, flags);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOSEARCH)
		return -ENOSPC;

	/* Last resort: a full eviction scan across the whole window. */
	err = i915_gem_evict_something(vm, ww, size, alignment, color,
				       start, end, flags);
	if (err)
		return err;

	/* Eviction succeeded; the insert is now expected to find room. */
	return drm_mm_insert_node_in_range(&vm->mm, node,
					   size, alignment, color,
					   start, end, DRM_MM_INSERT_EVICT);
}
0294
0295 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
0296 #include "selftests/i915_gem_gtt.c"
0297 #endif