// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"

#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_engine.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_ring.h"
#include "intel_timeline.h"

unsigned int intel_ring_update_space(struct intel_ring *ring)
{
	unsigned int space;

	space = __intel_ring_space(ring->head, ring->emit, ring->size);

	ring->space = space;
	return space;
}

void __intel_ring_pin(struct intel_ring *ring)
{
	GEM_BUG_ON(!atomic_read(&ring->pin_count));
	atomic_inc(&ring->pin_count);
}

int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
{
	struct i915_vma *vma = ring->vma;
	unsigned int flags;
	void *addr;
	int ret;

	if (atomic_fetch_inc(&ring->pin_count))
		return 0;

	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
	flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);

	if (i915_gem_object_is_stolen(vma->obj))
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;

	ret = i915_ggtt_pin(vma, ww, 0, flags);
	if (unlikely(ret))
		goto err_unpin;

	if (i915_vma_is_map_and_fenceable(vma)) {
		addr = (void __force *)i915_vma_pin_iomap(vma);
	} else {
		int type = i915_coherent_map_type(vma->vm->i915, vma->obj, false);

		addr = i915_gem_object_pin_map(vma->obj, type);
	}

	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto err_ring;
	}

	i915_vma_make_unshrinkable(vma);

	/* Discard any unused bytes beyond that submitted to hw. */
	intel_ring_reset(ring, ring->emit);

	ring->vaddr = addr;
	return 0;

err_ring:
	i915_vma_unpin(vma);
err_unpin:
	atomic_dec(&ring->pin_count);
	return ret;
}

void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
	tail = intel_ring_wrap(ring, tail);
	ring->tail = tail;
	ring->head = tail;
	ring->emit = tail;
	intel_ring_update_space(ring);
}

void intel_ring_unpin(struct intel_ring *ring)
{
	struct i915_vma *vma = ring->vma;

	if (!atomic_dec_and_test(&ring->pin_count))
		return;

	i915_vma_unset_ggtt_write(vma);
	if (i915_vma_is_map_and_fenceable(vma))
		i915_vma_unpin_iomap(vma);
	else
		i915_gem_object_unpin_map(vma->obj);

	i915_vma_make_purgeable(vma);
	i915_vma_unpin(vma);
}

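/*
 * Pick a backing store for the ring: device-local memory first, then
 * stolen memory when a mappable aperture is available, and finally
 * plain internal (shmem-backed) pages as the fallback.
 */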
static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
{
	struct i915_address_space *vm = &ggtt->vm;
	struct drm_i915_private *i915 = vm->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE |
					  I915_BO_ALLOC_PM_VOLATILE);
	if (IS_ERR(obj) && i915_ggtt_has_aperture(ggtt))
		obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Mark ring buffers as read-only from GPU side (so no stray
	 * overwrites) if supported by the platform.
	 */
	if (vm->has_read_only)
		i915_gem_object_set_readonly(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_ring *ring;
	struct i915_vma *vma;

	GEM_BUG_ON(!is_power_of_2(size));
	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return ERR_PTR(-ENOMEM);

	kref_init(&ring->ref);
	ring->size = size;
	/* Shift for sign-extending ring offset deltas, see intel_ring_direction() */
	ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);

	/*
	 * Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
	if (IS_I830(i915) || IS_I845G(i915))
		ring->effective_size -= 2 * CACHELINE_BYTES;

	intel_ring_update_space(ring);

	vma = create_ring_vma(engine->gt->ggtt, size);
	if (IS_ERR(vma)) {
		kfree(ring);
		return ERR_CAST(vma);
	}
	ring->vma = vma;

	return ring;
}

void intel_ring_free(struct kref *ref)
{
	struct intel_ring *ring = container_of(ref, typeof(*ring), ref);

	i915_vma_put(ring->vma);
	kfree(ring);
}

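/*
 * Stall until enough ring space is free: find the oldest request on this
 * timeline still occupying the ring whose retirement releases at least
 * @bytes, wait for it to complete, then retire up to and including it.
 */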
static noinline int
wait_for_space(struct intel_ring *ring,
	       struct intel_timeline *tl,
	       unsigned int bytes)
{
	struct i915_request *target;
	long timeout;

	if (intel_ring_update_space(ring) >= bytes)
		return 0;

	GEM_BUG_ON(list_empty(&tl->requests));
	list_for_each_entry(target, &tl->requests, link) {
		if (target->ring != ring)
			continue;

		/* Would completion of this request free enough space? */
		if (bytes <= __intel_ring_space(target->postfix,
						ring->emit, ring->size))
			break;
	}

	if (GEM_WARN_ON(&target->link == &tl->requests))
		return -ENOSPC;

	timeout = i915_request_wait(target,
				    I915_WAIT_INTERRUPTIBLE,
				    MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;

	i915_request_retire_upto(target);

	intel_ring_update_space(ring);
	GEM_BUG_ON(ring->space < bytes);
	return 0;
}

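/*
 * Reserve space for @num_dwords of commands in @rq's ring and return a
 * pointer through which the caller writes them, wrapping to the start of
 * the ring (padding the unused tail with MI_NOOP) and waiting for older
 * requests to retire as required.
 */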
u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
{
	struct intel_ring *ring = rq->ring;
	const unsigned int remain_usable = ring->effective_size - ring->emit;
	const unsigned int bytes = num_dwords * sizeof(u32);
	unsigned int need_wrap = 0;
	unsigned int total_bytes;
	u32 *cs;

	/* Packets must be qword aligned. */
	GEM_BUG_ON(num_dwords & 1);

	total_bytes = bytes + rq->reserved_space;
	GEM_BUG_ON(total_bytes > ring->effective_size);

	if (unlikely(total_bytes > remain_usable)) {
		const int remain_actual = ring->size - ring->emit;

		if (bytes > remain_usable) {
			/*
			 * Not enough space for the basic request. So need to
			 * flush out the remainder and then wait for
			 * base + reserved.
			 */
			total_bytes += remain_actual;
			need_wrap = remain_actual | 1;
		} else {
			/*
			 * The base request will fit but the reserved space
			 * falls off the end. So we don't need an immediate
			 * wrap and only need to effectively wait for the
			 * reserved size from the start of the ring.
			 */
			total_bytes = rq->reserved_space + remain_actual;
		}
	}

	if (unlikely(total_bytes > ring->space)) {
		int ret;

		/*
		 * Space is reserved in the ring buffer for finalising the
		 * request, as that cannot be allowed to fail. During request
		 * finalisation, reserved_space is set to 0 to stop the
		 * overallocation and the assumption is that then nothing will
		 * need the ring buffer anymore. So reuse the reserved space
		 * here if we have to wait for more.
		 */
		GEM_BUG_ON(!rq->reserved_space);

		ret = wait_for_space(ring,
				     i915_request_timeline(rq),
				     total_bytes);
		if (unlikely(ret))
			return ERR_PTR(ret);
	}

	if (unlikely(need_wrap)) {
		need_wrap &= ~1;
		GEM_BUG_ON(need_wrap > ring->space);
		GEM_BUG_ON(ring->emit + need_wrap > ring->size);
		GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));

		/* Fill the tail with MI_NOOP */
		memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
		ring->space -= need_wrap;
		ring->emit = 0;
	}

	GEM_BUG_ON(ring->emit > ring->size - bytes);
	GEM_BUG_ON(ring->space < bytes);
	cs = ring->vaddr + ring->emit;
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		memset32(cs, POISON_INUSE, bytes / sizeof(*cs));
	ring->emit += bytes;
	ring->space -= bytes;

	return cs;
}


/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct i915_request *rq)
{
	int num_dwords;
	void *cs;

	num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_DWORDS - num_dwords;
	GEM_BUG_ON(num_dwords & 1);

	cs = intel_ring_begin(rq, num_dwords);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Pad out to the next cacheline with qword-packed MI_NOOPs. */
	memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
	/* cs is void *, so advance by bytes, not dwords */
	intel_ring_advance(rq, cs + num_dwords * sizeof(u32));

	GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
	return 0;
}


#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring.c"
#endif