0001
0002
0003
0004
0005
0006
0007 #include <linux/dma-fence-array.h>
0008 #include <linux/dma-fence-chain.h>
0009 #include <linux/jiffies.h>
0010
0011 #include "gt/intel_engine.h"
0012 #include "gt/intel_rps.h"
0013
0014 #include "i915_gem_ioctls.h"
0015 #include "i915_gem_object.h"
0016
/*
 * Wait on a single fence with a timeout (in jiffies).
 *
 * Returns the remaining timeout on completion, 0 on timeout, or a
 * negative error code (e.g. -ERESTARTSYS) from the underlying wait.
 */
static long
i915_gem_object_wait_fence(struct dma_fence *fence,
			   unsigned int flags,
			   long timeout)
{
	/*
	 * dma_fence_wait_timeout() below is handed the masked flag as its
	 * "intr" boolean, so I915_WAIT_INTERRUPTIBLE must live in bit 0.
	 */
	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);

	/* Already signaled: nothing to wait for, report the full timeout. */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	/* Native i915 requests understand the full i915 wait-flag set. */
	if (dma_fence_is_i915(fence))
		return i915_request_wait_timeout(to_request(fence), flags, timeout);

	/* Foreign fence: only interruptibility can be conveyed. */
	return dma_fence_wait_timeout(fence,
				      flags & I915_WAIT_INTERRUPTIBLE,
				      timeout);
}
0034
0035 static void
0036 i915_gem_object_boost(struct dma_resv *resv, unsigned int flags)
0037 {
0038 struct dma_resv_iter cursor;
0039 struct dma_fence *fence;
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057 dma_resv_iter_begin(&cursor, resv,
0058 dma_resv_usage_rw(flags & I915_WAIT_ALL));
0059 dma_resv_for_each_fence_unlocked(&cursor, fence)
0060 if (dma_fence_is_i915(fence) &&
0061 !i915_request_started(to_request(fence)))
0062 intel_rps_boost(to_request(fence));
0063 dma_resv_iter_end(&cursor);
0064 }
0065
/*
 * Wait in turn on every fence tracked by @resv (all fences, or only the
 * writers, depending on I915_WAIT_ALL).
 *
 * Returns the remaining timeout (>0) on success, 0 on timeout, or a
 * negative error code from the individual fence wait.
 */
static long
i915_gem_object_wait_reservation(struct dma_resv *resv,
				 unsigned int flags,
				 long timeout)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	/*
	 * If no fences are iterated at all, report success: the unconsumed
	 * timeout, or 1 when the caller passed 0 (a pure poll) so that the
	 * result is not mistaken for a timeout.
	 */
	long ret = timeout ?: 1;

	/* Boost not-yet-started requests before we begin blocking on them. */
	i915_gem_object_boost(resv, flags);

	dma_resv_iter_begin(&cursor, resv,
			    dma_resv_usage_rw(flags & I915_WAIT_ALL));
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		ret = i915_gem_object_wait_fence(fence, flags, timeout);
		if (ret <= 0)
			break;

		/* Carry the remaining budget over to the next fence. */
		if (timeout)
			timeout = ret;
	}
	dma_resv_iter_end(&cursor);

	return ret;
}
0091
/* Apply the scheduling attributes @attr to the request backing @fence. */
static void fence_set_priority(struct dma_fence *fence,
			       const struct i915_sched_attr *attr)
{
	struct i915_request *rq;
	struct intel_engine_cs *engine;

	/* Only unsignaled, native i915 requests have a schedulable priority. */
	if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
		return;

	rq = to_request(fence);
	engine = rq->engine;

	/*
	 * NOTE(review): rcu_read_lock() appears to keep the sched_engine
	 * schedule callback stable while it is checked and invoked (it may
	 * be torn down on engine reset/wedging) — confirm against
	 * intel_engine_cs lifetime rules.
	 */
	rcu_read_lock();
	if (engine->sched_engine->schedule)
		engine->sched_engine->schedule(rq, attr);
	rcu_read_unlock();
}
0109
/* Identify a struct dma_fence_chain by its ops table (no core helper). */
static inline bool __dma_fence_is_chain(const struct dma_fence *fence)
{
	return fence->ops == &dma_fence_chain_ops;
}
0114
/*
 * Raise the scheduling priority of @fence (recursing one level into
 * fence arrays and chains) so that a subsequent wait completes sooner.
 */
void i915_gem_fence_wait_priority(struct dma_fence *fence,
				  const struct i915_sched_attr *attr)
{
	if (dma_fence_is_signaled(fence))
		return;

	/*
	 * With softirqs disabled, any scheduling tasklets kicked by the
	 * priority bumps are deferred until local_bh_enable() below —
	 * presumably so a composite fence is reprioritised as a single
	 * batch (NOTE(review): confirm).
	 */
	local_bh_disable();

	/* Recurse one level into composite fences. */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		int i;

		for (i = 0; i < array->num_fences; i++)
			fence_set_priority(array->fences[i], attr);
	} else if (__dma_fence_is_chain(fence)) {
		struct dma_fence *iter;

		/*
		 * Only the first link produced by the iterator is bumped;
		 * the chain is ordered, so raising that one raises
		 * everything it depends upon — NOTE(review): confirm the
		 * iteration order assumption against dma_fence_chain.
		 */
		dma_fence_chain_for_each(iter, fence) {
			fence_set_priority(to_dma_fence_chain(iter)->fence,
					   attr);
			break;
		}
		/* The early break leaves a reference held by the iterator. */
		dma_fence_put(iter);
	} else {
		fence_set_priority(fence, attr);
	}

	local_bh_enable();
}
0146
0147 int
0148 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
0149 unsigned int flags,
0150 const struct i915_sched_attr *attr)
0151 {
0152 struct dma_resv_iter cursor;
0153 struct dma_fence *fence;
0154
0155 dma_resv_iter_begin(&cursor, obj->base.resv,
0156 dma_resv_usage_rw(flags & I915_WAIT_ALL));
0157 dma_resv_for_each_fence_unlocked(&cursor, fence)
0158 i915_gem_fence_wait_priority(fence, attr);
0159 dma_resv_iter_end(&cursor);
0160 return 0;
0161 }
0162
0163
0164
0165
0166
0167
0168
0169 int
0170 i915_gem_object_wait(struct drm_i915_gem_object *obj,
0171 unsigned int flags,
0172 long timeout)
0173 {
0174 might_sleep();
0175 GEM_BUG_ON(timeout < 0);
0176
0177 timeout = i915_gem_object_wait_reservation(obj->base.resv,
0178 flags, timeout);
0179
0180 if (timeout < 0)
0181 return timeout;
0182
0183 return !timeout ? -ETIME : 0;
0184 }
0185
/* Convert a nanosecond interval to a jiffies timeout, rounding up. */
static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	/*
	 * nsecs_to_jiffies64() does not guard against overflow, so clamp
	 * huge intervals to MAX_JIFFY_OFFSET up front.
	 */
	if (NSEC_PER_SEC % HZ &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	/* +1 rounds up so we never wait for less than the caller asked. */
	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}
0195
0196 static unsigned long to_wait_timeout(s64 timeout_ns)
0197 {
0198 if (timeout_ns < 0)
0199 return MAX_SCHEDULE_TIMEOUT;
0200
0201 if (timeout_ns == 0)
0202 return 0;
0203
0204 return nsecs_to_jiffies_timeout(timeout_ns);
0205 }
0206
0207
0208
0209
0210
0211
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221
0222
0223
0224
0225
0226
0227
0228
0229
0230
/*
 * i915_gem_wait_ioctl - implementation of DRM_IOCTL_I915_GEM_WAIT.
 *
 * Waits interruptibly, with priority boosting, for all outstanding work
 * on args->bo_handle for at most args->timeout_ns (negative = forever).
 * On return args->timeout_ns is updated with the remaining budget.
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
	ktime_t start;
	long ret;

	/* No flags are defined for this ioctl. */
	if (args->flags != 0)
		return -EINVAL;

	obj = i915_gem_object_lookup(file, args->bo_handle);
	if (!obj)
		return -ENOENT;

	start = ktime_get();

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_PRIORITY |
				   I915_WAIT_ALL,
				   to_wait_timeout(args->timeout_ns));

	if (args->timeout_ns > 0) {
		/* Report back how much of the budget remains unconsumed. */
		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
		if (args->timeout_ns < 0)
			args->timeout_ns = 0;

		/*
		 * The wait itself runs in jiffies while the residual time is
		 * accounted in ktime, so the two clocks can disagree by up to
		 * a jiffy of rounding slop. If the wait timed out and less
		 * than a jiffy remains, treat the budget as fully consumed
		 * rather than reporting a bogus sliver of time left.
		 */
		if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
			args->timeout_ns = 0;

		/* Time still on the clock but the wait expired: tell the
		 * caller to retry rather than reporting a timeout. */
		if (ret == -ETIME && args->timeout_ns)
			ret = -EAGAIN;
	}

	i915_gem_object_put(obj);
	return ret;
}
0277
0278
0279
0280
0281
0282
0283
0284
0285
0286
0287
0288
0289 int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
0290 unsigned int flags)
0291 {
0292 might_sleep();
0293
0294 return i915_gem_object_wait_moving_fence(obj, !!(flags & I915_WAIT_INTERRUPTIBLE));
0295 }