// SPDX-License-Identifier: MIT
/*
 * Copyright © 2018 Intel Corporation
 */

#include <linux/sort.h>

#include "intel_gpu_commands.h"
#include "intel_gt_pm.h"
#include "intel_rps.h"

#include "i915_selftest.h"
#include "selftests/igt_flush_test.h"

#define COUNT 5

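/* Ascending three-way comparator for sort() over the sampled cycle counts. */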
static int cmp_u32(const void *A, const void *B)
{
	const u32 *a = A, *b = B;

	if (*a < *b)
		return -1;
	if (*a > *b)
		return 1;

	return 0;
}

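/*
 * Hold the GT awake for the duration of the measurements and pin the GPU
 * frequency at its maximum, so that the sampled RING_TIMESTAMP deltas are
 * stable and comparable between runs. perf_end() undoes both and flushes
 * outstanding work, reporting any error raised while testing.
 */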
static void perf_begin(struct intel_gt *gt)
{
	intel_gt_pm_get(gt);

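	/* Boost gpufreq to max [waitboost] and keep it fixed */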
	atomic_inc(&gt->rps.num_waiters);
	schedule_work(&gt->rps.work);
	flush_work(&gt->rps.work);
}

static int perf_end(struct intel_gt *gt)
{
	atomic_dec(&gt->rps.num_waiters);
	intel_gt_pm_put(gt);

	return igt_flush_test(gt->i915);
}

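/*
 * Emit an MI_STORE_REGISTER_MEM that copies the engine's free-running
 * RING_TIMESTAMP into the given dword slot of the request's HWSP page;
 * the CPU can read the value back later via rq->hwsp_seqno[slot].
 */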
static int write_timestamp(struct i915_request *rq, int slot)
{
	struct intel_timeline *tl =
		rcu_dereference_protected(rq->timeline,
					  !i915_request_signaled(rq));
	u32 cmd;
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

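	/* gen8+ takes a 64b GGTT address, so the command is one dword longer */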
	cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
	if (GRAPHICS_VER(rq->engine->i915) >= 8)
		cmd++;
	*cs++ = cmd;
	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
	*cs++ = tl->hwsp_offset + slot * sizeof(u32);
	*cs++ = 0;

	intel_ring_advance(rq, cs);

	return 0;
}

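/* Create the smallest batch the engine can execute: a lone MI_BATCH_BUFFER_END. */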
static struct i915_vma *create_empty_batch(struct intel_context *ce)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *cs;
	int err;

	obj = i915_gem_object_create_internal(ce->engine->i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_put;
	}

	cs[0] = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(obj);

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_unpin;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_unpin;

	i915_gem_object_unpin_map(obj);
	return vma;

err_unpin:
	i915_gem_object_unpin_map(obj);
err_put:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

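/*
 * Reduce the COUNT (5) samples to a single value: sort them, discard the
 * minimum and maximum, and take a weighted mean with the median counted
 * twice, i.e. (a[1] + 2*a[2] + a[3]) / 4.
 */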
static u32 trifilter(u32 *a)
{
	u64 sum;

	sort(a, COUNT, sizeof(*a), cmp_u32, NULL);

	sum = mul_u32_u32(a[2], 2);
	sum += a[1];
	sum += a[3];

	return sum >> 2;
}

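/*
 * Measure the latency of MI_BATCH_BUFFER_START: sample RING_TIMESTAMP
 * immediately before and after dispatching an empty batch, so the delta
 * is dominated by the cost of the dispatch itself. Skipped before gen7,
 * which lacks a usable per-engine timestamp register.
 */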
static int perf_mi_bb_start(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (GRAPHICS_VER(gt->i915) < 7)
		return 0;

	perf_begin(gt);
	for_each_engine(engine, gt, id) {
		struct intel_context *ce = engine->kernel_context;
		struct i915_vma *batch;
		u32 cycles[COUNT];
		int i;

		intel_engine_pm_get(engine);

		batch = create_empty_batch(ce);
		if (IS_ERR(batch)) {
			err = PTR_ERR(batch);
			intel_engine_pm_put(engine);
			break;
		}

		err = i915_vma_sync(batch);
		if (err) {
			intel_engine_pm_put(engine);
			i915_vma_put(batch);
			break;
		}

		for (i = 0; i < ARRAY_SIZE(cycles); i++) {
			struct i915_request *rq;

			rq = i915_request_create(ce);
			if (IS_ERR(rq)) {
				err = PTR_ERR(rq);
				break;
			}

			err = write_timestamp(rq, 2);
			if (err)
				goto out;

			err = rq->engine->emit_bb_start(rq,
							batch->node.start, 8,
							0);
			if (err)
				goto out;

			err = write_timestamp(rq, 3);
			if (err)
				goto out;

out:
			i915_request_get(rq);
			i915_request_add(rq);

			if (i915_request_wait(rq, 0, HZ / 5) < 0)
				err = -EIO;
			i915_request_put(rq);
			if (err)
				break;

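			/* GPU cycles elapsed between the two timestamps */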
			cycles[i] = rq->hwsp_seqno[3] - rq->hwsp_seqno[2];
		}
		i915_vma_put(batch);
		intel_engine_pm_put(engine);
		if (err)
			break;

		pr_info("%s: MI_BB_START cycles: %u\n",
			engine->name, trifilter(cycles));
	}
	if (perf_end(gt))
		err = -EIO;

	return err;
}

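/*
 * Create a 64KiB batch consisting entirely of MI_NOOP (a zeroed dword is
 * MI_NOOP), terminated by MI_BATCH_BUFFER_END in the final dword.
 */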
static struct i915_vma *create_nop_batch(struct intel_context *ce)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *cs;
	int err;

	obj = i915_gem_object_create_internal(ce->engine->i915, SZ_64K);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_put;
	}

	memset(cs, 0, SZ_64K);
	cs[SZ_64K / sizeof(*cs) - 1] = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(obj);

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_unpin;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_unpin;

	i915_gem_object_unpin_map(obj);
	return vma;

err_unpin:
	i915_gem_object_unpin_map(obj);
err_put:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

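/*
 * Measure the cost of executing ~16k MI_NOOPs (a 64KiB batch). Three
 * timestamps are taken per request: before the empty batch (slot 2),
 * between the batches (slot 3) and after the nop batch (slot 4), so
 * (t4 - t3) - (t3 - t2) cancels out the common dispatch overhead and
 * leaves just the time spent executing the nops.
 */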
static int perf_mi_noop(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (GRAPHICS_VER(gt->i915) < 7)
		return 0;

	perf_begin(gt);
	for_each_engine(engine, gt, id) {
		struct intel_context *ce = engine->kernel_context;
		struct i915_vma *base, *nop;
		u32 cycles[COUNT];
		int i;

		intel_engine_pm_get(engine);

		base = create_empty_batch(ce);
		if (IS_ERR(base)) {
			err = PTR_ERR(base);
			intel_engine_pm_put(engine);
			break;
		}

		err = i915_vma_sync(base);
		if (err) {
			i915_vma_put(base);
			intel_engine_pm_put(engine);
			break;
		}

		nop = create_nop_batch(ce);
		if (IS_ERR(nop)) {
			err = PTR_ERR(nop);
			i915_vma_put(base);
			intel_engine_pm_put(engine);
			break;
		}

		err = i915_vma_sync(nop);
		if (err) {
			i915_vma_put(nop);
			i915_vma_put(base);
			intel_engine_pm_put(engine);
			break;
		}

		for (i = 0; i < ARRAY_SIZE(cycles); i++) {
			struct i915_request *rq;

			rq = i915_request_create(ce);
			if (IS_ERR(rq)) {
				err = PTR_ERR(rq);
				break;
			}

			err = write_timestamp(rq, 2);
			if (err)
				goto out;

			err = rq->engine->emit_bb_start(rq,
							base->node.start, 8,
							0);
			if (err)
				goto out;

			err = write_timestamp(rq, 3);
			if (err)
				goto out;

			err = rq->engine->emit_bb_start(rq,
							nop->node.start,
							nop->node.size,
							0);
			if (err)
				goto out;

			err = write_timestamp(rq, 4);
			if (err)
				goto out;

out:
			i915_request_get(rq);
			i915_request_add(rq);

			if (i915_request_wait(rq, 0, HZ / 5) < 0)
				err = -EIO;
			i915_request_put(rq);
			if (err)
				break;

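			/* Nop batch time minus empty batch time */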
			cycles[i] =
				(rq->hwsp_seqno[4] - rq->hwsp_seqno[3]) -
				(rq->hwsp_seqno[3] - rq->hwsp_seqno[2]);
		}
		i915_vma_put(nop);
		i915_vma_put(base);
		intel_engine_pm_put(engine);
		if (err)
			break;

		pr_info("%s: 16K MI_NOOP cycles: %u\n",
			engine->name, trifilter(cycles));
	}
	if (perf_end(gt))
		err = -EIO;

	return err;
}

int intel_engine_cs_perf_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(perf_mi_bb_start),
		SUBTEST(perf_mi_noop),
	};

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return intel_gt_live_subtests(tests, to_gt(i915));
}

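/*
 * Sanity-check the static intel_engines[] table: each engine's list of
 * (graphics_ver, mmio base) pairs must be sorted by descending version,
 * and every populated entry must carry a non-zero mmio base.
 */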
static int intel_mmio_bases_check(void *arg)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		const struct engine_info *info = &intel_engines[i];
		u8 prev = U8_MAX;

		for (j = 0; j < MAX_MMIO_BASES; j++) {
			u8 ver = info->mmio_bases[j].graphics_ver;
			u32 base = info->mmio_bases[j].base;

			if (ver >= prev) {
				pr_err("%s(%s, class:%d, instance:%d): mmio base for graphics ver %u is before the one for ver %u\n",
				       __func__,
				       intel_engine_class_repr(info->class),
				       info->class, info->instance,
				       prev, ver);
				return -EINVAL;
			}

			if (ver == 0)
				break;

			if (!base) {
				pr_err("%s(%s, class:%d, instance:%d): invalid mmio base (%x) for graphics ver %u at entry %u\n",
				       __func__,
				       intel_engine_class_repr(info->class),
				       info->class, info->instance,
				       base, ver, j);
				return -EINVAL;
			}

			prev = ver;
		}

		pr_debug("%s: min graphics version supported for %s%d is %u\n",
			 __func__,
			 intel_engine_class_repr(info->class),
			 info->instance,
			 prev);
	}

	return 0;
}

int intel_engine_cs_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(intel_mmio_bases_check),
	};

	return i915_subtests(tests, NULL);
}