/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2011-2012 Intel Corporation
 */
0067 #include <linux/highmem.h>
0068 #include <linux/log2.h>
0069 #include <linux/nospec.h>
0070
0071 #include <drm/drm_cache.h>
0072 #include <drm/drm_syncobj.h>
0073
0074 #include "gt/gen6_ppgtt.h"
0075 #include "gt/intel_context.h"
0076 #include "gt/intel_context_param.h"
0077 #include "gt/intel_engine_heartbeat.h"
0078 #include "gt/intel_engine_user.h"
0079 #include "gt/intel_gpu_commands.h"
0080 #include "gt/intel_ring.h"
0081
0082 #include "pxp/intel_pxp.h"
0083
0084 #include "i915_file_private.h"
0085 #include "i915_gem_context.h"
0086 #include "i915_trace.h"
0087 #include "i915_user_extensions.h"
0088
#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
0090
0091 static struct kmem_cache *slab_luts;
0092
0093 struct i915_lut_handle *i915_lut_handle_alloc(void)
0094 {
0095 return kmem_cache_alloc(slab_luts, GFP_KERNEL);
0096 }
0097
void i915_lut_handle_free(struct i915_lut_handle *lut)
{
	kmem_cache_free(slab_luts, lut);
}
0102
0103 static void lut_close(struct i915_gem_context *ctx)
0104 {
0105 struct radix_tree_iter iter;
0106 void __rcu **slot;
0107
0108 mutex_lock(&ctx->lut_mutex);
0109 rcu_read_lock();
0110 radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
0111 struct i915_vma *vma = rcu_dereference_raw(*slot);
0112 struct drm_i915_gem_object *obj = vma->obj;
0113 struct i915_lut_handle *lut;
0114
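		/* Skip objects that are already on their way to destruction */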
0115 if (!kref_get_unless_zero(&obj->base.refcount))
0116 continue;
0117
0118 spin_lock(&obj->lut_lock);
0119 list_for_each_entry(lut, &obj->lut_list, obj_link) {
0120 if (lut->ctx != ctx)
0121 continue;
0122
0123 if (lut->handle != iter.index)
0124 continue;
0125
0126 list_del(&lut->obj_link);
0127 break;
0128 }
0129 spin_unlock(&obj->lut_lock);
0130
0131 if (&lut->obj_link != &obj->lut_list) {
0132 i915_lut_handle_free(lut);
0133 radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
0134 i915_vma_close(vma);
0135 i915_gem_object_put(obj);
0136 }
0137
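		/* Drop the reference taken above for this walk of the lut_list */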
0138 i915_gem_object_put(obj);
0139 }
0140 rcu_read_unlock();
0141 mutex_unlock(&ctx->lut_mutex);
0142 }
0143
0144 static struct intel_context *
0145 lookup_user_engine(struct i915_gem_context *ctx,
0146 unsigned long flags,
0147 const struct i915_engine_class_instance *ci)
0148 #define LOOKUP_USER_INDEX BIT(0)
0149 {
0150 int idx;
0151
0152 if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
0153 return ERR_PTR(-EINVAL);
0154
0155 if (!i915_gem_context_user_engines(ctx)) {
0156 struct intel_engine_cs *engine;
0157
0158 engine = intel_engine_lookup_user(ctx->i915,
0159 ci->engine_class,
0160 ci->engine_instance);
0161 if (!engine)
0162 return ERR_PTR(-EINVAL);
0163
0164 idx = engine->legacy_idx;
0165 } else {
0166 idx = ci->engine_instance;
0167 }
0168
0169 return i915_gem_context_get_engine(ctx, idx);
0170 }
0171
0172 static int validate_priority(struct drm_i915_private *i915,
0173 const struct drm_i915_gem_context_param *args)
0174 {
0175 s64 priority = args->value;
0176
0177 if (args->size)
0178 return -EINVAL;
0179
0180 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
0181 return -ENODEV;
0182
0183 if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
0184 priority < I915_CONTEXT_MIN_USER_PRIORITY)
0185 return -EINVAL;
0186
0187 if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
0188 !capable(CAP_SYS_NICE))
0189 return -EPERM;
0190
0191 return 0;
0192 }
0193
0194 static void proto_context_close(struct drm_i915_private *i915,
0195 struct i915_gem_proto_context *pc)
0196 {
0197 int i;
0198
0199 if (pc->pxp_wakeref)
0200 intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref);
0201 if (pc->vm)
0202 i915_vm_put(pc->vm);
0203 if (pc->user_engines) {
0204 for (i = 0; i < pc->num_user_engines; i++)
0205 kfree(pc->user_engines[i].siblings);
0206 kfree(pc->user_engines);
0207 }
0208 kfree(pc);
0209 }
0210
0211 static int proto_context_set_persistence(struct drm_i915_private *i915,
0212 struct i915_gem_proto_context *pc,
0213 bool persist)
0214 {
0215 if (persist) {
		/*
		 * Persistent contexts require hangcheck so that any runaway
		 * requests left behind after the context is closed are still
		 * detected and cleaned up.
		 */
0221 if (!i915->params.enable_hangcheck)
0222 return -EINVAL;
0223
0224 pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
0225 } else {
		/* To cancel a non-persistent context we rely on preemption */
0227 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
0228 return -ENODEV;

		/*
		 * If a hostile request cannot be preempted away, our only
		 * recourse is to reset the engine it is running on. Without
		 * per-engine reset support we cannot guarantee that a
		 * non-persistent context is cleaned up when it is closed,
		 * so refuse to clear the persistence flag.
		 */
0243 if (!intel_has_reset_engine(to_gt(i915)))
0244 return -ENODEV;
0245
0246 pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
0247 }
0248
0249 return 0;
0250 }
0251
0252 static int proto_context_set_protected(struct drm_i915_private *i915,
0253 struct i915_gem_proto_context *pc,
0254 bool protected)
0255 {
0256 int ret = 0;
0257
0258 if (!protected) {
0259 pc->uses_protected_content = false;
0260 } else if (!intel_pxp_is_enabled(&to_gt(i915)->pxp)) {
0261 ret = -ENODEV;
0262 } else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
0263 !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
0264 ret = -EPERM;
0265 } else {
0266 pc->uses_protected_content = true;

		/*
		 * Protected content requires an active PXP session, which in
		 * turn requires the device to be awake, so take a runtime-pm
		 * wakeref for the lifetime of the (proto-)context.
		 */
0272 pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
0273
0274 if (!intel_pxp_is_active(&to_gt(i915)->pxp))
0275 ret = intel_pxp_start(&to_gt(i915)->pxp);
0276 }
0277
0278 return ret;
0279 }
0280
0281 static struct i915_gem_proto_context *
0282 proto_context_create(struct drm_i915_private *i915, unsigned int flags)
0283 {
0284 struct i915_gem_proto_context *pc, *err;
0285
0286 pc = kzalloc(sizeof(*pc), GFP_KERNEL);
0287 if (!pc)
0288 return ERR_PTR(-ENOMEM);
0289
0290 pc->num_user_engines = -1;
0291 pc->user_engines = NULL;
0292 pc->user_flags = BIT(UCONTEXT_BANNABLE) |
0293 BIT(UCONTEXT_RECOVERABLE);
0294 if (i915->params.enable_hangcheck)
0295 pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
0296 pc->sched.priority = I915_PRIORITY_NORMAL;
0297
0298 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
0299 if (!HAS_EXECLISTS(i915)) {
0300 err = ERR_PTR(-EINVAL);
0301 goto proto_close;
0302 }
0303 pc->single_timeline = true;
0304 }
0305
0306 return pc;
0307
0308 proto_close:
0309 proto_context_close(i915, pc);
0310 return err;
0311 }
0312
0313 static int proto_context_register_locked(struct drm_i915_file_private *fpriv,
0314 struct i915_gem_proto_context *pc,
0315 u32 *id)
0316 {
0317 int ret;
0318 void *old;
0319
0320 lockdep_assert_held(&fpriv->proto_context_lock);
0321
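	/*
	 * Reserve the id in context_xa up front; the proto-context lives in
	 * proto_context_xa under the same id until it is finalized into a
	 * real context.
	 */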
0322 ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL);
0323 if (ret)
0324 return ret;
0325
0326 old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL);
0327 if (xa_is_err(old)) {
0328 xa_erase(&fpriv->context_xa, *id);
0329 return xa_err(old);
0330 }
0331 WARN_ON(old);
0332
0333 return 0;
0334 }
0335
0336 static int proto_context_register(struct drm_i915_file_private *fpriv,
0337 struct i915_gem_proto_context *pc,
0338 u32 *id)
0339 {
0340 int ret;
0341
0342 mutex_lock(&fpriv->proto_context_lock);
0343 ret = proto_context_register_locked(fpriv, pc, id);
0344 mutex_unlock(&fpriv->proto_context_lock);
0345
0346 return ret;
0347 }
0348
0349 static struct i915_address_space *
0350 i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
0351 {
0352 struct i915_address_space *vm;
0353
0354 xa_lock(&file_priv->vm_xa);
0355 vm = xa_load(&file_priv->vm_xa, id);
0356 if (vm)
0357 kref_get(&vm->ref);
0358 xa_unlock(&file_priv->vm_xa);
0359
0360 return vm;
0361 }
0362
0363 static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
0364 struct i915_gem_proto_context *pc,
0365 const struct drm_i915_gem_context_param *args)
0366 {
0367 struct drm_i915_private *i915 = fpriv->dev_priv;
0368 struct i915_address_space *vm;
0369
0370 if (args->size)
0371 return -EINVAL;
0372
0373 if (!HAS_FULL_PPGTT(i915))
0374 return -ENODEV;
0375
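	/* VM ids are 32-bit; anything wider cannot name an existing VM */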
0376 if (upper_32_bits(args->value))
0377 return -ENOENT;
0378
0379 vm = i915_gem_vm_lookup(fpriv, args->value);
0380 if (!vm)
0381 return -ENOENT;
0382
0383 if (pc->vm)
0384 i915_vm_put(pc->vm);
0385 pc->vm = vm;
0386
0387 return 0;
0388 }
0389
0390 struct set_proto_ctx_engines {
0391 struct drm_i915_private *i915;
0392 unsigned num_engines;
0393 struct i915_gem_proto_engine *engines;
0394 };
0395
0396 static int
0397 set_proto_ctx_engines_balance(struct i915_user_extension __user *base,
0398 void *data)
0399 {
0400 struct i915_context_engines_load_balance __user *ext =
0401 container_of_user(base, typeof(*ext), base);
0402 const struct set_proto_ctx_engines *set = data;
0403 struct drm_i915_private *i915 = set->i915;
0404 struct intel_engine_cs **siblings;
0405 u16 num_siblings, idx;
0406 unsigned int n;
0407 int err;
0408
0409 if (!HAS_EXECLISTS(i915))
0410 return -ENODEV;
0411
0412 if (get_user(idx, &ext->engine_index))
0413 return -EFAULT;
0414
0415 if (idx >= set->num_engines) {
0416 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
0417 idx, set->num_engines);
0418 return -EINVAL;
0419 }
0420
0421 idx = array_index_nospec(idx, set->num_engines);
0422 if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) {
0423 drm_dbg(&i915->drm,
0424 "Invalid placement[%d], already occupied\n", idx);
0425 return -EEXIST;
0426 }
0427
0428 if (get_user(num_siblings, &ext->num_siblings))
0429 return -EFAULT;
0430
0431 err = check_user_mbz(&ext->flags);
0432 if (err)
0433 return err;
0434
0435 err = check_user_mbz(&ext->mbz64);
0436 if (err)
0437 return err;
0438
0439 if (num_siblings == 0)
0440 return 0;
0441
0442 siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL);
0443 if (!siblings)
0444 return -ENOMEM;
0445
0446 for (n = 0; n < num_siblings; n++) {
0447 struct i915_engine_class_instance ci;
0448
0449 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
0450 err = -EFAULT;
0451 goto err_siblings;
0452 }
0453
0454 siblings[n] = intel_engine_lookup_user(i915,
0455 ci.engine_class,
0456 ci.engine_instance);
0457 if (!siblings[n]) {
0458 drm_dbg(&i915->drm,
0459 "Invalid sibling[%d]: { class:%d, inst:%d }\n",
0460 n, ci.engine_class, ci.engine_instance);
0461 err = -EINVAL;
0462 goto err_siblings;
0463 }
0464 }
0465
0466 if (num_siblings == 1) {
0467 set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
0468 set->engines[idx].engine = siblings[0];
0469 kfree(siblings);
0470 } else {
0471 set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED;
0472 set->engines[idx].num_siblings = num_siblings;
0473 set->engines[idx].siblings = siblings;
0474 }
0475
0476 return 0;
0477
0478 err_siblings:
0479 kfree(siblings);
0480
0481 return err;
0482 }
0483
0484 static int
0485 set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
0486 {
0487 struct i915_context_engines_bond __user *ext =
0488 container_of_user(base, typeof(*ext), base);
0489 const struct set_proto_ctx_engines *set = data;
0490 struct drm_i915_private *i915 = set->i915;
0491 struct i915_engine_class_instance ci;
0492 struct intel_engine_cs *master;
0493 u16 idx, num_bonds;
0494 int err, n;
0495
0496 if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) &&
0497 !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) {
0498 drm_dbg(&i915->drm,
0499 "Bonding not supported on this platform\n");
0500 return -ENODEV;
0501 }
0502
0503 if (get_user(idx, &ext->virtual_index))
0504 return -EFAULT;
0505
0506 if (idx >= set->num_engines) {
0507 drm_dbg(&i915->drm,
0508 "Invalid index for virtual engine: %d >= %d\n",
0509 idx, set->num_engines);
0510 return -EINVAL;
0511 }
0512
0513 idx = array_index_nospec(idx, set->num_engines);
0514 if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) {
0515 drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
0516 return -EINVAL;
0517 }
0518
0519 if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) {
0520 drm_dbg(&i915->drm,
0521 "Bonding with virtual engines not allowed\n");
0522 return -EINVAL;
0523 }
0524
0525 err = check_user_mbz(&ext->flags);
0526 if (err)
0527 return err;
0528
0529 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
0530 err = check_user_mbz(&ext->mbz64[n]);
0531 if (err)
0532 return err;
0533 }
0534
0535 if (copy_from_user(&ci, &ext->master, sizeof(ci)))
0536 return -EFAULT;
0537
0538 master = intel_engine_lookup_user(i915,
0539 ci.engine_class,
0540 ci.engine_instance);
0541 if (!master) {
0542 drm_dbg(&i915->drm,
0543 "Unrecognised master engine: { class:%u, instance:%u }\n",
0544 ci.engine_class, ci.engine_instance);
0545 return -EINVAL;
0546 }
0547
0548 if (intel_engine_uses_guc(master)) {
		drm_dbg(&i915->drm,
			"bonding extension not supported with GuC submission\n");
0550 return -ENODEV;
0551 }
0552
0553 if (get_user(num_bonds, &ext->num_bonds))
0554 return -EFAULT;
0555
0556 for (n = 0; n < num_bonds; n++) {
0557 struct intel_engine_cs *bond;
0558
0559 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
0560 return -EFAULT;
0561
0562 bond = intel_engine_lookup_user(i915,
0563 ci.engine_class,
0564 ci.engine_instance);
0565 if (!bond) {
0566 drm_dbg(&i915->drm,
0567 "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
0568 n, ci.engine_class, ci.engine_instance);
0569 return -EINVAL;
0570 }
0571 }
0572
0573 return 0;
0574 }
0575
0576 static int
0577 set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
0578 void *data)
0579 {
0580 struct i915_context_engines_parallel_submit __user *ext =
0581 container_of_user(base, typeof(*ext), base);
0582 const struct set_proto_ctx_engines *set = data;
0583 struct drm_i915_private *i915 = set->i915;
0584 struct i915_engine_class_instance prev_engine;
0585 u64 flags;
0586 int err = 0, n, i, j;
0587 u16 slot, width, num_siblings;
0588 struct intel_engine_cs **siblings = NULL;
0589 intel_engine_mask_t prev_mask;
0590
0591 if (get_user(slot, &ext->engine_index))
0592 return -EFAULT;
0593
0594 if (get_user(width, &ext->width))
0595 return -EFAULT;
0596
0597 if (get_user(num_siblings, &ext->num_siblings))
0598 return -EFAULT;
0599
0600 if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc) &&
0601 num_siblings != 1) {
0602 drm_dbg(&i915->drm, "Only 1 sibling (%d) supported in non-GuC mode\n",
0603 num_siblings);
0604 return -EINVAL;
0605 }
0606
0607 if (slot >= set->num_engines) {
0608 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
0609 slot, set->num_engines);
0610 return -EINVAL;
0611 }
0612
0613 if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) {
0614 drm_dbg(&i915->drm,
0615 "Invalid placement[%d], already occupied\n", slot);
0616 return -EINVAL;
0617 }
0618
0619 if (get_user(flags, &ext->flags))
0620 return -EFAULT;
0621
0622 if (flags) {
0623 drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags);
0624 return -EINVAL;
0625 }
0626
0627 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
0628 err = check_user_mbz(&ext->mbz64[n]);
0629 if (err)
0630 return err;
0631 }
0632
0633 if (width < 2) {
0634 drm_dbg(&i915->drm, "Width (%d) < 2\n", width);
0635 return -EINVAL;
0636 }
0637
0638 if (num_siblings < 1) {
0639 drm_dbg(&i915->drm, "Number siblings (%d) < 1\n",
0640 num_siblings);
0641 return -EINVAL;
0642 }
0643
0644 siblings = kmalloc_array(num_siblings * width,
0645 sizeof(*siblings),
0646 GFP_KERNEL);
0647 if (!siblings)
0648 return -ENOMEM;

	/* Look up and validate the width x num_siblings engine layout */
0651 for (i = 0; i < width; ++i) {
0652 intel_engine_mask_t current_mask = 0;
0653
0654 for (j = 0; j < num_siblings; ++j) {
0655 struct i915_engine_class_instance ci;
0656
0657 n = i * num_siblings + j;
0658 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
0659 err = -EFAULT;
0660 goto out_err;
0661 }
0662
0663 siblings[n] =
0664 intel_engine_lookup_user(i915, ci.engine_class,
0665 ci.engine_instance);
0666 if (!siblings[n]) {
0667 drm_dbg(&i915->drm,
0668 "Invalid sibling[%d]: { class:%d, inst:%d }\n",
0669 n, ci.engine_class, ci.engine_instance);
0670 err = -EINVAL;
0671 goto out_err;
0672 }

			/*
			 * Parallel submission is not supported on the
			 * render/compute classes.
			 */
0678 if (siblings[n]->class == RENDER_CLASS ||
0679 siblings[n]->class == COMPUTE_CLASS) {
0680 err = -EINVAL;
0681 goto out_err;
0682 }
0683
0684 if (n) {
0685 if (prev_engine.engine_class !=
0686 ci.engine_class) {
0687 drm_dbg(&i915->drm,
0688 "Mismatched class %d, %d\n",
0689 prev_engine.engine_class,
0690 ci.engine_class);
0691 err = -EINVAL;
0692 goto out_err;
0693 }
0694 }
0695
0696 prev_engine = ci;
0697 current_mask |= siblings[n]->logical_mask;
0698 }
0699
0700 if (i > 0) {
0701 if (current_mask != prev_mask << 1) {
0702 drm_dbg(&i915->drm,
0703 "Non contiguous logical mask 0x%x, 0x%x\n",
0704 prev_mask, current_mask);
0705 err = -EINVAL;
0706 goto out_err;
0707 }
0708 }
0709 prev_mask = current_mask;
0710 }
0711
0712 set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL;
0713 set->engines[slot].num_siblings = num_siblings;
0714 set->engines[slot].width = width;
0715 set->engines[slot].siblings = siblings;
0716
0717 return 0;
0718
0719 out_err:
0720 kfree(siblings);
0721
0722 return err;
0723 }
0724
0725 static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
0726 [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance,
0727 [I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond,
0728 [I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] =
0729 set_proto_ctx_engines_parallel_submit,
0730 };
0731
0732 static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
0733 struct i915_gem_proto_context *pc,
0734 const struct drm_i915_gem_context_param *args)
0735 {
0736 struct drm_i915_private *i915 = fpriv->dev_priv;
0737 struct set_proto_ctx_engines set = { .i915 = i915 };
0738 struct i915_context_param_engines __user *user =
0739 u64_to_user_ptr(args->value);
0740 unsigned int n;
0741 u64 extensions;
0742 int err;
0743
0744 if (pc->num_user_engines >= 0) {
0745 drm_dbg(&i915->drm, "Cannot set engines twice");
0746 return -EINVAL;
0747 }
0748
0749 if (args->size < sizeof(*user) ||
0750 !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) {
0751 drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
0752 args->size);
0753 return -EINVAL;
0754 }
0755
0756 set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
0757
0758 if (set.num_engines > I915_EXEC_RING_MASK + 1)
0759 return -EINVAL;
0760
0761 set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL);
0762 if (!set.engines)
0763 return -ENOMEM;
0764
0765 for (n = 0; n < set.num_engines; n++) {
0766 struct i915_engine_class_instance ci;
0767 struct intel_engine_cs *engine;
0768
0769 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
0770 kfree(set.engines);
0771 return -EFAULT;
0772 }
0773
0774 memset(&set.engines[n], 0, sizeof(set.engines[n]));
0775
0776 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
0777 ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE)
0778 continue;
0779
0780 engine = intel_engine_lookup_user(i915,
0781 ci.engine_class,
0782 ci.engine_instance);
0783 if (!engine) {
0784 drm_dbg(&i915->drm,
0785 "Invalid engine[%d]: { class:%d, instance:%d }\n",
0786 n, ci.engine_class, ci.engine_instance);
0787 kfree(set.engines);
0788 return -ENOENT;
0789 }
0790
0791 set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
0792 set.engines[n].engine = engine;
0793 }
0794
0795 err = -EFAULT;
0796 if (!get_user(extensions, &user->extensions))
0797 err = i915_user_extensions(u64_to_user_ptr(extensions),
0798 set_proto_ctx_engines_extensions,
0799 ARRAY_SIZE(set_proto_ctx_engines_extensions),
0800 &set);
0801 if (err) {
0802 kfree(set.engines);
0803 return err;
0804 }
0805
0806 pc->num_user_engines = set.num_engines;
0807 pc->user_engines = set.engines;
0808
0809 return 0;
0810 }
0811
0812 static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
0813 struct i915_gem_proto_context *pc,
0814 struct drm_i915_gem_context_param *args)
0815 {
0816 struct drm_i915_private *i915 = fpriv->dev_priv;
0817 struct drm_i915_gem_context_param_sseu user_sseu;
0818 struct intel_sseu *sseu;
0819 int ret;
0820
0821 if (args->size < sizeof(user_sseu))
0822 return -EINVAL;
0823
0824 if (GRAPHICS_VER(i915) != 11)
0825 return -ENODEV;
0826
0827 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
0828 sizeof(user_sseu)))
0829 return -EFAULT;
0830
0831 if (user_sseu.rsvd)
0832 return -EINVAL;
0833
0834 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
0835 return -EINVAL;
0836
0837 if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0))
0838 return -EINVAL;
0839
0840 if (pc->num_user_engines >= 0) {
0841 int idx = user_sseu.engine.engine_instance;
0842 struct i915_gem_proto_engine *pe;
0843
0844 if (idx >= pc->num_user_engines)
0845 return -EINVAL;
0846
0847 pe = &pc->user_engines[idx];
0848
		/* Only the render engine supports SSEU (RPCS) configuration */
0850 if (pe->engine->class != RENDER_CLASS)
0851 return -EINVAL;
0852
0853 sseu = &pe->sseu;
0854 } else {
		/* Only the render engine supports SSEU (RPCS) configuration */
0856 if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER)
0857 return -EINVAL;

		/* There is only one render engine */
0860 if (user_sseu.engine.engine_instance != 0)
0861 return -EINVAL;
0862
0863 sseu = &pc->legacy_rcs_sseu;
0864 }
0865
0866 ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu);
0867 if (ret)
0868 return ret;
0869
0870 args->size = sizeof(user_sseu);
0871
0872 return 0;
0873 }
0874
0875 static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
0876 struct i915_gem_proto_context *pc,
0877 struct drm_i915_gem_context_param *args)
0878 {
0879 int ret = 0;
0880
0881 switch (args->param) {
0882 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
0883 if (args->size)
0884 ret = -EINVAL;
0885 else if (args->value)
0886 pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE);
0887 else
0888 pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE);
0889 break;
0890
0891 case I915_CONTEXT_PARAM_BANNABLE:
0892 if (args->size)
0893 ret = -EINVAL;
0894 else if (!capable(CAP_SYS_ADMIN) && !args->value)
0895 ret = -EPERM;
0896 else if (args->value)
0897 pc->user_flags |= BIT(UCONTEXT_BANNABLE);
0898 else if (pc->uses_protected_content)
0899 ret = -EPERM;
0900 else
0901 pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
0902 break;
0903
0904 case I915_CONTEXT_PARAM_RECOVERABLE:
0905 if (args->size)
0906 ret = -EINVAL;
0907 else if (!args->value)
0908 pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE);
0909 else if (pc->uses_protected_content)
0910 ret = -EPERM;
0911 else
0912 pc->user_flags |= BIT(UCONTEXT_RECOVERABLE);
0913 break;
0914
0915 case I915_CONTEXT_PARAM_PRIORITY:
0916 ret = validate_priority(fpriv->dev_priv, args);
0917 if (!ret)
0918 pc->sched.priority = args->value;
0919 break;
0920
0921 case I915_CONTEXT_PARAM_SSEU:
0922 ret = set_proto_ctx_sseu(fpriv, pc, args);
0923 break;
0924
0925 case I915_CONTEXT_PARAM_VM:
0926 ret = set_proto_ctx_vm(fpriv, pc, args);
0927 break;
0928
0929 case I915_CONTEXT_PARAM_ENGINES:
0930 ret = set_proto_ctx_engines(fpriv, pc, args);
0931 break;
0932
0933 case I915_CONTEXT_PARAM_PERSISTENCE:
0934 if (args->size)
0935 ret = -EINVAL;
0936 else
0937 ret = proto_context_set_persistence(fpriv->dev_priv, pc,
0938 args->value);
0939 break;
0940
0941 case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
0942 ret = proto_context_set_protected(fpriv->dev_priv, pc,
0943 args->value);
0944 break;
0945
0946 case I915_CONTEXT_PARAM_NO_ZEROMAP:
0947 case I915_CONTEXT_PARAM_BAN_PERIOD:
0948 case I915_CONTEXT_PARAM_RINGSIZE:
0949 default:
0950 ret = -EINVAL;
0951 break;
0952 }
0953
0954 return ret;
0955 }
0956
0957 static int intel_context_set_gem(struct intel_context *ce,
0958 struct i915_gem_context *ctx,
0959 struct intel_sseu sseu)
0960 {
0961 int ret = 0;
0962
0963 GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
0964 RCU_INIT_POINTER(ce->gem_context, ctx);
0965
0966 GEM_BUG_ON(intel_context_is_pinned(ce));
0967 ce->ring_size = SZ_16K;
0968
0969 i915_vm_put(ce->vm);
0970 ce->vm = i915_gem_context_get_eb_vm(ctx);
0971
0972 if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
0973 intel_engine_has_timeslices(ce->engine) &&
0974 intel_engine_has_semaphores(ce->engine))
0975 __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
0976
0977 if (CONFIG_DRM_I915_REQUEST_TIMEOUT &&
0978 ctx->i915->params.request_timeout_ms) {
0979 unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;
0980
0981 intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
0982 }

	/* A valid SSEU has no zero fields */
0985 if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
0986 ret = intel_context_reconfigure_sseu(ce, sseu);
0987
0988 return ret;
0989 }
0990
0991 static void __unpin_engines(struct i915_gem_engines *e, unsigned int count)
0992 {
0993 while (count--) {
0994 struct intel_context *ce = e->engines[count], *child;
0995
0996 if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags))
0997 continue;
0998
0999 for_each_child(ce, child)
1000 intel_context_unpin(child);
1001 intel_context_unpin(ce);
1002 }
1003 }
1004
1005 static void unpin_engines(struct i915_gem_engines *e)
1006 {
1007 __unpin_engines(e, e->num_engines);
1008 }
1009
1010 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
1011 {
1012 while (count--) {
1013 if (!e->engines[count])
1014 continue;
1015
1016 intel_context_put(e->engines[count]);
1017 }
1018 kfree(e);
1019 }
1020
1021 static void free_engines(struct i915_gem_engines *e)
1022 {
1023 __free_engines(e, e->num_engines);
1024 }
1025
1026 static void free_engines_rcu(struct rcu_head *rcu)
1027 {
1028 struct i915_gem_engines *engines =
1029 container_of(rcu, struct i915_gem_engines, rcu);
1030
1031 i915_sw_fence_fini(&engines->fence);
1032 free_engines(engines);
1033 }
1034
1035 static void accumulate_runtime(struct i915_drm_client *client,
1036 struct i915_gem_engines *engines)
1037 {
1038 struct i915_gem_engines_iter it;
1039 struct intel_context *ce;
1040
1041 if (!client)
1042 return;

	/* Transfer accumulated runtime to the parent drm client */
1045 for_each_gem_engine(ce, engines, it) {
1046 unsigned int class = ce->engine->uabi_class;
1047
1048 GEM_BUG_ON(class >= ARRAY_SIZE(client->past_runtime));
1049 atomic64_add(intel_context_get_total_runtime_ns(ce),
1050 &client->past_runtime[class]);
1051 }
1052 }
1053
1054 static int
1055 engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
1056 {
1057 struct i915_gem_engines *engines =
1058 container_of(fence, typeof(*engines), fence);
1059 struct i915_gem_context *ctx = engines->ctx;
1060
1061 switch (state) {
1062 case FENCE_COMPLETE:
1063 if (!list_empty(&engines->link)) {
1064 unsigned long flags;
1065
1066 spin_lock_irqsave(&ctx->stale.lock, flags);
1067 list_del(&engines->link);
1068 spin_unlock_irqrestore(&ctx->stale.lock, flags);
1069 }
1070 accumulate_runtime(ctx->client, engines);
1071 i915_gem_context_put(ctx);
1072
1073 break;
1074
1075 case FENCE_FREE:
1076 init_rcu_head(&engines->rcu);
1077 call_rcu(&engines->rcu, free_engines_rcu);
1078 break;
1079 }
1080
1081 return NOTIFY_DONE;
1082 }
1083
1084 static struct i915_gem_engines *alloc_engines(unsigned int count)
1085 {
1086 struct i915_gem_engines *e;
1087
1088 e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
1089 if (!e)
1090 return NULL;
1091
1092 i915_sw_fence_init(&e->fence, engines_notify);
1093 return e;
1094 }
1095
1096 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
1097 struct intel_sseu rcs_sseu)
1098 {
1099 const struct intel_gt *gt = to_gt(ctx->i915);
1100 struct intel_engine_cs *engine;
1101 struct i915_gem_engines *e, *err;
1102 enum intel_engine_id id;
1103
1104 e = alloc_engines(I915_NUM_ENGINES);
1105 if (!e)
1106 return ERR_PTR(-ENOMEM);
1107
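	/* Populate the legacy execbuf engine map from the physical engines */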
1108 for_each_engine(engine, gt, id) {
1109 struct intel_context *ce;
1110 struct intel_sseu sseu = {};
1111 int ret;
1112
1113 if (engine->legacy_idx == INVALID_ENGINE)
1114 continue;
1115
1116 GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
1117 GEM_BUG_ON(e->engines[engine->legacy_idx]);
1118
1119 ce = intel_context_create(engine);
1120 if (IS_ERR(ce)) {
1121 err = ERR_CAST(ce);
1122 goto free_engines;
1123 }
1124
1125 e->engines[engine->legacy_idx] = ce;
1126 e->num_engines = max(e->num_engines, engine->legacy_idx + 1);
1127
1128 if (engine->class == RENDER_CLASS)
1129 sseu = rcs_sseu;
1130
1131 ret = intel_context_set_gem(ce, ctx, sseu);
1132 if (ret) {
1133 err = ERR_PTR(ret);
1134 goto free_engines;
1135 }
1136
1137 }
1138
1139 return e;
1140
1141 free_engines:
1142 free_engines(e);
1143 return err;
1144 }
1145
1146 static int perma_pin_contexts(struct intel_context *ce)
1147 {
1148 struct intel_context *child;
1149 int i = 0, j = 0, ret;
1150
1151 GEM_BUG_ON(!intel_context_is_parent(ce));
1152
1153 ret = intel_context_pin(ce);
1154 if (unlikely(ret))
1155 return ret;
1156
1157 for_each_child(ce, child) {
1158 ret = intel_context_pin(child);
1159 if (unlikely(ret))
1160 goto unwind;
1161 ++i;
1162 }
1163
1164 set_bit(CONTEXT_PERMA_PIN, &ce->flags);
1165
1166 return 0;
1167
1168 unwind:
1169 intel_context_unpin(ce);
1170 for_each_child(ce, child) {
1171 if (j++ < i)
1172 intel_context_unpin(child);
1173 else
1174 break;
1175 }
1176
1177 return ret;
1178 }
1179
1180 static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
1181 unsigned int num_engines,
1182 struct i915_gem_proto_engine *pe)
1183 {
1184 struct i915_gem_engines *e, *err;
1185 unsigned int n;
1186
1187 e = alloc_engines(num_engines);
1188 if (!e)
1189 return ERR_PTR(-ENOMEM);
1190 e->num_engines = num_engines;
1191
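	/* Create an intel_context for each slot in the user's engine map */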
1192 for (n = 0; n < num_engines; n++) {
1193 struct intel_context *ce, *child;
1194 int ret;
1195
1196 switch (pe[n].type) {
1197 case I915_GEM_ENGINE_TYPE_PHYSICAL:
1198 ce = intel_context_create(pe[n].engine);
1199 break;
1200
1201 case I915_GEM_ENGINE_TYPE_BALANCED:
1202 ce = intel_engine_create_virtual(pe[n].siblings,
1203 pe[n].num_siblings, 0);
1204 break;
1205
1206 case I915_GEM_ENGINE_TYPE_PARALLEL:
1207 ce = intel_engine_create_parallel(pe[n].siblings,
1208 pe[n].num_siblings,
1209 pe[n].width);
1210 break;
1211
1212 case I915_GEM_ENGINE_TYPE_INVALID:
1213 default:
1214 GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID);
1215 continue;
1216 }
1217
1218 if (IS_ERR(ce)) {
1219 err = ERR_CAST(ce);
1220 goto free_engines;
1221 }
1222
1223 e->engines[n] = ce;
1224
		ret = intel_context_set_gem(ce, ctx, pe[n].sseu);
1226 if (ret) {
1227 err = ERR_PTR(ret);
1228 goto free_engines;
1229 }
1230 for_each_child(ce, child) {
			ret = intel_context_set_gem(child, ctx, pe[n].sseu);
1232 if (ret) {
1233 err = ERR_PTR(ret);
1234 goto free_engines;
1235 }
1236 }

		/*
		 * Parallel engines must be pinned here, after
		 * intel_context_set_gem() has fixed the ring size, and they
		 * stay pinned (CONTEXT_PERMA_PIN) for the lifetime of the
		 * context.
		 */
1247 if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) {
1248 ret = perma_pin_contexts(ce);
1249 if (ret) {
1250 err = ERR_PTR(ret);
1251 goto free_engines;
1252 }
1253 }
1254 }
1255
1256 return e;
1257
1258 free_engines:
1259 free_engines(e);
1260 return err;
1261 }
1262
1263 static void i915_gem_context_release_work(struct work_struct *work)
1264 {
1265 struct i915_gem_context *ctx = container_of(work, typeof(*ctx),
1266 release_work);
1267 struct i915_address_space *vm;
1268
1269 trace_i915_context_free(ctx);
1270 GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
1271
1272 spin_lock(&ctx->i915->gem.contexts.lock);
1273 list_del(&ctx->link);
1274 spin_unlock(&ctx->i915->gem.contexts.lock);
1275
1276 if (ctx->syncobj)
1277 drm_syncobj_put(ctx->syncobj);
1278
1279 vm = ctx->vm;
1280 if (vm)
1281 i915_vm_put(vm);
1282
1283 if (ctx->pxp_wakeref)
1284 intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref);
1285
1286 if (ctx->client)
1287 i915_drm_client_put(ctx->client);
1288
1289 mutex_destroy(&ctx->engines_mutex);
1290 mutex_destroy(&ctx->lut_mutex);
1291
1292 put_pid(ctx->pid);
1293 mutex_destroy(&ctx->mutex);
1294
1295 kfree_rcu(ctx, rcu);
1296 }
1297
1298 void i915_gem_context_release(struct kref *ref)
1299 {
1300 struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
1301
1302 queue_work(ctx->i915->wq, &ctx->release_work);
1303 }
1304
1305 static inline struct i915_gem_engines *
1306 __context_engines_static(const struct i915_gem_context *ctx)
1307 {
1308 return rcu_dereference_protected(ctx->engines, true);
1309 }
1310
1311 static void __reset_context(struct i915_gem_context *ctx,
1312 struct intel_engine_cs *engine)
1313 {
1314 intel_gt_handle_error(engine->gt, engine->mask, 0,
1315 "context closure in %s", ctx->name);
1316 }
1317
1318 static bool __cancel_engine(struct intel_engine_cs *engine)
1319 {
	/*
	 * Send a high priority pulse down the engine to momentarily preempt
	 * the current request. As the context has been marked as closed,
	 * any incomplete request will then be skipped after the preemption.
	 * If we cannot inject such a pulse, the caller falls back to a full
	 * engine reset instead.
	 */
1334 return intel_engine_pulse(engine) == 0;
1335 }
1336
1337 static struct intel_engine_cs *active_engine(struct intel_context *ce)
1338 {
1339 struct intel_engine_cs *engine = NULL;
1340 struct i915_request *rq;
1341
1342 if (intel_context_has_inflight(ce))
1343 return intel_context_inflight(ce);
1344
1345 if (!ce->timeline)
1346 return NULL;

	/*
	 * rq->link is only SLAB_TYPESAFE_BY_RCU, so we must hold a reference
	 * on each request to stop it being moved to another timeline while
	 * we inspect it.
	 */
1353 rcu_read_lock();
1354 list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
1355 bool found;

		/* Has the timeline already completed up to this point? */
1358 if (!i915_request_get_rcu(rq))
1359 break;

		/* Check with the backend if the request is still inflight */
1362 found = true;
1363 if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
1364 found = i915_request_active_engine(rq, &engine);
1365
1366 i915_request_put(rq);
1367 if (found)
1368 break;
1369 }
1370 rcu_read_unlock();
1371
1372 return engine;
1373 }
1374
1375 static void
1376 kill_engines(struct i915_gem_engines *engines, bool exit, bool persistent)
1377 {
1378 struct i915_gem_engines_iter it;
1379 struct intel_context *ce;

	/*
	 * Map each user engine back to the physical engine it may be running
	 * on; a virtual engine can resolve to several physical engines. We
	 * only care about engines that still have incomplete requests from
	 * this context.
	 */
1388 for_each_gem_engine(ce, engines, it) {
1389 struct intel_engine_cs *engine;
1390 bool skip = false;
1391
1392 if (exit)
1393 skip = intel_context_set_exiting(ce);
1394 else if (!persistent)
1395 skip = intel_context_exit_nonpersistent(ce, NULL);
1396
1397 if (skip)
1398 continue;

		/*
		 * Check the current active state of this context; if it is
		 * not executing on the GPU there is nothing to cancel,
		 * otherwise we must evict it from the engine it occupies.
		 */
1407 engine = active_engine(ce);

		/* First attempt to gracefully cancel the context */
1410 if (engine && !__cancel_engine(engine) && (exit || !persistent))
			/*
			 * If we are unable to send a preemptive pulse to bump
			 * the context from the GPU, we have to resort to a
			 * full reset and hope the collateral damage is small.
			 */
1416 __reset_context(engines->ctx, engine);
1417 }
1418 }
1419
1420 static void kill_context(struct i915_gem_context *ctx)
1421 {
1422 struct i915_gem_engines *pos, *next;
1423
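	/*
	 * Walk the list of stale engines, cancelling any outstanding work on
	 * each; drop the stale.lock around kill_engines() as cancelling
	 * requests may sleep.
	 */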
1424 spin_lock_irq(&ctx->stale.lock);
1425 GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
1426 list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
1427 if (!i915_sw_fence_await(&pos->fence)) {
1428 list_del_init(&pos->link);
1429 continue;
1430 }
1431
1432 spin_unlock_irq(&ctx->stale.lock);
1433
1434 kill_engines(pos, !ctx->i915->params.enable_hangcheck,
1435 i915_gem_context_is_persistent(ctx));
1436
1437 spin_lock_irq(&ctx->stale.lock);
1438 GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
1439 list_safe_reset_next(pos, next, link);
1440 list_del_init(&pos->link);
1441
1442 i915_sw_fence_complete(&pos->fence);
1443 }
1444 spin_unlock_irq(&ctx->stale.lock);
1445 }
1446
1447 static void engines_idle_release(struct i915_gem_context *ctx,
1448 struct i915_gem_engines *engines)
1449 {
1450 struct i915_gem_engines_iter it;
1451 struct intel_context *ce;
1452
1453 INIT_LIST_HEAD(&engines->link);
1454
1455 engines->ctx = i915_gem_context_get(ctx);
1456
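	/*
	 * Attach an await on each still-active context so the engines are
	 * not freed until everything has been scheduled out and retired.
	 */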
1457 for_each_gem_engine(ce, engines, it) {
1458 int err;

		/* Closing the context serialises with execbuf */
1461 set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
1462 if (!intel_context_pin_if_active(ce))
1463 continue;

		/* Wait until the context is finally scheduled out and retired */
1466 err = i915_sw_fence_await_active(&engines->fence,
1467 &ce->active,
1468 I915_ACTIVE_AWAIT_BARRIER);
1469 intel_context_unpin(ce);
1470 if (err)
1471 goto kill;
1472 }
1473
1474 spin_lock_irq(&ctx->stale.lock);
1475 if (!i915_gem_context_is_closed(ctx))
1476 list_add_tail(&engines->link, &ctx->stale.engines);
1477 spin_unlock_irq(&ctx->stale.lock);
1478
1479 kill:
1480 if (list_empty(&engines->link))
1481 kill_engines(engines, true,
1482 i915_gem_context_is_persistent(ctx));
1483
1484 i915_sw_fence_commit(&engines->fence);
1485 }
1486
1487 static void set_closed_name(struct i915_gem_context *ctx)
1488 {
1489 char *s;

	/* Replace '[]' in the context name with '<>' to mark it as closed */
1493 s = strrchr(ctx->name, '[');
1494 if (!s)
1495 return;
1496
1497 *s = '<';
1498
1499 s = strchr(s + 1, ']');
1500 if (s)
1501 *s = '>';
1502 }
1503
1504 static void context_close(struct i915_gem_context *ctx)
1505 {
1506 struct i915_drm_client *client;

	/* Flush any concurrent set_engines() */
1509 mutex_lock(&ctx->engines_mutex);
1510 unpin_engines(__context_engines_static(ctx));
1511 engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
1512 i915_gem_context_set_closed(ctx);
1513 mutex_unlock(&ctx->engines_mutex);
1514
1515 mutex_lock(&ctx->mutex);
1516
1517 set_closed_name(ctx);

	/*
	 * The LUT uses the VMA as a backpointer to unref the object, so we
	 * need to clear the LUT before we close all the VMA (inside the
	 * ppgtt).
	 */
1524 lut_close(ctx);
1525
1526 ctx->file_priv = ERR_PTR(-EBADF);
1527
1528 client = ctx->client;
1529 if (client) {
1530 spin_lock(&client->ctx_lock);
1531 list_del_rcu(&ctx->client_link);
1532 spin_unlock(&client->ctx_lock);
1533 }
1534
1535 mutex_unlock(&ctx->mutex);

	/*
	 * If the user has disabled hangchecking, we can not be sure that the
	 * batches will ever complete after the context is closed, keeping
	 * the context and all resources pinned forever. So in this case we
	 * opt to forcibly kill off all remaining requests on context close.
	 */
1544 kill_context(ctx);
1545
1546 i915_gem_context_put(ctx);
1547 }
1548
1549 static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
1550 {
1551 if (i915_gem_context_is_persistent(ctx) == state)
1552 return 0;
1553
1554 if (state) {
		/*
		 * Only contexts that are short-lived [that will expire or be
		 * reset] are allowed to survive past termination. We require
		 * hangcheck to ensure that the persistent requests are
		 * healthy.
		 */
1560 if (!ctx->i915->params.enable_hangcheck)
1561 return -EINVAL;
1562
1563 i915_gem_context_set_persistence(ctx);
1564 } else {
		/* To cancel a context we use "preempt-to-idle" */
1566 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
1567 return -ENODEV;
1568

		/*
		 * If the cancellation of a request cannot be performed by
		 * preemption alone, we must be able to reset the engine that
		 * is running the hostile request. Without per-engine reset
		 * support we cannot make a non-persistent context safe to
		 * close, so refuse to clear the persistence flag.
		 */
1582 if (!intel_has_reset_engine(to_gt(ctx->i915)))
1583 return -ENODEV;
1584
1585 i915_gem_context_clear_persistence(ctx);
1586 }
1587
1588 return 0;
1589 }
1590
1591 static struct i915_gem_context *
1592 i915_gem_create_context(struct drm_i915_private *i915,
1593 const struct i915_gem_proto_context *pc)
1594 {
1595 struct i915_gem_context *ctx;
1596 struct i915_address_space *vm = NULL;
1597 struct i915_gem_engines *e;
1598 int err;
1599 int i;
1600
1601 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1602 if (!ctx)
1603 return ERR_PTR(-ENOMEM);
1604
1605 kref_init(&ctx->ref);
1606 ctx->i915 = i915;
1607 ctx->sched = pc->sched;
1608 mutex_init(&ctx->mutex);
1609 INIT_LIST_HEAD(&ctx->link);
1610 INIT_WORK(&ctx->release_work, i915_gem_context_release_work);
1611
1612 spin_lock_init(&ctx->stale.lock);
1613 INIT_LIST_HEAD(&ctx->stale.engines);
1614
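	/*
	 * Use the VM supplied with the proto-context if there is one,
	 * otherwise create a fresh ppgtt on platforms with full PPGTT.
	 */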
1615 if (pc->vm) {
1616 vm = i915_vm_get(pc->vm);
1617 } else if (HAS_FULL_PPGTT(i915)) {
1618 struct i915_ppgtt *ppgtt;
1619
1620 ppgtt = i915_ppgtt_create(to_gt(i915), 0);
1621 if (IS_ERR(ppgtt)) {
1622 drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
1623 PTR_ERR(ppgtt));
1624 err = PTR_ERR(ppgtt);
1625 goto err_ctx;
1626 }
1627 vm = &ppgtt->vm;
1628 }
1629 if (vm)
1630 ctx->vm = vm;
1631
1632 mutex_init(&ctx->engines_mutex);
1633 if (pc->num_user_engines >= 0) {
1634 i915_gem_context_set_user_engines(ctx);
1635 e = user_engines(ctx, pc->num_user_engines, pc->user_engines);
1636 } else {
1637 i915_gem_context_clear_user_engines(ctx);
1638 e = default_engines(ctx, pc->legacy_rcs_sseu);
1639 }
1640 if (IS_ERR(e)) {
1641 err = PTR_ERR(e);
1642 goto err_vm;
1643 }
1644 RCU_INIT_POINTER(ctx->engines, e);
1645
1646 INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
1647 mutex_init(&ctx->lut_mutex);

	/*
	 * Mark all L3 slices as needing a remap so that on first load the
	 * context restores whatever remap state already exists. If there is
	 * no remap info, this is a NOP.
	 */
1652 ctx->remap_slice = ALL_L3_SLICES(i915);
1653
1654 ctx->user_flags = pc->user_flags;
1655
1656 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
1657 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
1658
1659 if (pc->single_timeline) {
1660 err = drm_syncobj_create(&ctx->syncobj,
1661 DRM_SYNCOBJ_CREATE_SIGNALED,
1662 NULL);
1663 if (err)
1664 goto err_engines;
1665 }
1666
1667 if (pc->uses_protected_content) {
1668 ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1669 ctx->uses_protected_content = true;
1670 }
1671
1672 trace_i915_context_create(ctx);
1673
1674 return ctx;
1675
1676 err_engines:
1677 free_engines(e);
1678 err_vm:
1679 if (ctx->vm)
1680 i915_vm_put(ctx->vm);
1681 err_ctx:
1682 kfree(ctx);
1683 return ERR_PTR(err);
1684 }
1685
1686 static void init_contexts(struct i915_gem_contexts *gc)
1687 {
1688 spin_lock_init(&gc->lock);
1689 INIT_LIST_HEAD(&gc->list);
1690 }
1691
1692 void i915_gem_init__contexts(struct drm_i915_private *i915)
1693 {
1694 init_contexts(&i915->gem.contexts);
1695 }
1696
1697 static void gem_context_register(struct i915_gem_context *ctx,
1698 struct drm_i915_file_private *fpriv,
1699 u32 id)
1700 {
1701 struct drm_i915_private *i915 = ctx->i915;
1702 void *old;
1703
1704 ctx->file_priv = fpriv;
1705
1706 ctx->pid = get_task_pid(current, PIDTYPE_PID);
1707 ctx->client = i915_drm_client_get(fpriv->client);
1708
1709 snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
1710 current->comm, pid_nr(ctx->pid));

	/* Finally expose the context to userspace via the xarray */
1713 old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
1714 WARN_ON(old);
1715
1716 spin_lock(&ctx->client->ctx_lock);
1717 list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list);
1718 spin_unlock(&ctx->client->ctx_lock);
1719
1720 spin_lock(&i915->gem.contexts.lock);
1721 list_add_tail(&ctx->link, &i915->gem.contexts.list);
1722 spin_unlock(&i915->gem.contexts.lock);
1723 }
1724
1725 int i915_gem_context_open(struct drm_i915_private *i915,
1726 struct drm_file *file)
1727 {
1728 struct drm_i915_file_private *file_priv = file->driver_priv;
1729 struct i915_gem_proto_context *pc;
1730 struct i915_gem_context *ctx;
1731 int err;
1732
1733 mutex_init(&file_priv->proto_context_lock);
1734 xa_init_flags(&file_priv->proto_context_xa, XA_FLAGS_ALLOC);

	/* 0 is reserved for the default context */
1737 xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC1);

	/* 0 is reserved for an invalid/unassigned ppgtt */
1740 xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
1741
1742 pc = proto_context_create(i915, 0);
1743 if (IS_ERR(pc)) {
1744 err = PTR_ERR(pc);
1745 goto err;
1746 }
1747
1748 ctx = i915_gem_create_context(i915, pc);
1749 proto_context_close(i915, pc);
1750 if (IS_ERR(ctx)) {
1751 err = PTR_ERR(ctx);
1752 goto err;
1753 }
1754
1755 gem_context_register(ctx, file_priv, 0);
1756
1757 return 0;
1758
1759 err:
1760 xa_destroy(&file_priv->vm_xa);
1761 xa_destroy(&file_priv->context_xa);
1762 xa_destroy(&file_priv->proto_context_xa);
1763 mutex_destroy(&file_priv->proto_context_lock);
1764 return err;
1765 }
1766
1767 void i915_gem_context_close(struct drm_file *file)
1768 {
1769 struct drm_i915_file_private *file_priv = file->driver_priv;
1770 struct i915_gem_proto_context *pc;
1771 struct i915_address_space *vm;
1772 struct i915_gem_context *ctx;
1773 unsigned long idx;
1774
1775 xa_for_each(&file_priv->proto_context_xa, idx, pc)
1776 proto_context_close(file_priv->dev_priv, pc);
1777 xa_destroy(&file_priv->proto_context_xa);
1778 mutex_destroy(&file_priv->proto_context_lock);
1779
1780 xa_for_each(&file_priv->context_xa, idx, ctx)
1781 context_close(ctx);
1782 xa_destroy(&file_priv->context_xa);
1783
1784 xa_for_each(&file_priv->vm_xa, idx, vm)
1785 i915_vm_put(vm);
1786 xa_destroy(&file_priv->vm_xa);
1787 }
1788
1789 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
1790 struct drm_file *file)
1791 {
1792 struct drm_i915_private *i915 = to_i915(dev);
1793 struct drm_i915_gem_vm_control *args = data;
1794 struct drm_i915_file_private *file_priv = file->driver_priv;
1795 struct i915_ppgtt *ppgtt;
1796 u32 id;
1797 int err;
1798
1799 if (!HAS_FULL_PPGTT(i915))
1800 return -ENODEV;
1801
1802 if (args->flags)
1803 return -EINVAL;
1804
1805 ppgtt = i915_ppgtt_create(to_gt(i915), 0);
1806 if (IS_ERR(ppgtt))
1807 return PTR_ERR(ppgtt);
1808
1809 if (args->extensions) {
1810 err = i915_user_extensions(u64_to_user_ptr(args->extensions),
1811 NULL, 0,
1812 ppgtt);
1813 if (err)
1814 goto err_put;
1815 }
1816
1817 err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
1818 xa_limit_32b, GFP_KERNEL);
1819 if (err)
1820 goto err_put;
1821
1822 GEM_BUG_ON(id == 0);
1823 args->vm_id = id;
1824 return 0;
1825
1826 err_put:
1827 i915_vm_put(&ppgtt->vm);
1828 return err;
1829 }
1830
1831 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
1832 struct drm_file *file)
1833 {
1834 struct drm_i915_file_private *file_priv = file->driver_priv;
1835 struct drm_i915_gem_vm_control *args = data;
1836 struct i915_address_space *vm;
1837
1838 if (args->flags)
1839 return -EINVAL;
1840
1841 if (args->extensions)
1842 return -EINVAL;
1843
1844 vm = xa_erase(&file_priv->vm_xa, args->vm_id);
1845 if (!vm)
1846 return -ENOENT;
1847
1848 i915_vm_put(vm);
1849 return 0;
1850 }
1851
1852 static int get_ppgtt(struct drm_i915_file_private *file_priv,
1853 struct i915_gem_context *ctx,
1854 struct drm_i915_gem_context_param *args)
1855 {
1856 struct i915_address_space *vm;
1857 int err;
1858 u32 id;
1859
1860 if (!i915_gem_context_has_full_ppgtt(ctx))
1861 return -ENODEV;
1862
1863 vm = ctx->vm;
1864 GEM_BUG_ON(!vm);
1865
1866 err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1867 if (err)
1868 return err;
1869
1870 i915_vm_get(vm);
1871
1872 GEM_BUG_ON(id == 0);
1873 args->value = id;
1874 args->size = 0;
1875
1876 return err;
1877 }
1878
1879 int
1880 i915_gem_user_to_context_sseu(struct intel_gt *gt,
1881 const struct drm_i915_gem_context_param_sseu *user,
1882 struct intel_sseu *context)
1883 {
1884 const struct sseu_dev_info *device = >->info.sseu;
1885 struct drm_i915_private *i915 = gt->i915;
1886 unsigned int dev_subslice_mask = intel_sseu_get_hsw_subslices(device, 0);

	/* No zeros in any field */
1889 if (!user->slice_mask || !user->subslice_mask ||
1890 !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1891 return -EINVAL;

	/* Max must be at least min */
1894 if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1895 return -EINVAL;

	/*
	 * Some future proofing on the types since the uAPI is wider than the
	 * current internal implementation.
	 */
1901 if (overflows_type(user->slice_mask, context->slice_mask) ||
1902 overflows_type(user->subslice_mask, context->subslice_mask) ||
1903 overflows_type(user->min_eus_per_subslice,
1904 context->min_eus_per_subslice) ||
1905 overflows_type(user->max_eus_per_subslice,
1906 context->max_eus_per_subslice))
1907 return -EINVAL;

	/* Check validity against the hardware */
1910 if (user->slice_mask & ~device->slice_mask)
1911 return -EINVAL;
1912
1913 if (user->subslice_mask & ~dev_subslice_mask)
1914 return -EINVAL;
1915
1916 if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1917 return -EINVAL;
1918
1919 context->slice_mask = user->slice_mask;
1920 context->subslice_mask = user->subslice_mask;
1921 context->min_eus_per_subslice = user->min_eus_per_subslice;
1922 context->max_eus_per_subslice = user->max_eus_per_subslice;

	/* Part specific restrictions */
1925 if (GRAPHICS_VER(i915) == 11) {
1926 unsigned int hw_s = hweight8(device->slice_mask);
1927 unsigned int hw_ss_per_s = hweight8(dev_subslice_mask);
1928 unsigned int req_s = hweight8(context->slice_mask);
1929 unsigned int req_ss = hweight8(context->subslice_mask);
1930

		/*
		 * Only full subslice enablement is allowed when more than
		 * one slice is turned on.
		 */
1935 if (req_s > 1 && req_ss != hw_ss_per_s)
1936 return -EINVAL;
1937

		/*
		 * If more than four subslices are requested, the number of
		 * subslices must be even.
		 */
1942 if (req_ss > 4 && (req_ss & 1))
1943 return -EINVAL;

		/*
		 * With only one slice enabled, a partial subslice request
		 * must not exceed half of the available subslices.
		 */
1950 if (req_s == 1 && req_ss < hw_ss_per_s &&
1951 req_ss > (hw_ss_per_s / 2))
1952 return -EINVAL;

		/* Either all slices or a single slice must be enabled */
1957 if (req_s != 1 && req_s != hw_s)
1958 return -EINVAL;

		/*
		 * With a single slice enabled, only full or half subslice
		 * enablement is allowed.
		 */
1964 if (req_s == 1 &&
1965 (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1966 return -EINVAL;

		/* No EU configuration changes are allowed */
1969 if ((user->min_eus_per_subslice !=
1970 device->max_eus_per_subslice) ||
1971 (user->max_eus_per_subslice !=
1972 device->max_eus_per_subslice))
1973 return -EINVAL;
1974 }
1975
1976 return 0;
1977 }
1978
1979 static int set_sseu(struct i915_gem_context *ctx,
1980 struct drm_i915_gem_context_param *args)
1981 {
1982 struct drm_i915_private *i915 = ctx->i915;
1983 struct drm_i915_gem_context_param_sseu user_sseu;
1984 struct intel_context *ce;
1985 struct intel_sseu sseu;
1986 unsigned long lookup;
1987 int ret;
1988
1989 if (args->size < sizeof(user_sseu))
1990 return -EINVAL;
1991
1992 if (GRAPHICS_VER(i915) != 11)
1993 return -ENODEV;
1994
1995 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1996 sizeof(user_sseu)))
1997 return -EFAULT;
1998
1999 if (user_sseu.rsvd)
2000 return -EINVAL;
2001
2002 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2003 return -EINVAL;
2004
2005 lookup = 0;
2006 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2007 lookup |= LOOKUP_USER_INDEX;
2008
2009 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2010 if (IS_ERR(ce))
2011 return PTR_ERR(ce);

	/* Only the render engine supports SSEU (RPCS) configuration */
2014 if (ce->engine->class != RENDER_CLASS) {
2015 ret = -ENODEV;
2016 goto out_ce;
2017 }
2018
2019 ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
2020 if (ret)
2021 goto out_ce;
2022
2023 ret = intel_context_reconfigure_sseu(ce, sseu);
2024 if (ret)
2025 goto out_ce;
2026
2027 args->size = sizeof(user_sseu);
2028
2029 out_ce:
2030 intel_context_put(ce);
2031 return ret;
2032 }
2033
2034 static int
2035 set_persistence(struct i915_gem_context *ctx,
2036 const struct drm_i915_gem_context_param *args)
2037 {
2038 if (args->size)
2039 return -EINVAL;
2040
2041 return __context_set_persistence(ctx, args->value);
2042 }
2043
2044 static int set_priority(struct i915_gem_context *ctx,
2045 const struct drm_i915_gem_context_param *args)
2046 {
2047 struct i915_gem_engines_iter it;
2048 struct intel_context *ce;
2049 int err;
2050
2051 err = validate_priority(ctx->i915, args);
2052 if (err)
2053 return err;
2054
2055 ctx->sched.priority = args->value;
2056
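	/* Update semaphore usage on each engine to match the new priority */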
2057 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2058 if (!intel_engine_has_timeslices(ce->engine))
2059 continue;
2060
2061 if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
2062 intel_engine_has_semaphores(ce->engine))
2063 intel_context_set_use_semaphores(ce);
2064 else
2065 intel_context_clear_use_semaphores(ce);
2066 }
2067 i915_gem_context_unlock_engines(ctx);
2068
2069 return 0;
2070 }
2071
2072 static int get_protected(struct i915_gem_context *ctx,
2073 struct drm_i915_gem_context_param *args)
2074 {
2075 args->size = 0;
2076 args->value = i915_gem_context_uses_protected_content(ctx);
2077
2078 return 0;
2079 }
2080
2081 static int ctx_setparam(struct drm_i915_file_private *fpriv,
2082 struct i915_gem_context *ctx,
2083 struct drm_i915_gem_context_param *args)
2084 {
2085 int ret = 0;
2086
2087 switch (args->param) {
2088 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2089 if (args->size)
2090 ret = -EINVAL;
2091 else if (args->value)
2092 i915_gem_context_set_no_error_capture(ctx);
2093 else
2094 i915_gem_context_clear_no_error_capture(ctx);
2095 break;
2096
2097 case I915_CONTEXT_PARAM_BANNABLE:
2098 if (args->size)
2099 ret = -EINVAL;
2100 else if (!capable(CAP_SYS_ADMIN) && !args->value)
2101 ret = -EPERM;
2102 else if (args->value)
2103 i915_gem_context_set_bannable(ctx);
2104 else if (i915_gem_context_uses_protected_content(ctx))
2105 ret = -EPERM;
2106 else
2107 i915_gem_context_clear_bannable(ctx);
2108 break;
2109
2110 case I915_CONTEXT_PARAM_RECOVERABLE:
2111 if (args->size)
2112 ret = -EINVAL;
2113 else if (!args->value)
2114 i915_gem_context_clear_recoverable(ctx);
2115 else if (i915_gem_context_uses_protected_content(ctx))
2116 ret = -EPERM;
2117 else
2118 i915_gem_context_set_recoverable(ctx);
2119 break;
2120
2121 case I915_CONTEXT_PARAM_PRIORITY:
2122 ret = set_priority(ctx, args);
2123 break;
2124
2125 case I915_CONTEXT_PARAM_SSEU:
2126 ret = set_sseu(ctx, args);
2127 break;
2128
2129 case I915_CONTEXT_PARAM_PERSISTENCE:
2130 ret = set_persistence(ctx, args);
2131 break;
2132
2133 case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
2134 case I915_CONTEXT_PARAM_NO_ZEROMAP:
2135 case I915_CONTEXT_PARAM_BAN_PERIOD:
2136 case I915_CONTEXT_PARAM_RINGSIZE:
2137 case I915_CONTEXT_PARAM_VM:
2138 case I915_CONTEXT_PARAM_ENGINES:
2139 default:
2140 ret = -EINVAL;
2141 break;
2142 }
2143
2144 return ret;
2145 }
2146
2147 struct create_ext {
2148 struct i915_gem_proto_context *pc;
2149 struct drm_i915_file_private *fpriv;
2150 };
2151
2152 static int create_setparam(struct i915_user_extension __user *ext, void *data)
2153 {
2154 struct drm_i915_gem_context_create_ext_setparam local;
2155 const struct create_ext *arg = data;
2156
2157 if (copy_from_user(&local, ext, sizeof(local)))
2158 return -EFAULT;
2159
2160 if (local.param.ctx_id)
2161 return -EINVAL;
2162
2163 return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param);
2164 }
2165
2166 static int invalid_ext(struct i915_user_extension __user *ext, void *data)
2167 {
2168 return -EINVAL;
2169 }
2170
2171 static const i915_user_extension_fn create_extensions[] = {
2172 [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
2173 [I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext,
2174 };
2175
2176 static bool client_is_banned(struct drm_i915_file_private *file_priv)
2177 {
2178 return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
2179 }
2180
2181 static inline struct i915_gem_context *
2182 __context_lookup(struct drm_i915_file_private *file_priv, u32 id)
2183 {
2184 struct i915_gem_context *ctx;
2185
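	/*
	 * The lookup is RCU protected; only take a reference if the context
	 * has not already dropped to a zero refcount.
	 */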
2186 rcu_read_lock();
2187 ctx = xa_load(&file_priv->context_xa, id);
2188 if (ctx && !kref_get_unless_zero(&ctx->ref))
2189 ctx = NULL;
2190 rcu_read_unlock();
2191
2192 return ctx;
2193 }
2194
2195 static struct i915_gem_context *
2196 finalize_create_context_locked(struct drm_i915_file_private *file_priv,
2197 struct i915_gem_proto_context *pc, u32 id)
2198 {
2199 struct i915_gem_context *ctx;
2200 void *old;
2201
2202 lockdep_assert_held(&file_priv->proto_context_lock);
2203
2204 ctx = i915_gem_create_context(file_priv->dev_priv, pc);
2205 if (IS_ERR(ctx))
2206 return ctx;
2207
2208 gem_context_register(ctx, file_priv, id);
2209
2210 old = xa_erase(&file_priv->proto_context_xa, id);
2211 GEM_BUG_ON(old != pc);
2212 proto_context_close(file_priv->dev_priv, pc);

	/* One reference for the xarray and one for the caller */
2215 return i915_gem_context_get(ctx);
2216 }
2217
2218 struct i915_gem_context *
2219 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
2220 {
2221 struct i915_gem_proto_context *pc;
2222 struct i915_gem_context *ctx;
2223
2224 ctx = __context_lookup(file_priv, id);
2225 if (ctx)
2226 return ctx;
2227
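	/*
	 * Fast path missed: the id may still refer to a proto-context that
	 * needs to be finalized under the proto_context_lock.
	 */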
2228 mutex_lock(&file_priv->proto_context_lock);
2229
2230 ctx = __context_lookup(file_priv, id);
2231 if (!ctx) {
2232 pc = xa_load(&file_priv->proto_context_xa, id);
2233 if (!pc)
2234 ctx = ERR_PTR(-ENOENT);
2235 else
2236 ctx = finalize_create_context_locked(file_priv, pc, id);
2237 }
2238 mutex_unlock(&file_priv->proto_context_lock);
2239
2240 return ctx;
2241 }
2242
2243 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2244 struct drm_file *file)
2245 {
2246 struct drm_i915_private *i915 = to_i915(dev);
2247 struct drm_i915_gem_context_create_ext *args = data;
2248 struct create_ext ext_data;
2249 int ret;
2250 u32 id;
2251
2252 if (!DRIVER_CAPS(i915)->has_logical_contexts)
2253 return -ENODEV;
2254
2255 if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
2256 return -EINVAL;
2257
2258 ret = intel_gt_terminally_wedged(to_gt(i915));
2259 if (ret)
2260 return ret;
2261
2262 ext_data.fpriv = file->driver_priv;
2263 if (client_is_banned(ext_data.fpriv)) {
2264 drm_dbg(&i915->drm,
2265 "client %s[%d] banned from creating ctx\n",
2266 current->comm, task_pid_nr(current));
2267 return -EIO;
2268 }
2269
2270 ext_data.pc = proto_context_create(i915, args->flags);
2271 if (IS_ERR(ext_data.pc))
2272 return PTR_ERR(ext_data.pc);
2273
2274 if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
2275 ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
2276 create_extensions,
2277 ARRAY_SIZE(create_extensions),
2278 &ext_data);
2279 if (ret)
2280 goto err_pc;
2281 }
2282
2283 if (GRAPHICS_VER(i915) > 12) {
2284 struct i915_gem_context *ctx;

		/* Reserve an id for the context before creating it */
2287 ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL,
2288 xa_limit_32b, GFP_KERNEL);
2289 if (ret)
2290 goto err_pc;
2291
2292 ctx = i915_gem_create_context(i915, ext_data.pc);
2293 if (IS_ERR(ctx)) {
2294 ret = PTR_ERR(ctx);
2295 goto err_pc;
2296 }
2297
2298 proto_context_close(i915, ext_data.pc);
2299 gem_context_register(ctx, ext_data.fpriv, id);
2300 } else {
2301 ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id);
2302 if (ret < 0)
2303 goto err_pc;
2304 }
2305
2306 args->ctx_id = id;
2307 drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);
2308
2309 return 0;
2310
2311 err_pc:
2312 proto_context_close(i915, ext_data.pc);
2313 return ret;
2314 }
2315
2316 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2317 struct drm_file *file)
2318 {
2319 struct drm_i915_gem_context_destroy *args = data;
2320 struct drm_i915_file_private *file_priv = file->driver_priv;
2321 struct i915_gem_proto_context *pc;
2322 struct i915_gem_context *ctx;
2323
2324 if (args->pad != 0)
2325 return -EINVAL;
2326
2327 if (!args->ctx_id)
2328 return -ENOENT;

	/*
	 * Hold the proto-context lock to prevent races with
	 * finalize_create_context_locked().
	 */
2333 mutex_lock(&file_priv->proto_context_lock);
2334 ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
2335 pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id);
2336 mutex_unlock(&file_priv->proto_context_lock);
2337
2338 if (!ctx && !pc)
2339 return -ENOENT;
2340 GEM_WARN_ON(ctx && pc);
2341
2342 if (pc)
2343 proto_context_close(file_priv->dev_priv, pc);
2344
2345 if (ctx)
2346 context_close(ctx);
2347
2348 return 0;
2349 }
2350
2351 static int get_sseu(struct i915_gem_context *ctx,
2352 struct drm_i915_gem_context_param *args)
2353 {
2354 struct drm_i915_gem_context_param_sseu user_sseu;
2355 struct intel_context *ce;
2356 unsigned long lookup;
2357 int err;
2358
2359 if (args->size == 0)
2360 goto out;
2361 else if (args->size < sizeof(user_sseu))
2362 return -EINVAL;
2363
2364 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2365 sizeof(user_sseu)))
2366 return -EFAULT;
2367
2368 if (user_sseu.rsvd)
2369 return -EINVAL;
2370
2371 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2372 return -EINVAL;
2373
2374 lookup = 0;
2375 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2376 lookup |= LOOKUP_USER_INDEX;
2377
2378 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2379 if (IS_ERR(ce))
2380 return PTR_ERR(ce);
2381
2382 err = intel_context_lock_pinned(ce);
2383 if (err) {
2384 intel_context_put(ce);
2385 return err;
2386 }
2387
2388 user_sseu.slice_mask = ce->sseu.slice_mask;
2389 user_sseu.subslice_mask = ce->sseu.subslice_mask;
2390 user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2391 user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2392
2393 intel_context_unlock_pinned(ce);
2394 intel_context_put(ce);
2395
2396 if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2397 sizeof(user_sseu)))
2398 return -EFAULT;
2399
2400 out:
2401 args->size = sizeof(user_sseu);
2402
2403 return 0;
2404 }
2405
2406 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2407 struct drm_file *file)
2408 {
2409 struct drm_i915_file_private *file_priv = file->driver_priv;
2410 struct drm_i915_gem_context_param *args = data;
2411 struct i915_gem_context *ctx;
2412 struct i915_address_space *vm;
2413 int ret = 0;
2414
2415 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2416 if (IS_ERR(ctx))
2417 return PTR_ERR(ctx);
2418
2419 switch (args->param) {
2420 case I915_CONTEXT_PARAM_GTT_SIZE:
2421 args->size = 0;
2422 vm = i915_gem_context_get_eb_vm(ctx);
2423 args->value = vm->total;
2424 i915_vm_put(vm);
2425
2426 break;
2427
2428 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2429 args->size = 0;
2430 args->value = i915_gem_context_no_error_capture(ctx);
2431 break;
2432
2433 case I915_CONTEXT_PARAM_BANNABLE:
2434 args->size = 0;
2435 args->value = i915_gem_context_is_bannable(ctx);
2436 break;
2437
2438 case I915_CONTEXT_PARAM_RECOVERABLE:
2439 args->size = 0;
2440 args->value = i915_gem_context_is_recoverable(ctx);
2441 break;
2442
2443 case I915_CONTEXT_PARAM_PRIORITY:
2444 args->size = 0;
2445 args->value = ctx->sched.priority;
2446 break;
2447
2448 case I915_CONTEXT_PARAM_SSEU:
2449 ret = get_sseu(ctx, args);
2450 break;
2451
2452 case I915_CONTEXT_PARAM_VM:
2453 ret = get_ppgtt(file_priv, ctx, args);
2454 break;
2455
2456 case I915_CONTEXT_PARAM_PERSISTENCE:
2457 args->size = 0;
2458 args->value = i915_gem_context_is_persistent(ctx);
2459 break;
2460
2461 case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
2462 ret = get_protected(ctx, args);
2463 break;
2464
2465 case I915_CONTEXT_PARAM_NO_ZEROMAP:
2466 case I915_CONTEXT_PARAM_BAN_PERIOD:
2467 case I915_CONTEXT_PARAM_ENGINES:
2468 case I915_CONTEXT_PARAM_RINGSIZE:
2469 default:
2470 ret = -EINVAL;
2471 break;
2472 }
2473
2474 i915_gem_context_put(ctx);
2475 return ret;
2476 }
2477
2478 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2479 struct drm_file *file)
2480 {
2481 struct drm_i915_file_private *file_priv = file->driver_priv;
2482 struct drm_i915_gem_context_param *args = data;
2483 struct i915_gem_proto_context *pc;
2484 struct i915_gem_context *ctx;
2485 int ret = 0;
2486
2487 mutex_lock(&file_priv->proto_context_lock);
2488 ctx = __context_lookup(file_priv, args->ctx_id);
2489 if (!ctx) {
2490 pc = xa_load(&file_priv->proto_context_xa, args->ctx_id);
2491 if (pc) {
			/*
			 * Contexts should be finalized inside
			 * GEM_CONTEXT_CREATE starting with graphics
			 * version 13.
			 */
2496 WARN_ON(GRAPHICS_VER(file_priv->dev_priv) > 12);
2497 ret = set_proto_ctx_param(file_priv, pc, args);
2498 } else {
2499 ret = -ENOENT;
2500 }
2501 }
2502 mutex_unlock(&file_priv->proto_context_lock);
2503
2504 if (ctx) {
2505 ret = ctx_setparam(file_priv, ctx, args);
2506 i915_gem_context_put(ctx);
2507 }
2508
2509 return ret;
2510 }
2511
2512 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2513 void *data, struct drm_file *file)
2514 {
2515 struct drm_i915_private *i915 = to_i915(dev);
2516 struct drm_i915_reset_stats *args = data;
2517 struct i915_gem_context *ctx;
2518
2519 if (args->flags || args->pad)
2520 return -EINVAL;
2521
2522 ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
2523 if (IS_ERR(ctx))
2524 return PTR_ERR(ctx);

	/*
	 * We opt for unserialised reads here. This may result in tearing in
	 * the extremely unlikely event of a GPU hang on this context as we
	 * are querying the counters off the cpu.
	 */
2533 if (capable(CAP_SYS_ADMIN))
2534 args->reset_count = i915_reset_count(&i915->gpu_error);
2535 else
2536 args->reset_count = 0;
2537
2538 args->batch_active = atomic_read(&ctx->guilty_count);
2539 args->batch_pending = atomic_read(&ctx->active_count);
2540
2541 i915_gem_context_put(ctx);
2542 return 0;
2543 }

/* GEM context-engines iterator: for_each_gem_engine() */
2546 struct intel_context *
2547 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2548 {
2549 const struct i915_gem_engines *e = it->engines;
2550 struct intel_context *ctx;
2551
2552 if (unlikely(!e))
2553 return NULL;
2554
2555 do {
2556 if (it->idx >= e->num_engines)
2557 return NULL;
2558
2559 ctx = e->engines[it->idx++];
2560 } while (!ctx);
2561
2562 return ctx;
2563 }
2564
2565 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2566 #include "selftests/mock_context.c"
2567 #include "selftests/i915_gem_context.c"
2568 #endif
2569
2570 void i915_gem_context_module_exit(void)
2571 {
2572 kmem_cache_destroy(slab_luts);
2573 }
2574
2575 int __init i915_gem_context_module_init(void)
2576 {
2577 slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2578 if (!slab_luts)
2579 return -ENOMEM;
2580
2581 return 0;
2582 }