#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>

#include "gt/intel_gt_requests.h"

#include "i915_trace.h"

static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}

static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	/* Consider only shrinkable objects. */
	if (!i915_gem_object_is_shrinkable(obj))
		return false;

	/*
	 * We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}

static bool drop_pages(struct drm_i915_gem_object *obj,
		       unsigned long shrink, bool trylock_vm)
{
	unsigned long flags;

	flags = 0;
	if (shrink & I915_SHRINK_ACTIVE)
		flags |= I915_GEM_OBJECT_UNBIND_ACTIVE;
	if (!(shrink & I915_SHRINK_BOUND))
		flags |= I915_GEM_OBJECT_UNBIND_TEST;
	if (trylock_vm)
		flags |= I915_GEM_OBJECT_UNBIND_VM_TRYLOCK;

	if (i915_gem_object_unbind(obj, flags) == 0)
		return true;

	return false;
}

static int try_to_writeback(struct drm_i915_gem_object *obj, unsigned int flags)
{
	if (obj->ops->shrink) {
		unsigned int shrink_flags = 0;

		if (!(flags & I915_SHRINK_ACTIVE))
			shrink_flags |= I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT;

		if (flags & I915_SHRINK_WRITEBACK)
			shrink_flags |= I915_GEM_OBJECT_SHRINK_WRITEBACK;

		return obj->ops->shrink(obj, shrink_flags);
	}

	return 0;
}
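
/**
 * i915_gem_shrink - Shrink buffer object caches
 * @ww: i915 gem ww acquire ctx, or NULL
 * @i915: i915 device
 * @target: amount of memory to make available, in pages
 * @nr_scanned: optional output for the number of pages scanned (incremental)
 * @shrink: control flags for selecting cache types
 *
 * This is the main entry point into the shrinker. It tries to release up to
 * @target pages of backing storage, walking the purgeable and/or shrinkable
 * object lists as selected by @shrink.
 *
 * Returns the number of pages of backing storage actually released, or a
 * negative error code if the ww locking transaction failed.
 */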
unsigned long
i915_gem_shrink(struct i915_gem_ww_ctx *ww,
		struct drm_i915_private *i915,
		unsigned long target,
		unsigned long *nr_scanned,
		unsigned int shrink)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &i915->mm.purge_list, ~0u },
		{
			&i915->mm.shrink_list,
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND
		},
		{ NULL, 0 },
	}, *phase;
	intel_wakeref_t wakeref = 0;
	unsigned long count = 0;
	unsigned long scanned = 0;
	int err = 0;

	/* Outside a ww transaction, only trylock the vm on platforms with the concurrent-access workaround */
	bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);

	trace_i915_gem_shrink(i915, target, shrink);

	/*
	 * Unbinding bound objects requires hardware access, so only consider
	 * them if the device is already awake; do not wake the device just to
	 * recover a little memory.
	 */
	if (shrink & I915_SHRINK_BOUND) {
		wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
		if (!wakeref)
			shrink &= ~I915_SHRINK_BOUND;
	}

	/*
	 * When shrinking the active list, retire outstanding requests first
	 * so that idle contexts release their pinned objects and become
	 * eligible for shrinking below.
	 */
	if (shrink & I915_SHRINK_ACTIVE)
		intel_gt_retire_requests(to_gt(i915));

	/*
	 * The lists may be modified underneath us (e.g. by retiring requests),
	 * so strictly process one element at a time: take a reference, drop
	 * the lock, operate on the object, then re-take the lock and recheck
	 * the list on the next iteration. Objects already scanned are parked
	 * on a local list and spliced back afterwards so we do not revisit
	 * them.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;
		unsigned long flags;

		if ((shrink & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);

		spin_lock_irqsave(&i915->mm.obj_lock, flags);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       mm.link))) {
			list_move_tail(&obj->mm.link, &still_in_list);

			if (shrink & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(shrink & I915_SHRINK_ACTIVE) &&
			    i915_gem_object_is_framebuffer(obj))
				continue;

			if (!can_release_pages(obj))
				continue;

			if (!kref_get_unless_zero(&obj->base.refcount))
				continue;

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

			/* Only trylock the object outside of a ww transaction */
			if (!ww) {
				if (!i915_gem_object_trylock(obj, NULL))
					goto skip;
			} else {
				err = i915_gem_object_lock(obj, ww);
				if (err)
					goto skip;
			}

			if (drop_pages(obj, shrink, trylock_vm) &&
			    !__i915_gem_object_put_pages(obj) &&
			    !try_to_writeback(obj, shrink))
				count += obj->base.size >> PAGE_SHIFT;

			if (!ww)
				i915_gem_object_unlock(obj);

			scanned += obj->base.size >> PAGE_SHIFT;
skip:
			i915_gem_object_put(obj);

			spin_lock_irqsave(&i915->mm.obj_lock, flags);
			if (err)
				break;
		}
		list_splice_tail(&still_in_list, phase->list);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
		if (err)
			break;
	}

	if (shrink & I915_SHRINK_BOUND)
		intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	if (err)
		return err;

	if (nr_scanned)
		*nr_scanned += scanned;
	return count;
}
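
/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @i915: i915 device
 *
 * A simple wrapper around i915_gem_shrink() that aggressively releases the
 * backing storage of all bound and unbound objects, holding a runtime-pm
 * wakeref for the duration.
 *
 * Returns the number of pages of backing storage actually released.
 */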
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;
	unsigned long freed = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		freed = i915_gem_shrink(NULL, i915, -1UL, NULL,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND);
	}

	return freed;
}

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *i915 =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long num_objects;
	unsigned long count;

	count = READ_ONCE(i915->mm.shrink_memory) >> PAGE_SHIFT;
	num_objects = READ_ONCE(i915->mm.shrink_count);

	/*
	 * Update our preferred vmscan batch size for the next pass.
	 * Our rough guess for an effective batch size is roughly 2
	 * available GEM objects worth of pages: we don't want the shrinker
	 * to fire until it is worth the cost of freeing an entire object.
	 * The result is clamped to at least 128 pages so the batch can
	 * ramp back up.
	 */
	if (num_objects) {
		unsigned long avg = 2 * count / num_objects;

		i915->mm.shrinker.batch =
			max((i915->mm.shrinker.batch + avg) >> 1,
			    128ul);
	}

	return count;
}

static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *i915 =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long freed;

	sc->nr_scanned = 0;

	freed = i915_gem_shrink(NULL, i915,
				sc->nr_to_scan,
				&sc->nr_scanned,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND);
	if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
			freed += i915_gem_shrink(NULL, i915,
						 sc->nr_to_scan - sc->nr_scanned,
						 &sc->nr_scanned,
						 I915_SHRINK_ACTIVE |
						 I915_SHRINK_BOUND |
						 I915_SHRINK_UNBOUND |
						 I915_SHRINK_WRITEBACK);
		}
	}

	return sc->nr_scanned ? freed : SHRINK_STOP;
}

static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *i915 =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, available, freed_pages;
	intel_wakeref_t wakeref;
	unsigned long flags;

	freed_pages = 0;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
					       I915_SHRINK_BOUND |
					       I915_SHRINK_UNBOUND |
					       I915_SHRINK_WRITEBACK);

	/*
	 * Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware; report what remains pinned and what
	 * is still available instead.
	 */
	available = unevictable = 0;
	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			available += obj->base.size >> PAGE_SHIFT;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

	if (freed_pages || available)
		pr_info("Purging GPU memory, %lu pages freed, "
			"%lu pages still pinned, %lu pages left available.\n",
			freed_pages, unevictable, available);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *i915 =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
					       I915_SHRINK_BOUND |
					       I915_SHRINK_UNBOUND |
					       I915_SHRINK_VMAPS);

	/* Also release any vmap space consumed by idle GGTT iomappings. */
	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
	list_for_each_entry_safe(vma, next,
				 &to_gt(i915)->ggtt->vm.bound_list, vm_link) {
		unsigned long count = vma->node.size >> PAGE_SHIFT;
		struct drm_i915_gem_object *obj = vma->obj;

		if (!vma->iomap || i915_vma_is_active(vma))
			continue;

		if (!i915_gem_object_trylock(obj, NULL))
			continue;

		if (__i915_vma_unbind(vma) == 0)
			freed_pages += count;

		i915_gem_object_unlock(obj);
	}
	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
{
	i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
	i915->mm.shrinker.seeks = DEFAULT_SEEKS;
	i915->mm.shrinker.batch = 4096;
	drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker,
						  "drm-i915_gem"));

	i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier));

	i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	drm_WARN_ON(&i915->drm,
		    register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}

void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
{
	drm_WARN_ON(&i915->drm,
		    unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
	drm_WARN_ON(&i915->drm,
		    unregister_oom_notifier(&i915->mm.oom_notifier));
	unregister_shrinker(&i915->mm.shrinker);
}

void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
				    struct mutex *mutex)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);

	mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
	mutex_release(&mutex->dep_map, _RET_IP_);

	fs_reclaim_release(GFP_KERNEL);
}

#define obj_to_i915(obj__) to_i915((obj__)->base.dev)
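
/**
 * i915_gem_object_make_unshrinkable - Hide the object from the shrinker.
 * @obj: The GEM object.
 *
 * Removes the object from the shrink/purge lists (if present) and pins it
 * against further shrinking, so its backing pages are no longer considered
 * for reclaim.
 */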
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = obj_to_i915(obj);
	unsigned long flags;

	/*
	 * Fast path: if the object is already pinned out of the shrinker
	 * lists (shrink_pin != 0), just bump the pin count without taking
	 * the obj_lock.
	 */
	if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
		return;

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	if (!atomic_fetch_inc(&obj->mm.shrink_pin) &&
	    !list_empty(&obj->mm.link)) {
		list_del_init(&obj->mm.link);
		i915->mm.shrink_count--;
		i915->mm.shrink_memory -= obj->base.size;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}

static void ___i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
					       struct list_head *head)
{
	struct drm_i915_private *i915 = obj_to_i915(obj);
	unsigned long flags;

	if (!i915_gem_object_is_shrinkable(obj))
		return;

	if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
		return;

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	GEM_BUG_ON(!kref_read(&obj->base.refcount));
	if (atomic_dec_and_test(&obj->mm.shrink_pin)) {
		GEM_BUG_ON(!list_empty(&obj->mm.link));

		list_add_tail(&obj->mm.link, head);
		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
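
/**
 * __i915_gem_object_make_shrinkable - Move the object to the tail of the
 * shrinkable list.
 * @obj: The GEM object.
 *
 * Objects on the shrinkable list may have their backing pages reclaimed or
 * swapped out by the shrinker. Unlike i915_gem_object_make_shrinkable(), this
 * variant does not assert that the object already has pages.
 */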
void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
	___i915_gem_object_make_shrinkable(obj,
					   &obj_to_i915(obj)->mm.shrink_list);
}
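
/**
 * __i915_gem_object_make_purgeable - Move the object to the tail of the
 * purgeable list.
 * @obj: The GEM object.
 *
 * Objects on the purgeable list are scanned first by the shrinker and may
 * have their contents discarded. Unlike i915_gem_object_make_purgeable(),
 * this variant does not assert that the object already has pages.
 */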
void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
	___i915_gem_object_make_shrinkable(obj,
					   &obj_to_i915(obj)->mm.purge_list);
}
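
/**
 * i915_gem_object_make_shrinkable - Move the object to the tail of the
 * shrinkable list.
 * @obj: The GEM object.
 *
 * Objects on the shrinkable list may have their backing pages reclaimed or
 * swapped out by the shrinker. The object must already have pages.
 */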
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	__i915_gem_object_make_shrinkable(obj);
}
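
/**
 * i915_gem_object_make_purgeable - Move the object to the tail of the
 * purgeable list.
 * @obj: The GEM object.
 *
 * Objects on the purgeable list are scanned first by the shrinker and may
 * have their contents discarded. The object must already have pages.
 */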
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	__i915_gem_object_make_purgeable(obj);
}