/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gem/i915_gem_ttm_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"

#include "i915_driver.h"
#include "i915_drv.h"

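/*
 * wbinvd_on_all_cpus() is an x86-only helper (asm/smp.h). On other
 * architectures there is no equivalent, so fall back to warning that a
 * cache flush has been skipped.
 */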
#if defined(CONFIG_X86)
#include <asm/smp.h>
#else
#define wbinvd_on_all_cpus() \
    pr_warn(DRIVER_NAME ": Missing cache flush in %s\n", __func__)
#endif

void i915_gem_suspend(struct drm_i915_private *i915)
{
    GEM_TRACE("%s\n", dev_name(i915->drm.dev));

    intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref, 0);
    flush_workqueue(i915->wq);

    /*
     * We have to flush all the executing contexts to main memory so
     * that they can be saved in the hibernation image. To ensure the
     * last context image is coherent, we have to switch away from it.
     * That leaves the i915->kernel_context still active when we
     * actually suspend, and its image in memory may not match the GPU
     * state. Fortunately, the kernel_context is disposable and we do
     * not rely on its state.
     */
    intel_gt_suspend_prepare(to_gt(i915));

    i915_gem_drain_freed_objects(i915);
}

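/*
 * Restore the backed-up contents of all local memory (LMEM) regions.
 * With I915_TTM_BACKUP_ALLOW_GPU in @flags the GPU (migrate context)
 * may be used for the copy; otherwise we fall back to memcpy.
 */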
static int lmem_restore(struct drm_i915_private *i915, u32 flags)
{
    struct intel_memory_region *mr;
    int ret = 0, id;

    for_each_memory_region(mr, i915, id) {
        if (mr->type == INTEL_MEMORY_LOCAL) {
            ret = i915_ttm_restore_region(mr, flags);
            if (ret)
                break;
        }
    }

    return ret;
}

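/*
 * Back up the contents of all local memory regions ahead of suspend so
 * that lmem_restore() can bring them back on resume. @flags is staged
 * over several passes by i915_gem_backup_suspend() below.
 */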
static int lmem_suspend(struct drm_i915_private *i915, u32 flags)
{
    struct intel_memory_region *mr;
    int ret = 0, id;

    for_each_memory_region(mr, i915, id) {
        if (mr->type == INTEL_MEMORY_LOCAL) {
            ret = i915_ttm_backup_region(mr, flags);
            if (ret)
                break;
        }
    }

    return ret;
}

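/*
 * Recover the local memory regions after a failed backup pass; called
 * from the error path of i915_gem_backup_suspend().
 */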
static void lmem_recover(struct drm_i915_private *i915)
{
    struct intel_memory_region *mr;
    int id;

    for_each_memory_region(mr, i915, id)
        if (mr->type == INTEL_MEMORY_LOCAL)
            i915_ttm_recover_region(mr);
}

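/*
 * Backup is staged in three passes: first opportunistically evict what
 * can be moved while the GPU is still in use, then, after
 * i915_gem_suspend(), back up newly unpinned and pinned objects with
 * GPU assistance, and finally copy whatever remains with memcpy once
 * the migrate context is no longer needed.
 */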
int i915_gem_backup_suspend(struct drm_i915_private *i915)
{
    int ret;

    /* Opportunistically try to evict unpinned objects */
    ret = lmem_suspend(i915, I915_TTM_BACKUP_ALLOW_GPU);
    if (ret)
        goto out_recover;

    i915_gem_suspend(i915);

    /*
     * More objects may have become unpinned as requests were
     * retired. Now try to evict again. The gt may be wedged here,
     * in which case we automatically fall back to memcpy.
     * We also allow backing up pinned objects that have not been
     * marked for early recovery; these may contain, for example,
     * page-tables for the migrate context.
     */
    ret = lmem_suspend(i915, I915_TTM_BACKUP_ALLOW_GPU |
                       I915_TTM_BACKUP_PINNED);
    if (ret)
        goto out_recover;

    /*
     * Remaining objects are backed up using memcpy once we've stopped
     * using the migrate context.
     */
    ret = lmem_suspend(i915, I915_TTM_BACKUP_PINNED);
    if (ret)
        goto out_recover;

    return 0;

out_recover:
    lmem_recover(i915);

    return ret;
}

void i915_gem_suspend_late(struct drm_i915_private *i915)
{
    struct drm_i915_gem_object *obj;
    struct list_head *phases[] = {
        &i915->mm.shrink_list,
        &i915->mm.purge_list,
        NULL
    }, **phase;
    unsigned long flags;
    bool flush = false;

    /*
     * Neither the BIOS, ourselves nor any other kernel expects the
     * system to be in execlists mode on startup, so we need to reset
     * the GPU back to legacy mode. And the only known way to disable
     * logical contexts is through a GPU reset.
     *
     * So in order to leave the system in a known default configuration,
     * always reset the GPU upon unload and suspend. Afterwards we
     * clean up the GEM state tracking, flushing off the requests and
     * leaving the system in a known idle state.
     *
     * Note that it is of the utmost importance that the GPU is idle
     * and all stray writes are flushed *before* we dismantle the
     * backing storage for the pinned objects.
     *
     * However, since we are uncertain that resetting the GPU on older
     * machines is a good idea, we don't, just in case it leaves the
     * machine in an unusable condition.
     */

    intel_gt_suspend_late(to_gt(i915));

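    /*
     * Hibernation will read all remaining pages with the CPU, so move
     * every shrinkable and purgeable object to the CPU write domain
     * and, if any of them may hold data the CPU cannot read coherently,
     * flush the CPU caches with wbinvd.
     */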
    spin_lock_irqsave(&i915->mm.obj_lock, flags);
    for (phase = phases; *phase; phase++) {
        list_for_each_entry(obj, *phase, mm.link) {
            if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
                flush |= (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0;
            __start_cpu_write(obj); /* presume auto-hibernate */
        }
    }
    spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
    if (flush)
        wbinvd_on_all_cpus();
}

int i915_gem_freeze(struct drm_i915_private *i915)
{
    /* Discard all purgeable objects; let userspace recover those as
     * required after resuming.
     */
    i915_gem_shrink_all(i915);

    return 0;
}

int i915_gem_freeze_late(struct drm_i915_private *i915)
{
    struct drm_i915_gem_object *obj;
    intel_wakeref_t wakeref;

    /*
     * Called just before we write the hibernation image.
     *
     * We need to update the domain tracking to reflect that the CPU
     * will be accessing all the pages to create the hibernation image
     * and to restore from it, and so upon restoration those pages will
     * be in the CPU domain.
     *
     * To make sure the hibernation image contains the latest state,
     * we update that state just before writing out the image.
     *
     * To try to reduce the size of the hibernation image, we manually
     * shrink the objects as well; see i915_gem_freeze().
     */

    with_intel_runtime_pm(&i915->runtime_pm, wakeref)
        i915_gem_shrink(NULL, i915, -1UL, NULL, ~0);
    i915_gem_drain_freed_objects(i915);

    wbinvd_on_all_cpus();
    list_for_each_entry(obj, &i915->mm.shrink_list, mm.link)
        __start_cpu_write(obj);

    return 0;
}

void i915_gem_resume(struct drm_i915_private *i915)
{
    int ret;

    GEM_TRACE("%s\n", dev_name(i915->drm.dev));

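    /*
     * First restore pass, done with memcpy before the GT is brought
     * back up (presumably only the objects marked for early recovery);
     * the rest is restored with GPU assistance once intel_gt_resume()
     * has run.
     */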
    ret = lmem_restore(i915, 0);
    GEM_WARN_ON(ret);

    /*
     * As we didn't flush the kernel context before suspend, we cannot
     * guarantee that the context image is complete. So let's just reset
     * it and start again.
     */
    intel_gt_resume(to_gt(i915));

    ret = lmem_restore(i915, I915_TTM_BACKUP_ALLOW_GPU);
    GEM_WARN_ON(ret);
}