0001
0002
0003
0004
0005
0006 #include <drm/ttm/ttm_placement.h>
0007 #include <drm/ttm/ttm_tt.h>
0008
0009 #include "i915_drv.h"
0010 #include "intel_memory_region.h"
0011 #include "intel_region_ttm.h"
0012
0013 #include "gem/i915_gem_region.h"
0014 #include "gem/i915_gem_ttm.h"
0015 #include "gem/i915_gem_ttm_move.h"
0016 #include "gem/i915_gem_ttm_pm.h"
0017
0018
0019
0020
0021
0022 void i915_ttm_backup_free(struct drm_i915_gem_object *obj)
0023 {
0024 if (obj->ttm.backup) {
0025 i915_gem_object_put(obj->ttm.backup);
0026 obj->ttm.backup = NULL;
0027 }
0028 }
0029
0030
0031
0032
0033
0034
0035
/**
 * struct i915_gem_ttm_pm_apply - Apply-to-region subclass carrying the
 * suspend / resume options for the backup and restore callbacks.
 * @base: The struct i915_gem_apply_to_region we derive from.
 * @allow_gpu: Whether the copy may be performed with GPU assistance
 * (forwarded to i915_gem_obj_copy_ttm() by the callbacks).
 * @backup_pinned: Whether non-evictable (pinned) objects should also be
 * backed up to a shmem copy; see i915_ttm_backup().
 */
struct i915_gem_ttm_pm_apply {
	struct i915_gem_apply_to_region base;
	bool allow_gpu : 1;
	bool backup_pinned : 1;
};
0041
/*
 * i915_ttm_backup - Back up a single object for suspend.
 *
 * Depending on @pm_apply, either evicts the object to system memory or
 * copies its contents into a freshly created shmem backup object, which
 * is stashed in obj->ttm.backup for i915_ttm_restore() to consume.
 * Returns 0 on success or when no backup is needed, negative errno on
 * failure.
 */
static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
			   struct drm_i915_gem_object *obj)
{
	struct i915_gem_ttm_pm_apply *pm_apply =
		container_of(apply, typeof(*pm_apply), base);
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_buffer_object *backup_bo;
	struct drm_i915_private *i915 =
		container_of(bo->bdev, typeof(*i915), bdev);
	struct drm_i915_gem_object *backup;
	struct ttm_operation_ctx ctx = {};
	int err = 0;

	/* Already in system memory, or already backed up: nothing to do. */
	if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
		return 0;

	/*
	 * Evictable objects don't need a separate backup copy; just move
	 * them to the system placement instead.
	 */
	if (pm_apply->allow_gpu && i915_gem_object_evictable(obj))
		return ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);

	/*
	 * Skip pinned objects unless this pass asked for them. PM_EARLY
	 * objects are skipped on the GPU-assisted pass — presumably they
	 * are handled by a separate (earlier) pass; confirm against the
	 * suspend sequence in the callers.
	 */
	if (!pm_apply->backup_pinned ||
	    (pm_apply->allow_gpu && (obj->flags & I915_BO_ALLOC_PM_EARLY)))
		return 0;

	/* Volatile objects don't need their contents preserved. */
	if (obj->flags & I915_BO_ALLOC_PM_VOLATILE)
		return 0;

	backup = i915_gem_object_create_shmem(i915, obj->base.size);
	if (IS_ERR(backup))
		return PTR_ERR(backup);

	err = i915_gem_object_lock(backup, apply->ww);
	if (err)
		goto out_no_lock;

	backup_bo = i915_gem_to_ttm(backup);
	err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
	if (err)
		goto out_no_populate;

	/* Copy errors here are unexpected; warn but carry on. */
	err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false);
	GEM_WARN_ON(err);
	ttm_bo_wait_ctx(backup_bo, &ctx);

	/* Ownership of the backup reference transfers to @obj. */
	obj->ttm.backup = backup;
	return 0;

out_no_populate:
	i915_gem_ww_unlock_single(backup);
out_no_lock:
	i915_gem_object_put(backup);

	return err;
}
0095
/*
 * i915_ttm_recover - Per-object recovery callback: drop any backup copy.
 * Always returns 0. NOTE(review): presumably used to clean up after an
 * aborted suspend — confirm against the i915_ttm_recover_region() callers.
 */
static int i915_ttm_recover(struct i915_gem_apply_to_region *apply,
			    struct drm_i915_gem_object *obj)
{
	i915_ttm_backup_free(obj);
	return 0;
}
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111 void i915_ttm_recover_region(struct intel_memory_region *mr)
0112 {
0113 static const struct i915_gem_apply_to_region_ops recover_ops = {
0114 .process_obj = i915_ttm_recover,
0115 };
0116 struct i915_gem_apply_to_region apply = {.ops = &recover_ops};
0117 int ret;
0118
0119 ret = i915_gem_process_region(mr, &apply);
0120 GEM_WARN_ON(ret);
0121 }
0122
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132
0133
0134 int i915_ttm_backup_region(struct intel_memory_region *mr, u32 flags)
0135 {
0136 static const struct i915_gem_apply_to_region_ops backup_ops = {
0137 .process_obj = i915_ttm_backup,
0138 };
0139 struct i915_gem_ttm_pm_apply pm_apply = {
0140 .base = {.ops = &backup_ops},
0141 .allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
0142 .backup_pinned = flags & I915_TTM_BACKUP_PINNED,
0143 };
0144
0145 return i915_gem_process_region(mr, &pm_apply.base);
0146 }
0147
0148 static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
0149 struct drm_i915_gem_object *obj)
0150 {
0151 struct i915_gem_ttm_pm_apply *pm_apply =
0152 container_of(apply, typeof(*pm_apply), base);
0153 struct drm_i915_gem_object *backup = obj->ttm.backup;
0154 struct ttm_buffer_object *backup_bo = i915_gem_to_ttm(backup);
0155 struct ttm_operation_ctx ctx = {};
0156 int err;
0157
0158 if (!backup)
0159 return 0;
0160
0161 if (!pm_apply->allow_gpu && !(obj->flags & I915_BO_ALLOC_PM_EARLY))
0162 return 0;
0163
0164 err = i915_gem_object_lock(backup, apply->ww);
0165 if (err)
0166 return err;
0167
0168
0169 err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
0170 if (!err) {
0171 err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
0172 false);
0173 GEM_WARN_ON(err);
0174 ttm_bo_wait_ctx(backup_bo, &ctx);
0175
0176 obj->ttm.backup = NULL;
0177 err = 0;
0178 }
0179
0180 i915_gem_ww_unlock_single(backup);
0181
0182 if (!err)
0183 i915_gem_object_put(backup);
0184
0185 return err;
0186 }
0187
0188
0189
0190
0191
0192
0193
0194
0195
0196
0197
0198 int i915_ttm_restore_region(struct intel_memory_region *mr, u32 flags)
0199 {
0200 static const struct i915_gem_apply_to_region_ops restore_ops = {
0201 .process_obj = i915_ttm_restore,
0202 };
0203 struct i915_gem_ttm_pm_apply pm_apply = {
0204 .base = {.ops = &restore_ops},
0205 .allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
0206 };
0207
0208 return i915_gem_process_region(mr, &pm_apply.base);
0209 }