// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_range_manager.h>

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_ttm_buddy_manager.h"

#include "intel_region_ttm.h"

#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */
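/**
 * DOC: TTM support structure
 *
 * The code in this file provides the glue between i915 memory regions and
 * TTM: it sets up TTM resource managers for the regions and converts the
 * resources handed back by those managers into i915 refcounted sg-tables.
 */

/**
 * intel_region_ttm_device_init - Initialize the TTM device for an i915
 * private device structure.
 * @dev_priv: Pointer to an i915 device private structure.
 *
 * Return: 0 on success, negative error code on failure.
 */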
int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *drm = &dev_priv->drm;

	return ttm_device_init(&dev_priv->bdev, i915_ttm_driver(),
			       drm->dev, drm->anon_inode->i_mapping,
			       drm->vma_offset_manager, false, false);
}
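/**
 * intel_region_ttm_device_fini - Finalize the TTM device.
 * @dev_priv: Pointer to an i915 device private structure.
 */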
void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv)
{
	ttm_device_fini(&dev_priv->bdev);
}
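/*
 * Map an i915 memory region to a TTM memory type: system memory maps to
 * TTM_PL_SYSTEM, while local and mock regions map to driver-private types
 * based on the region instance.
 */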
int intel_region_to_ttm_type(const struct intel_memory_region *mem)
{
	int type;

	GEM_BUG_ON(mem->type != INTEL_MEMORY_LOCAL &&
		   mem->type != INTEL_MEMORY_MOCK &&
		   mem->type != INTEL_MEMORY_SYSTEM);

	if (mem->type == INTEL_MEMORY_SYSTEM)
		return TTM_PL_SYSTEM;

	type = mem->instance + TTM_PL_PRIV;
	GEM_BUG_ON(type >= TTM_NUM_MEM_TYPES);

	return type;
}
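/**
 * intel_region_ttm_init - Initialize a memory region for TTM.
 * @mem: The region to initialize.
 *
 * Sets up a TTM buddy resource manager for the region and stashes it in the
 * region's private pointer for later lookup and takedown.
 *
 * Return: 0 on success, negative error code on failure.
 */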
int intel_region_ttm_init(struct intel_memory_region *mem)
{
	struct ttm_device *bdev = &mem->i915->bdev;
	int mem_type = intel_region_to_ttm_type(mem);
	int ret;

	ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
				      resource_size(&mem->region),
				      mem->io_size,
				      mem->min_page_size, PAGE_SIZE);
	if (ret)
		return ret;

	mem->region_private = ttm_manager_type(bdev, mem_type);

	return 0;
}
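/**
 * intel_region_ttm_fini - Finalize a TTM region.
 * @mem: The memory region.
 *
 * Waits for objects remaining in the region to be freed and then takes down
 * the TTM resource manager backing it.
 *
 * Return: 0 on success, or a negative error code if objects remain in the
 * region or the manager takedown fails.
 */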
int intel_region_ttm_fini(struct intel_memory_region *mem)
{
	struct ttm_resource_manager *man = mem->region_private;
	int ret = -EBUSY;
	int count;

	/*
	 * Clean up the resource manager first. This puts the manager's move
	 * fence, which may in turn be keeping objects in the region alive.
	 */
	if (man)
		ttm_resource_manager_cleanup(man);

	/* Flush objects from the region. */
	for (count = 0; count < 10; ++count) {
		i915_gem_flush_free_objects(mem->i915);

		mutex_lock(&mem->objects.lock);
		if (list_empty(&mem->objects.list))
			ret = 0;
		mutex_unlock(&mem->objects.lock);
		if (!ret)
			break;

		msleep(20);
		flush_delayed_work(&mem->i915->bdev.wq);
	}

	/*
	 * If objects were leaked, don't tear down the manager; that would
	 * risk a use-after-free when they are eventually released.
	 */
	if (ret || !man)
		return ret;

	ret = i915_ttm_buddy_man_fini(&mem->i915->bdev,
				      intel_region_to_ttm_type(mem));
	GEM_WARN_ON(ret);
	mem->region_private = NULL;

	return ret;
}
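/**
 * intel_region_ttm_resource_to_rsgt - Convert an opaque TTM resource manager
 * resource to a refcounted sg-table.
 * @mem: The memory region.
 * @res: The resource obtained from the TTM resource manager.
 * @page_alignment: Required page alignment for each sg entry. Power of two.
 *
 * The GEM backends typically operate on sg-tables, so provide a way to
 * translate the resource handed back by a TTM resource manager into an i915
 * refcounted sg-table.
 *
 * Return: A refcounted sg-table on success, an error pointer on failure.
 */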
struct i915_refct_sgt *
intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
				  struct ttm_resource *res,
				  u32 page_alignment)
{
	if (mem->is_range_manager) {
		struct ttm_range_mgr_node *range_node =
			to_ttm_range_mgr_node(res);

		return i915_rsgt_from_mm_node(&range_node->mm_nodes[0],
					      mem->region.start,
					      page_alignment);
	} else {
		return i915_rsgt_from_buddy_resource(res, mem->region.start,
						     page_alignment);
	}
}

#ifdef CONFIG_DRM_I915_SELFTEST
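/**
 * intel_region_ttm_resource_alloc - Allocate memory resources from a region.
 * @mem: The memory region.
 * @offset: Fixed offset in bytes, or I915_BO_INVALID_OFFSET if the allocator
 * may place the resource freely.
 * @size: The requested size in bytes.
 * @flags: Allocation flags (I915_BO_ALLOC_*).
 *
 * Allocates memory directly from the region's resource manager using a mock
 * buffer object, without going through the full GEM object machinery.
 * Intended for selftests only.
 *
 * Return: A valid pointer to a struct ttm_resource, or an error pointer on
 * failure.
 */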
struct ttm_resource *
intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
				resource_size_t offset,
				resource_size_t size,
				unsigned int flags)
{
	struct ttm_resource_manager *man = mem->region_private;
	struct ttm_place place = {};
	struct ttm_buffer_object mock_bo = {};
	struct ttm_resource *res;
	int ret;

	if (flags & I915_BO_ALLOC_CONTIGUOUS)
		place.flags |= TTM_PL_FLAG_CONTIGUOUS;
	if (offset != I915_BO_INVALID_OFFSET) {
		place.fpfn = offset >> PAGE_SHIFT;
		place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
	} else if (mem->io_size && mem->io_size < mem->total) {
		if (flags & I915_BO_ALLOC_GPU_ONLY) {
			place.flags |= TTM_PL_FLAG_TOPDOWN;
		} else {
			place.fpfn = 0;
			place.lpfn = mem->io_size >> PAGE_SHIFT;
		}
	}

	mock_bo.base.size = size;
	mock_bo.bdev = &mem->i915->bdev;

	ret = man->func->alloc(man, &mock_bo, &place, &res);
	if (ret == -ENOSPC)
		ret = -ENXIO;
	if (!ret)
		res->bo = NULL;
	return ret ? ERR_PTR(ret) : res;
}
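/*
 * Hypothetical usage sketch (not part of the driver): a selftest could pair
 * the allocator above with intel_region_ttm_resource_to_rsgt() and
 * intel_region_ttm_resource_free(). The names "mem" and "obj_size" below are
 * illustrative only.
 *
 *	struct ttm_resource *res;
 *	struct i915_refct_sgt *rsgt;
 *
 *	res = intel_region_ttm_resource_alloc(mem, I915_BO_INVALID_OFFSET,
 *					      obj_size, 0);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *
 *	rsgt = intel_region_ttm_resource_to_rsgt(mem, res, PAGE_SIZE);
 *	if (IS_ERR(rsgt)) {
 *		intel_region_ttm_resource_free(mem, res);
 *		return PTR_ERR(rsgt);
 *	}
 *
 *	... use rsgt->table ...
 *
 *	i915_refct_sgt_put(rsgt);
 *	intel_region_ttm_resource_free(mem, res);
 */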
#endif
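/**
 * intel_region_ttm_resource_free - Free a resource allocated from a region
 * resource manager.
 * @mem: The region the resource was allocated from.
 * @res: The opaque resource representing the allocation.
 */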
void intel_region_ttm_resource_free(struct intel_memory_region *mem,
				    struct ttm_resource *res)
{
	struct ttm_resource_manager *man = mem->region_private;
	struct ttm_buffer_object mock_bo = {};

	mock_bo.base.size = res->num_pages << PAGE_SHIFT;
	mock_bo.bdev = &mem->i915->bdev;
	res->bo = &mock_bo;

0252 }