// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/prandom.h>

#include <uapi/drm/i915_drm.h>

#include "intel_memory_region.h"
#include "i915_drv.h"
#include "i915_ttm_buddy_manager.h"

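/*
 * Translation table from the driver's INTEL_REGION_* index to the memory
 * class/instance pair used when probing and exposing each region.
 */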
static const struct {
	u16 class;
	u16 instance;
} intel_region_map[] = {
	[INTEL_REGION_SMEM] = {
		.class = INTEL_MEMORY_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_LMEM_0] = {
		.class = INTEL_MEMORY_LOCAL,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_SMEM] = {
		.class = INTEL_MEMORY_STOLEN_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_LMEM] = {
		.class = INTEL_MEMORY_STOLEN_LOCAL,
		.instance = 0,
	},
};

static int __iopagetest(struct intel_memory_region *mem,
			u8 __iomem *va, int pagesize,
			u8 value, resource_size_t offset,
			const void *caller)
{
	int byte = prandom_u32_max(pagesize);
	u8 result[3];

	memset_io(va, value, pagesize);
	wmb();

	result[0] = ioread8(va);
	result[1] = ioread8(va + byte);
	result[2] = ioread8(va + pagesize - 1);
	if (memchr_inv(result, value, sizeof(result))) {
		dev_err(mem->i915->drm.dev,
			"Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n",
			&mem->region, &mem->io_start, &offset, caller,
			value, result[0], result[1], result[2]);
		return -EINVAL;
	}

	return 0;
}

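/*
 * Map a single page of the region's IO aperture write-combined and verify
 * that each test pattern (and its complement) written through the mapping
 * reads back intact.
 */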
static int iopagetest(struct intel_memory_region *mem,
		      resource_size_t offset,
		      const void *caller)
{
	const u8 val[] = { 0x0, 0xa5, 0xc3, 0xf0 };
	void __iomem *va;
	int err;
	int i;

	va = ioremap_wc(mem->io_start + offset, PAGE_SIZE);
	if (!va) {
		dev_err(mem->i915->drm.dev,
			"Failed to ioremap memory region [%pa + %pa] for %ps\n",
			&mem->io_start, &offset, caller);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		err = __iopagetest(mem, va, PAGE_SIZE, val[i], offset, caller);
		if (err)
			break;

		err = __iopagetest(mem, va, PAGE_SIZE, ~val[i], offset, caller);
		if (err)
			break;
	}

	iounmap(va);
	return err;
}

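/*
 * Pick a (pseudo) random page-aligned offset below @last for the spot check.
 * Only 32 bits of randomness are used, which is plenty for this purpose.
 */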
static resource_size_t random_page(resource_size_t last)
{
	return prandom_u32_max(last >> PAGE_SHIFT) << PAGE_SHIFT;
}

static int iomemtest(struct intel_memory_region *mem,
		     bool test_all,
		     const void *caller)
{
	resource_size_t last, page;
	int err;

	if (mem->io_size < PAGE_SIZE)
		return 0;

	last = mem->io_size - PAGE_SIZE;

	/*
	 * Quick sanity check of read/write access to the iomap (backing
	 * store).
	 *
	 * Write a few distinct byte patterns to a page and read them back
	 * through the mapping to confirm the backing store is visible to
	 * the CPU and retains what we wrote. Checking every page is slow,
	 * so by default only the first, the last and one random page are
	 * exercised; the full sweep is reserved for an explicit memtest
	 * request.
	 */
	if (test_all) {
		for (page = 0; page <= last; page += PAGE_SIZE) {
			err = iopagetest(mem, page, caller);
			if (err)
				return err;
		}
	} else {
		err = iopagetest(mem, 0, caller);
		if (err)
			return err;

		err = iopagetest(mem, last, caller);
		if (err)
			return err;

		err = iopagetest(mem, random_page(last), caller);
		if (err)
			return err;
	}

	return 0;
}

struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
			   u16 class, u16 instance)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id) {
		if (mr->type == class && mr->instance == instance)
			return mr;
	}

	return NULL;
}

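/*
 * Return the first memory region of the requested type, irrespective of
 * instance, or NULL if the device has none.
 */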
struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
			    enum intel_memory_type mem_type)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		if (mr->type == mem_type)
			return mr;

	return NULL;
}

/**
 * intel_memory_region_reserve - Reserve a memory range
 * @mem: The region for which we want to reserve a range.
 * @offset: Start of the range to reserve.
 * @size: The size of the range to reserve.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_memory_region_reserve(struct intel_memory_region *mem,
				resource_size_t offset,
				resource_size_t size)
{
	struct ttm_resource_manager *man = mem->region_private;

	GEM_BUG_ON(mem->is_range_manager);

	return i915_ttm_buddy_man_reserve(man, offset, size);
}

void intel_memory_region_debug(struct intel_memory_region *mr,
			       struct drm_printer *printer)
{
	drm_printf(printer, "%s: ", mr->name);

	if (mr->region_private)
		ttm_resource_manager_debug(mr->region_private, printer);
	else
		drm_printf(printer, "total:%pa bytes\n", &mr->total);
}

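/*
 * Exercise the region's IO aperture at creation time. The test only runs
 * when the region has an IO window and either CONFIG_DRM_I915_DEBUG_GEM is
 * enabled or the memtest module parameter is set (the latter forces a
 * sweep of every page).
 */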
static int intel_memory_region_memtest(struct intel_memory_region *mem,
				       void *caller)
{
	struct drm_i915_private *i915 = mem->i915;
	int err = 0;

	if (!mem->io_start)
		return 0;

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest)
		err = iomemtest(mem, i915->params.memtest, caller);

	return err;
}

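/*
 * Allocate and initialise a new memory region: fill in the resource range
 * and IO window, call the region's ->init() hook if one is provided, and
 * run the IO memtest before handing the region back. Returns an ERR_PTR on
 * failure.
 */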
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
			   resource_size_t start,
			   resource_size_t size,
			   resource_size_t min_page_size,
			   resource_size_t io_start,
			   resource_size_t io_size,
			   u16 type,
			   u16 instance,
			   const struct intel_memory_region_ops *ops)
{
	struct intel_memory_region *mem;
	int err;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	mem->i915 = i915;
	mem->region = (struct resource)DEFINE_RES_MEM(start, size);
	mem->io_start = io_start;
	mem->io_size = io_size;
	mem->min_page_size = min_page_size;
	mem->ops = ops;
	mem->total = size;
	mem->type = type;
	mem->instance = instance;

	mutex_init(&mem->objects.lock);
	INIT_LIST_HEAD(&mem->objects.list);

	if (ops->init) {
		err = ops->init(mem);
		if (err)
			goto err_free;
	}

	err = intel_memory_region_memtest(mem, (void *)_RET_IP_);
	if (err)
		goto err_release;

	return mem;

err_release:
	if (mem->ops->release)
		mem->ops->release(mem);
err_free:
	kfree(mem);
	return ERR_PTR(err);
}

void intel_memory_region_set_name(struct intel_memory_region *mem,
				  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
	va_end(ap);
}

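/*
 * Report the space still available in the region. For device-local memory
 * the figures come from the TTM buddy manager in pages and are converted to
 * bytes; for other regions the full size is reported as available.
 */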
void intel_memory_region_avail(struct intel_memory_region *mr,
			       u64 *avail, u64 *visible_avail)
{
	if (mr->type == INTEL_MEMORY_LOCAL) {
		i915_ttm_buddy_man_avail(mr->region_private,
					 avail, visible_avail);
		*avail <<= PAGE_SHIFT;
		*visible_avail <<= PAGE_SHIFT;
	} else {
		*avail = mr->total;
		*visible_avail = mr->total;
	}
}

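/*
 * Tear down a region created by intel_memory_region_create(). If ->release()
 * reports a failure, the region structure is intentionally not freed, since
 * its resources may still be in use.
 */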
void intel_memory_region_destroy(struct intel_memory_region *mem)
{
	int ret = 0;

	if (mem->ops->release)
		ret = mem->ops->release(mem);

	GEM_WARN_ON(!list_empty_careful(&mem->objects.list));
	mutex_destroy(&mem->objects.lock);
	if (!ret)
		kfree(mem);
}

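/*
 * Probe and set up every memory region advertised by the device. For each
 * region flagged in the device info, the class/instance pair from
 * intel_region_map selects a backend: shmem or TTM for system memory, and
 * the stolen local/system setup paths; other classes (e.g. device-local
 * memory) are set up elsewhere and skipped here. Any failure unwinds the
 * regions created so far.
 */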
int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u16 type, instance;

		if (!HAS_REGION(i915, BIT(i)))
			continue;

		type = intel_region_map[i].class;
		instance = intel_region_map[i].instance;
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			if (IS_DGFX(i915))
				mem = i915_gem_ttm_system_setup(i915, type,
								instance);
			else
				mem = i915_gem_shmem_setup(i915, type,
							   instance);
			break;
		case INTEL_MEMORY_STOLEN_LOCAL:
			mem = i915_gem_stolen_lmem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		case INTEL_MEMORY_STOLEN_SYSTEM:
			mem = i915_gem_stolen_smem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		default:
			continue;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			drm_err(&i915->drm,
				"Failed to setup region(%d) type=%d\n",
				err, type);
			goto out_cleanup;
		}

		mem->id = i;
		i915->mm.regions[i] = mem;
	}

	return 0;

out_cleanup:
	intel_memory_regions_driver_release(i915);
	return err;
}

void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *region =
			fetch_and_zero(&i915->mm.regions[i]);

		if (region)
			intel_memory_region_destroy(region);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif