#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>
#include <linux/iosys-map.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>

/* Ghost object used to hang on to a BO's old backing store during a move. */
struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};

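/*
 * Reserve the io/bus address space needed to map @mem, if the driver
 * requires an explicit reservation. A no-op if already reserved.
 */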
int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem)
{
	if (mem->bus.offset || mem->bus.addr)
		return 0;

	mem->bus.is_iomem = false;
	if (!bdev->funcs->io_mem_reserve)
		return 0;

	return bdev->funcs->io_mem_reserve(bdev, mem);
}

void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem)
{
	if (!mem)
		return;

	if (!mem->bus.offset && !mem->bus.addr)
		return;

	if (bdev->funcs->io_mem_free)
		bdev->funcs->io_mem_free(bdev, mem);

	mem->bus.offset = 0;
	mem->bus.addr = NULL;
}

/**
 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
 * @clear: Whether to clear the destination rather than copy into it.
 * @num_pages: Number of pages of the operation.
 * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
 * @src_iter: A struct ttm_kmap_iter representing the source resource.
 *
 * This function is intended to be able to move out async under a
 * dma-fence if desired.
 */
void ttm_move_memcpy(bool clear,
		     u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter)
{
	const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
	const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
	struct iosys_map src_map, dst_map;
	pgoff_t i;

	/* Single TTM move. NOP */
	if (dst_ops->maps_tt && src_ops->maps_tt)
		return;

	/* Don't move nonexistent data. Clear destination instead. */
	if (clear) {
		for (i = 0; i < num_pages; ++i) {
			dst_ops->map_local(dst_iter, &dst_map, i);
			if (dst_map.is_iomem)
				memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
			else
				memset(dst_map.vaddr, 0, PAGE_SIZE);
			if (dst_ops->unmap_local)
				dst_ops->unmap_local(dst_iter, &dst_map);
		}
		return;
	}

	for (i = 0; i < num_pages; ++i) {
		dst_ops->map_local(dst_iter, &dst_map, i);
		src_ops->map_local(src_iter, &src_map, i);

		drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);

		if (src_ops->unmap_local)
			src_ops->unmap_local(src_iter, &src_map);
		if (dst_ops->unmap_local)
			dst_ops->unmap_local(dst_iter, &dst_map);
	}
}
EXPORT_SYMBOL(ttm_move_memcpy);

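/**
 * ttm_bo_move_memcpy - fallback memcpy move helper
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: operation context
 * @dst_mem: struct ttm_resource indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * On success the buffer object points to @dst_mem and the old resource
 * has been released.
 */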
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *dst_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_resource *src_mem = bo->resource;
	struct ttm_resource_manager *src_man =
		ttm_manager_type(bdev, src_mem->mem_type);
	union {
		struct ttm_kmap_iter_tt tt;
		struct ttm_kmap_iter_linear_io io;
	} _dst_iter, _src_iter;
	struct ttm_kmap_iter *dst_iter, *src_iter;
	bool clear;
	int ret = 0;

	if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
		    dst_man->use_tt)) {
		ret = ttm_tt_populate(bdev, ttm, ctx);
		if (ret)
			return ret;
	}

	dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
	if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
		dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
	if (IS_ERR(dst_iter))
		return PTR_ERR(dst_iter);

	src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
	if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
		src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
	if (IS_ERR(src_iter)) {
		ret = PTR_ERR(src_iter);
		goto out_src_iter;
	}

	clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
	if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
		ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);

	if (!src_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
	ttm_bo_move_sync_cleanup(bo, dst_mem);

out_src_iter:
	if (!dst_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

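/* Destructor for the ghost object: drop the extra reference on the original BO. */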
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	dma_resv_fini(&fbo->base.base._resv);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 *
 * Returns:
 * !0: Failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;

	/*
	 * Fix up members that we shouldn't copy directly:
	 * the ghost gets its own reference count, LRU/destroy list entries,
	 * VMA offset node and reservation object.
	 */
	atomic_inc(&ttm_glob.bo_count);
	INIT_LIST_HEAD(&fbo->base.ddestroy);
	drm_vma_node_reset(&fbo->base.base.vma_node);

	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.pin_count = 0;
	if (bo->type != ttm_bo_type_sg)
		fbo->base.base.resv = &fbo->base.base._resv;

	dma_resv_init(&fbo->base.base._resv);
	fbo->base.base.dev = NULL;
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	if (fbo->base.resource) {
		ttm_resource_set_bo(fbo->base.resource, &fbo->base);
		bo->resource = NULL;
		ttm_bo_set_bulk_move(&fbo->base, NULL);
	} else {
		fbo->base.bulk_move = NULL;
	}

	ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
	if (ret) {
		kfree(fbo);
		return ret;
	}

	ttm_bo_get(bo);
	fbo->bo = bo;

	ttm_bo_move_to_lru_tail_unlocked(&fbo->base);

	*new_obj = &fbo->base;
	return 0;
}

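/**
 * ttm_io_prot
 *
 * @bo: ttm buffer object
 * @res: ttm resource object
 * @tmp: Page protection flag for a normal, cached mapping
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @res or, for
 * TT-backed resources, by the bo's ttm_tt.
 */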
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp)
{
	struct ttm_resource_manager *man;
	enum ttm_caching caching;

	man = ttm_manager_type(bo->bdev, res->mem_type);
	caching = man->use_tt ? bo->ttm->caching : res->bus.caching;

	return ttm_prot_from_caching(caching, tmp);
}
EXPORT_SYMBOL(ttm_io_prot);

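/* Map part of an io-memory resource, honouring the resource's caching mode. */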
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;

	if (bo->resource->bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
	} else {
		resource_size_t res = bo->resource->bus.offset + offset;

		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->bus.caching == ttm_write_combined)
			map->virtual = ioremap_wc(res, size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			map->virtual = ioremap_cache(res, size);
#endif
		else
			map->virtual = ioremap(res, size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

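/* Map part of a TT-backed (system memory) buffer object, populating it first. */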
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && ttm->caching == ttm_cached) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */
		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

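/**
 * ttm_bo_kmap - map a part of a buffer object into kernel address space
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap, to the
 * data in the buffer object.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */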
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->resource->num_pages)
		return -EINVAL;
	if ((start_page + num_pages) > bo->resource->num_pages)
		return -EINVAL;

	ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
	if (ret)
		return ret;
	if (!bo->resource->bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

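/**
 * ttm_bo_kunmap - unmap a kernel virtual mapping set up by ttm_bo_kmap
 *
 * @map: Object describing the map to unmap.
 */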
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	ttm_mem_io_free(map->bo->bdev, map->bo->resource);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

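/**
 * ttm_bo_vmap - map the whole buffer object into kernel address space
 *
 * @bo: The buffer object.
 * @map: pointer to a struct iosys_map representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap or vmap, of the whole
 * buffer object. Returns 0 on success, or a negative errno code otherwise.
 */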
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
	struct ttm_resource *mem = bo->resource;
	int ret;

	ret = ttm_mem_io_reserve(bo->bdev, mem);
	if (ret)
		return ret;

	if (mem->bus.is_iomem) {
		void __iomem *vaddr_iomem;

		if (mem->bus.addr)
			vaddr_iomem = (void __iomem *)mem->bus.addr;
		else if (mem->bus.caching == ttm_write_combined)
			vaddr_iomem = ioremap_wc(mem->bus.offset,
						 bo->base.size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			vaddr_iomem = ioremap_cache(mem->bus.offset,
						    bo->base.size);
#endif
		else
			vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);

		if (!vaddr_iomem)
			return -ENOMEM;

		iosys_map_set_vaddr_iomem(map, vaddr_iomem);

	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false
		};
		struct ttm_tt *ttm = bo->ttm;
		pgprot_t prot;
		void *vaddr;

		ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
		if (ret)
			return ret;

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
		if (!vaddr)
			return -ENOMEM;

		iosys_map_set_vaddr(map, vaddr);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vmap);

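/**
 * ttm_bo_vunmap - unmap a mapping set up by ttm_bo_vmap
 *
 * @bo: The buffer object.
 * @map: Object describing the map to unmap.
 */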
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
	struct ttm_resource *mem = bo->resource;

	if (iosys_map_is_null(map))
		return;

	if (!map->is_iomem)
		vunmap(map->vaddr);
	else if (!mem->bus.addr)
		iounmap(map->vaddr_iomem);
	iosys_map_clear(map);

	ttm_mem_io_free(bo->bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_vunmap);

static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
				 bool dst_use_tt)
{
	int ret;

	ret = ttm_bo_wait(bo, false, false);
	if (ret)
		return ret;

	if (!dst_use_tt)
		ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
	return 0;
}

static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
				struct dma_fence *fence,
				bool dst_use_tt)
{
	struct ttm_buffer_object *ghost_obj;
	int ret;

	/*
	 * This should help pipeline ordinary buffer moves.
	 *
	 * Hang old buffer memory on a new buffer object,
	 * and leave it to be released when the GPU
	 * operation has completed.
	 */
	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
	if (ret)
		return ret;

	dma_resv_add_fence(&ghost_obj->base._resv, fence,
			   DMA_RESV_USAGE_KERNEL);

	/*
	 * If we're not moving to fixed memory, the TTM object
	 * needs to stay alive. Otherwise hang it on the ghost
	 * bo to be unbound and destroyed.
	 */
	if (dst_use_tt)
		ghost_obj->ttm = NULL;
	else
		bo->ttm = NULL;

	dma_resv_unlock(&ghost_obj->base._resv);
	ttm_bo_put(ghost_obj);
	return 0;
}

static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
				       struct dma_fence *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from;

	from = ttm_manager_type(bdev, bo->resource->mem_type);

	/*
	 * BO doesn't have a TTM we need to bind/unbind. Just remember
	 * this eviction and free up the allocation.
	 */
	spin_lock(&from->move_lock);
	if (!from->move || dma_fence_is_later(fence, from->move)) {
		dma_fence_put(from->move);
		from->move = dma_fence_get(fence);
	}
	spin_unlock(&from->move_lock);

	ttm_resource_free(bo, &bo->resource);
}

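/**
 * ttm_bo_move_accel_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @pipeline: evictions are to be pipelined.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */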
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret = 0;

	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
	if (!evict)
		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
	else if (!from->use_tt && pipeline)
		ttm_bo_move_pipeline_evict(bo, fence);
	else
		ret = ttm_bo_wait_free_node(bo, man->use_tt);

	if (ret)
		return ret;

	ttm_bo_assign_mem(bo, new_mem);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

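/**
 * ttm_bo_move_sync_cleanup - cleanup by waiting for the move to finish
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
 * by the caller to be idle. Typically used after memcpy buffer moves.
 */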
void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret;

	ret = ttm_bo_wait_free_node(bo, man->use_tt);
	if (WARN_ON(ret))
		return;

	ttm_bo_assign_mem(bo, new_mem);
}
EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);

/**
 * ttm_bo_pipeline_gutting - purge the contents of a bo
 * @bo: The buffer object
 *
 * Purge the contents of a bo, async if the bo is not idle.
 * After a successful call, the bo is left unpopulated in
 * system placement. The function may wait uninterruptible
 * for idle on OOM.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
	struct ttm_buffer_object *ghost;
	struct ttm_resource *sys_res;
	struct ttm_tt *ttm;
	int ret;

	ret = ttm_resource_alloc(bo, &sys_mem, &sys_res);
	if (ret)
		return ret;

	/* If already idle, no need for ghost object dance. */
	ret = ttm_bo_wait(bo, false, true);
	if (ret != -EBUSY) {
		if (!bo->ttm) {
			/* See comment below about clearing. */
			ret = ttm_tt_create(bo, true);
			if (ret)
				goto error_free_sys_mem;
		} else {
			ttm_tt_unpopulate(bo->bdev, bo->ttm);
			if (bo->type == ttm_bo_type_device)
				ttm_tt_mark_for_clear(bo->ttm);
		}
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, sys_res);
		return 0;
	}

	/*
	 * We need an unpopulated ttm_tt after giving our current one,
	 * if any, to the ghost object. And we can't afford to fail
	 * creating one *after* the operation. If the bo subsequently gets
	 * resurrected, make sure it's cleared (if ttm_bo_type_device)
	 * to avoid leaking sensitive information to user-space.
	 */
	ttm = bo->ttm;
	bo->ttm = NULL;
	ret = ttm_tt_create(bo, true);
	swap(bo->ttm, ttm);
	if (ret)
		goto error_free_sys_mem;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		goto error_destroy_tt;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);
	bo->ttm = ttm;
	ttm_bo_assign_mem(bo, sys_res);
	return 0;

error_destroy_tt:
	ttm_tt_destroy(bo->bdev, ttm);

error_free_sys_mem:
	ttm_resource_free(bo, &sys_res);
	return ret;
}