#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>

#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_ttm.h"

static void radeon_ttm_debugfs_init(struct radeon_device *rdev);

static int radeon_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
			      struct ttm_resource *bo_mem);
static void radeon_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);

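/* Resolve the radeon_device that embeds the given TTM device instance. */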
struct radeon_device *radeon_get_rdev(struct ttm_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}

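/* Create the TTM range managers that hand out VRAM and GTT ranges. */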
static int radeon_ttm_init_vram(struct radeon_device *rdev)
{
	return ttm_range_man_init(&rdev->mman.bdev, TTM_PL_VRAM,
				  false, rdev->mc.real_vram_size >> PAGE_SHIFT);
}

static int radeon_ttm_init_gtt(struct radeon_device *rdev)
{
	return ttm_range_man_init(&rdev->mman.bdev, TTM_PL_TT,
				  true, rdev->mc.gtt_size >> PAGE_SHIFT);
}

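/*
 * Pick the placements TTM should use when evicting this BO: non-radeon
 * BOs go to system memory, VRAM BOs are pushed towards GTT (or the CPU
 * inaccessible part of VRAM), everything else falls back to the CPU
 * domain.
 */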
static void radeon_evict_flags(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0
	};

	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->resource->mem_type) {
	case TTM_PL_VRAM:
		if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
			 bo->resource->start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
			unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			int i;

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT instead of causing big
			 * trouble.
			 */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
							 RADEON_GEM_DOMAIN_GTT);
			rbo->placement.num_busy_placement = 0;
			for (i = 0; i < rbo->placement.num_placement; i++) {
				if (rbo->placements[i].mem_type == TTM_PL_VRAM) {
					if (rbo->placements[i].fpfn < fpfn)
						rbo->placements[i].fpfn = fpfn;
				} else {
					rbo->placement.busy_placement =
						&rbo->placements[i];
					rbo->placement.num_busy_placement = 1;
				}
			}
		} else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}

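/* Copy between two GPU-addressable placements using the copy ring. */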
static int radeon_move_blit(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_resource *new_mem,
			    struct ttm_resource *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	unsigned num_pages;
	int r, ridx;

	rdev = radeon_get_rdev(bo->bdev);
	ridx = radeon_copy_ring_index(rdev);
	old_start = (u64)old_mem->start << PAGE_SHIFT;
	new_start = (u64)new_mem->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->ring[ridx].ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

	num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false, new_mem);
	radeon_fence_unref(&fence);
	return r;
}

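/*
 * Main TTM move callback: fast-path no-op moves, unbind on TT->SYSTEM,
 * blit when a copy ring is available, and fall back to memcpy otherwise.
 * Direct VRAM<->SYSTEM moves are redirected through GTT via -EMULTIHOP.
 */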
static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_resource *new_mem,
			  struct ttm_place *hop)
{
	struct ttm_resource *old_mem = bo->resource;
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	int r;

	if (new_mem->mem_type == TTM_PL_TT) {
		r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, new_mem);
		if (r)
			return r;
	}

	r = ttm_bo_wait_ctx(bo, ctx);
	if (r)
		return r;

	/* Can't move a pinned BO */
	rbo = container_of(bo, struct radeon_bo, tbo);
	if (WARN_ON_ONCE(rbo->tbo.pin_count > 0))
		return -EINVAL;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ttm_bo_move_null(bo, new_mem);
		goto out;
	}
	if (old_mem->mem_type == TTM_PL_SYSTEM &&
	    new_mem->mem_type == TTM_PL_TT) {
		ttm_bo_move_null(bo, new_mem);
		goto out;
	}

	if (old_mem->mem_type == TTM_PL_TT &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		radeon_ttm_tt_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, new_mem);
		goto out;
	}
	if (rdev->ring[radeon_copy_ring_index(rdev)].ready &&
	    rdev->asic->copy.copy != NULL) {
		if ((old_mem->mem_type == TTM_PL_SYSTEM &&
		     new_mem->mem_type == TTM_PL_VRAM) ||
		    (old_mem->mem_type == TTM_PL_VRAM &&
		     new_mem->mem_type == TTM_PL_SYSTEM)) {
			hop->fpfn = 0;
			hop->lpfn = 0;
			hop->mem_type = TTM_PL_TT;
			hop->flags = 0;
			return -EMULTIHOP;
		}

		r = radeon_move_blit(bo, evict, new_mem, old_mem);
	} else {
		r = -ENODEV;
	}

	if (r) {
		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r)
			return r;
	}

out:
	/* update statistics */
	atomic64_add(bo->base.size, &rdev->num_bytes_moved);
	radeon_bo_move_notify(bo);
	return 0;
}

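/*
 * Tell TTM where a resource lives on the bus so it can be CPU mapped:
 * system pages need no setup, AGP and VRAM are I/O memory behind their
 * respective apertures.
 */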
static int radeon_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (rdev->flags & RADEON_IS_AGP) {
			/* RADEON_IS_AGP is set only if AGP is active */
			mem->bus.offset = (mem->start << PAGE_SHIFT) +
				rdev->mc.agp_base;
			mem->bus.is_iomem = !rdev->agp->cant_use_aperture;
			mem->bus.caching = ttm_write_combined;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + bus_size) > rdev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.offset += rdev->mc.aper_base;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_write_combined;
#ifdef __alpha__
		/*
		 * Alpha: use bus.addr to hold the ioremap() result,
		 * instead of doing it in ttm_bo_vm_fault_reserved().
		 */
		mem->bus.addr = ioremap_wc(mem->bus.offset, bus_size);
		if (!mem->bus.addr)
			return -ENOMEM;

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.base.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault_reserved().
		 */
		mem->bus.offset = (mem->bus.offset & 0x0ffffffffUL) +
			rdev->hose->dense_mem_base;
#endif
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
	struct ttm_tt		ttm;
	u64			offset;

	uint64_t		userptr;
	struct mm_struct	*usermm;
	uint32_t		userflags;
	bool			bound;
};

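/* Pin the user pages behind a userptr BO and build an sg table for them. */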
static int radeon_ttm_tt_pin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned pinned = 0;
	int r;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	if (current->mm != gtt->usermm)
		return -EPERM;

	if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
		/* check that we only pin down anonymous memory
		 * to prevent problems with writeback
		 */
		unsigned long end = gtt->userptr + (u64)ttm->num_pages * PAGE_SIZE;
		struct vm_area_struct *vma;

		vma = find_vma(gtt->usermm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end)
			return -EPERM;
	}

	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **pages = ttm->pages + pinned;

		r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
				   pages, NULL);
		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      (u64)ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	r = dma_map_sgtable(rdev->dev, ttm->sg, direction, 0);
	if (r)
		goto release_sg;

	drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
				       ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);

release_pages:
	release_pages(ttm->pages, pinned);
	return r;
}

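/* Unmap, dirty and release the user pages once the BO is unbound. */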
static void radeon_ttm_tt_unpin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	struct sg_page_iter sg_iter;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg || !ttm->sg->sgl)
		return;

	/* free the sg table and pages again */
	dma_unmap_sgtable(rdev->dev, ttm->sg, direction, 0);

	for_each_sgtable_page(ttm->sg, &sg_iter, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}

	sg_free_table(ttm->sg);
}

static bool radeon_ttm_backend_is_bound(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	return gtt->bound;
}

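/* Bind the tt pages into the GART at the offset chosen by TTM. */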
static int radeon_ttm_backend_bind(struct ttm_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_resource *bo_mem)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
		RADEON_GART_PAGE_WRITE;
	int r;

	if (gtt->bound)
		return 0;

	if (gtt->userptr) {
		radeon_ttm_tt_pin_userptr(bdev, ttm);
		flags &= ~RADEON_GART_PAGE_WRITE;
	}

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}
	if (ttm->caching == ttm_cached)
		flags |= RADEON_GART_PAGE_SNOOP;
	r = radeon_gart_bind(rdev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);
	if (r) {
		DRM_ERROR("failed to bind %u pages at 0x%08X\n",
			  ttm->num_pages, (unsigned)gtt->offset);
		return r;
	}
	gtt->bound = true;
	return 0;
}

static void radeon_ttm_backend_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	if (gtt->userptr)
		radeon_ttm_tt_unpin_userptr(bdev, ttm);

	if (!gtt->bound)
		return;

	radeon_gart_unbind(rdev, gtt->offset, ttm->num_pages);

	gtt->bound = false;
}

static void radeon_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	ttm_tt_fini(&gtt->ttm);
	kfree(gtt);
}

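/*
 * Allocate the ttm_tt backing for a BO: AGP boards use the AGP backend,
 * everything else gets a radeon_ttm_tt with the caching mode implied by
 * the BO's GTT flags.
 */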
static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct radeon_ttm_tt *gtt;
	enum ttm_caching caching;
	struct radeon_bo *rbo;
#if IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);

	if (rdev->flags & RADEON_IS_AGP)
		return ttm_agp_tt_create(bo, rdev->agp->bridge, page_flags);
#endif
	rbo = container_of(bo, struct radeon_bo, tbo);

	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
	if (gtt == NULL)
		return NULL;

	if (rbo->flags & RADEON_GEM_GTT_UC)
		caching = ttm_uncached;
	else if (rbo->flags & RADEON_GEM_GTT_WC)
		caching = ttm_write_combined;
	else
		caching = ttm_cached;

	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm;
}

static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev,
						  struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP)
		return NULL;
#endif

	if (!ttm)
		return NULL;
	return container_of(ttm, struct radeon_ttm_tt, ttm);
}

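/*
 * Populate/unpopulate the backing pages. userptr and dma-buf imported
 * ttm_tts provide their own pages; everything else comes from the TTM
 * page pool.
 */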
static int radeon_ttm_tt_populate(struct ttm_device *bdev,
				  struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
	bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);

	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
		return 0;
	}

	if (slave && ttm->sg) {
		drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
					       ttm->num_pages);
		return 0;
	}

	return ttm_pool_alloc(&rdev->mman.bdev.pool, ttm, ctx);
}

static void radeon_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
	bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);

	radeon_ttm_tt_unbind(bdev, ttm);

	if (gtt && gtt->userptr) {
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_TT_FLAG_EXTERNAL;
		return;
	}

	if (slave)
		return;

	return ttm_pool_free(&rdev->mman.bdev.pool, ttm);
}

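/* Record the userptr address, owning mm and access flags on a ttm_tt. */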
int radeon_ttm_tt_set_userptr(struct radeon_device *rdev,
			      struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->usermm = current->mm;
	gtt->userflags = flags;
	return 0;
}

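/*
 * The wrappers below dispatch between the AGP backend and the driver's
 * own GART backend, depending on whether the board runs over AGP.
 */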
bool radeon_ttm_tt_is_bound(struct ttm_device *bdev,
			    struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	if (rdev->flags & RADEON_IS_AGP)
		return ttm_agp_is_bound(ttm);
#endif
	return radeon_ttm_backend_is_bound(ttm);
}

static int radeon_ttm_tt_bind(struct ttm_device *bdev,
			      struct ttm_tt *ttm,
			      struct ttm_resource *bo_mem)
{
#if IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev = radeon_get_rdev(bdev);
#endif

	if (!bo_mem)
		return -EINVAL;
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP)
		return ttm_agp_bind(ttm, bo_mem);
#endif

	return radeon_ttm_backend_bind(bdev, ttm, bo_mem);
}

static void radeon_ttm_tt_unbind(struct ttm_device *bdev,
				 struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_unbind(ttm);
		return;
	}
#endif
	radeon_ttm_backend_unbind(bdev, ttm);
}

static void radeon_ttm_tt_destroy(struct ttm_device *bdev,
				  struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_destroy(ttm);
		return;
	}
#endif
	radeon_ttm_backend_destroy(bdev, ttm);
}

bool radeon_ttm_tt_has_userptr(struct radeon_device *rdev,
			       struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);

	if (gtt == NULL)
		return false;

	return !!gtt->userptr;
}

bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev,
			       struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
}

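/* TTM device callbacks wired up for radeon. */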
static struct ttm_device_funcs radeon_bo_driver = {
	.ttm_tt_create = &radeon_ttm_tt_create,
	.ttm_tt_populate = &radeon_ttm_tt_populate,
	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
	.ttm_tt_destroy = &radeon_ttm_tt_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
};

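/*
 * Initialize the TTM device, the VRAM and GTT managers, and the pinned
 * 256KB VRAM buffer backing the stolen VGA memory.
 */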
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	/* No other users of the address space, so set it to 0 */
	r = ttm_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
			    rdev->ddev->anon_inode->i_mapping,
			    rdev->ddev->vma_offset_manager,
			    rdev->need_swiotlb,
			    dma_addressing_limited(&rdev->pdev->dev));
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	rdev->mman.initialized = true;

	r = radeon_ttm_init_vram(rdev);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	/* Change the size here instead of the init above so only lpfn is affected */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
			     NULL, &rdev->stolen_vga_memory);
	if (r)
		return r;

	r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
	if (r)
		return r;
	r = radeon_bo_pin(rdev->stolen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stolen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stolen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned)(rdev->mc.real_vram_size / (1024 * 1024)));

	r = radeon_ttm_init_gtt(rdev);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));

	radeon_ttm_debugfs_init(rdev);

	return 0;
}

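/* Tear down the stolen VGA BO, both range managers and the TTM device. */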
void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;

	if (rdev->stolen_vga_memory) {
		r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stolen_vga_memory);
			radeon_bo_unreserve(rdev->stolen_vga_memory);
		}
		radeon_bo_unref(&rdev->stolen_vga_memory);
	}
	ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_TT);
	ttm_device_fini(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}

/* This should only be called at bootup or when userspace isn't running. */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
	struct ttm_resource_manager *man;

	if (!rdev->mman.initialized)
		return;

	man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);

	man->size = size >> PAGE_SHIFT;
}

#if defined(CONFIG_DEBUG_FS)

static int radeon_ttm_page_pool_show(struct seq_file *m, void *data)
{
	struct radeon_device *rdev = (struct radeon_device *)m->private;

	return ttm_pool_debugfs(&rdev->mman.bdev.pool, m);
}

DEFINE_SHOW_ATTRIBUTE(radeon_ttm_page_pool);

static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;

	i_size_write(inode, rdev->mc.mc_vram_size);
	filep->private_data = inode->i_private;
	return 0;
}

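/*
 * Read VRAM through the MM_INDEX/MM_DATA register window, one dword at a
 * time, so all of VRAM is reachable even beyond the CPU-visible aperture.
 */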
static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= rdev->mc.mc_vram_size)
			return result;

		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
		WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
		if (rdev->family >= CHIP_CEDAR)
			WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
		value = RREG32(RADEON_MM_DATA);
		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t __user *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations radeon_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_vram_open,
	.read = radeon_ttm_vram_read,
	.llseek = default_llseek
};

static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;

	i_size_write(inode, rdev->mc.gtt_size);
	filep->private_data = inode->i_private;
	return 0;
}

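/*
 * Read back GTT contents page by page through the CPU mapping of the
 * GART pages; unpopulated entries read back as zeros.
 */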
static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= rdev->gart.num_cpu_pages)
			return result;

		page = rdev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(rdev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations radeon_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_gtt_open,
	.read = radeon_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

static void radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = rdev->ddev->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("radeon_vram", 0444, root, rdev,
			    &radeon_ttm_vram_fops);
	debugfs_create_file("radeon_gtt", 0444, root, rdev,
			    &radeon_ttm_gtt_fops);
	debugfs_create_file("ttm_page_pool", 0444, root, rdev,
			    &radeon_ttm_page_pool_fops);
	ttm_resource_manager_create_debugfs(ttm_manager_type(&rdev->mman.bdev,
							     TTM_PL_VRAM),
					    root, "radeon_vram_mm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(&rdev->mman.bdev,
							     TTM_PL_TT),
					    root, "radeon_gtt_mm");
#endif
}