#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
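
/*
 * Placement descriptors for the memory types vmwgfx uses: VRAM, system
 * pages, guest memory regions (GMR), memory objects (MOB) and the
 * driver-private system pool. The busy placements are the fallbacks
 * TTM tries when the preferred placement cannot be satisfied.
 */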
static const struct ttm_place vram_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_VRAM,
	.flags = 0
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,
	.flags = 0
};

static const struct ttm_place gmr_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_GMR,
	.flags = 0
};

static const struct ttm_place mob_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_MOB,
	.flags = 0
};

struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static const struct ttm_place vram_gmr_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = 0
	}
};

static const struct ttm_place gmr_vram_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = 0
	}
};

static const struct ttm_place vmw_sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_SYSTEM,
	.flags = 0
};

struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_pt_sys_placement = {
	.num_placement = 1,
	.placement = &vmw_sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vmw_sys_placement_flags
};

static const struct ttm_place nonfixed_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_MOB,
		.flags = 0
	}
};

struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_nonfixed_placement = {
	.num_placement = 3,
	.placement = nonfixed_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
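
/*
 * Helpers to advance a struct vmw_piter iterator. They return false once
 * the iterator has moved past the end of the page list, true otherwise.
 * Which helper is used depends on the current DMA mapping mode.
 */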
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	bool ret = __vmw_piter_non_sg_next(viter);

	return __sg_page_iter_dma_next(&viter->iter) && ret;
}
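
/*
 * Helpers to return the DMA address of the page the iterator currently
 * points at, for the coherent-allocation and sg-table mapping modes
 * respectively.
 */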
static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}
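
/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize.
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from.
 * @p_offset: Pointer offset used to update current array position.
 *
 * Note that this follows the convention of __sg_page_iter_start: the
 * iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */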
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	viter->pages = vsgt->pages;
	switch (vsgt->mode) {
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->addrs = vsgt->addrs;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}
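
/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */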
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->drm.dev;

	dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}
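
/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Obtains device addresses for the TTM pages from the DMA layer. Note
 * that while the pages are mapped, CPU writes to them must be bracketed
 * by the appropriate DMA sync calls.
 */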
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->drm.dev;

	return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
}
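
/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Selects the mapping mode, makes the TTM pages visible to the device
 * and allocates storage for the device mappings as needed. If a mapping
 * has already been performed, indicated by vmw_tt->mapped, the function
 * returns success immediately.
 */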
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	int ret = 0;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = NULL;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vsgt->sgt = &vmw_tt->sgt;
		ret = sg_alloc_table_from_pages_segment(
			&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
			(unsigned long)vsgt->num_pages << PAGE_SHIFT,
			dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
		if (ret)
			goto out_sg_alloc_fail;

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	return ret;
}
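
/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tears down any previously set up device DMA mappings and frees the
 * storage allocated for them. If there are no mappings set up, this
 * function is a NOP.
 */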
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}
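
/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object must not
 * be dereferenced after the buffer object's lifetime ends.
 */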
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);

	return &vmw_tt->vsgt;
}
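
/*
 * vmw_ttm_bind - Set up DMA mappings if necessary and bind the backend
 * to device memory: a GMR or a MOB, depending on the placement. System
 * placements need no device binding.
 */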
static int vmw_ttm_bind(struct ttm_device *bdev,
			struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);
	int ret = 0;

	if (!bo_mem)
		return -EINVAL;

	if (vmw_be->bound)
		return 0;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				   ttm->num_pages, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob = vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				   &vmw_be->vsgt, ttm->num_pages,
				   vmw_be->gmr_id);
		break;
	case VMW_PL_SYSTEM:
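		/* Nothing to do for the driver-private system placement. */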
		break;
	default:
		BUG();
	}
	vmw_be->bound = true;
	return ret;
}
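
/*
 * vmw_ttm_unbind - Undo a previous bind, releasing the GMR or MOB
 * binding and, in the map_bind mode, the DMA mappings as well.
 */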
static void vmw_ttm_unbind(struct ttm_device *bdev,
			   struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);

	if (!vmw_be->bound)
		return;

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	case VMW_PL_SYSTEM:
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);
	vmw_be->bound = false;
}
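
/*
 * vmw_ttm_destroy - Tear down the DMA mappings, destroy the MOB if one
 * was created, and free the backend structure.
 */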
static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);

	vmw_ttm_unmap_dma(vmw_be);
	ttm_tt_fini(ttm);
	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}
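
/* Allocate backing pages for the TTM unless it is already populated. */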
static int vmw_ttm_populate(struct ttm_device *bdev,
			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	if (ttm_tt_is_populated(ttm))
		return 0;

	return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}
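
/*
 * vmw_ttm_unpopulate - Unbind the backend, destroy any MOB, tear down
 * the DMA mappings and release the backing pages.
 */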
static void vmw_ttm_unpopulate(struct ttm_device *bdev,
			       struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm);

	vmw_ttm_unbind(bdev, ttm);

	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);

	ttm_pool_free(&bdev->pool, ttm);
}
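
/*
 * vmw_ttm_tt_create - Allocate a struct vmw_ttm_tt for a buffer object.
 * The coherent map mode uses the sg variant of ttm_tt initialization;
 * all other modes use the plain variant with cached pages.
 */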
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
				     ttm_cached);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
				  ttm_cached, 0);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = (mem->start << PAGE_SHIFT) +
			dev_priv->vram_start;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_cached;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
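
/**
 * vmw_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @old_mem: The old memory where we move from.
 * @new_mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Forwards the notification to the buffer-object and query handling.
 */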
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    struct ttm_resource *old_mem,
			    struct ttm_resource *new_mem)
{
	vmw_bo_move_notify(bo, new_mem);
	vmw_query_move_notify(bo, old_mem, new_mem);
}
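
/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */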
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_bo_swap_notify(bo);
	(void) ttm_bo_wait(bo, false, false);
}

static bool vmw_memtype_is_system(uint32_t mem_type)
{
	return mem_type == TTM_PL_SYSTEM || mem_type == VMW_PL_SYSTEM;
}
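
/*
 * vmw_move - The TTM move callback. Binds the new placement first when
 * it is TT-backed device memory. Moves between TT-backed resources then
 * need no data copy; everything else falls back to a memcpy move.
 */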
static int vmw_move(struct ttm_buffer_object *bo,
		    bool evict,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_resource *new_mem,
		    struct ttm_place *hop)
{
	struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->resource->mem_type);
	struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
	int ret;

	if (new_man->use_tt && !vmw_memtype_is_system(new_mem->mem_type)) {
		ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
		if (ret)
			return ret;
	}

	vmw_move_notify(bo, bo->resource, new_mem);

	if (old_man->use_tt && new_man->use_tt) {
		if (vmw_memtype_is_system(bo->resource->mem_type)) {
			ttm_bo_move_null(bo, new_mem);
			return 0;
		}
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			goto fail;

		vmw_ttm_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, new_mem);
		return 0;
	} else {
		ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	vmw_move_notify(bo, new_mem, bo->resource);
	return ret;
}

struct ttm_device_funcs vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.ttm_tt_destroy = &vmw_ttm_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vmw_evict_flags,
	.move = vmw_move,
	.swap_notify = vmw_swap_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};
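
/**
 * vmw_bo_create_and_populate - Create a kernel buffer object in the
 * driver-private system placement, populate it and set up its DMA
 * mappings.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bo_size: Size of the buffer object in bytes.
 * @bo_p: On success, set to point at the new buffer object.
 */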
int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
			       unsigned long bo_size,
			       struct ttm_buffer_object **bo_p)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_buffer_object *bo;
	int ret;

	ret = vmw_bo_create_kernel(dev_priv, bo_size,
				   &vmw_pt_sys_placement,
				   &bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);
	ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
	if (likely(ret == 0)) {
		struct vmw_ttm_tt *vmw_tt =
			container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);

		ret = vmw_ttm_map_dma(vmw_tt);
	}

	ttm_bo_unreserve(bo);

	if (likely(ret == 0))
		*bo_p = bo;
	return ret;
}