0001
0002
0003 #include <linux/iosys-map.h>
0004 #include <linux/module.h>
0005
0006 #include <drm/drm_debugfs.h>
0007 #include <drm/drm_device.h>
0008 #include <drm/drm_drv.h>
0009 #include <drm/drm_file.h>
0010 #include <drm/drm_framebuffer.h>
0011 #include <drm/drm_gem_atomic_helper.h>
0012 #include <drm/drm_gem_framebuffer_helper.h>
0013 #include <drm/drm_gem_ttm_helper.h>
0014 #include <drm/drm_gem_vram_helper.h>
0015 #include <drm/drm_managed.h>
0016 #include <drm/drm_mode.h>
0017 #include <drm/drm_plane.h>
0018 #include <drm/drm_prime.h>
0019 #include <drm/drm_simple_kms_helper.h>
0020
0021 #include <drm/ttm/ttm_range_manager.h>
0022
0023 static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111
static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
{
	/*
	 * Release the GEM-object state of @gbo. At this point there must
	 * be no outstanding kernel mappings; a non-zero vmap_use_count or
	 * a still-set map indicates an unbalanced vmap/vunmap pair.
	 */
	WARN_ON(gbo->vmap_use_count);
	WARN_ON(iosys_map_is_set(&gbo->map));

	drm_gem_object_release(&gbo->bo.base);
}
0124
/* Release all resources of @gbo and free its backing memory. */
static void drm_gem_vram_destroy(struct drm_gem_vram_object *gbo)
{
	drm_gem_vram_cleanup(gbo);
	kfree(gbo);
}
0130
/*
 * TTM destroy callback; installed as bo->destroy by drm_gem_vram_create().
 * Also serves as the identity marker tested by drm_is_gem_vram().
 */
static void ttm_buffer_object_destroy(struct ttm_buffer_object *bo)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);

	drm_gem_vram_destroy(gbo);
}
0137
/*
 * Set up gbo->placement and gbo->placements from DRM_GEM_VRAM_PL_FLAG_*
 * bits. Falls back to system memory when no memory type was requested.
 */
static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
				   unsigned long pl_flag)
{
	u32 invariant_flags = 0;
	unsigned int i;
	unsigned int c = 0;

	/* TOPDOWN applies to every placement entry set up below */
	if (pl_flag & DRM_GEM_VRAM_PL_FLAG_TOPDOWN)
		invariant_flags = TTM_PL_FLAG_TOPDOWN;

	gbo->placement.placement = gbo->placements;
	gbo->placement.busy_placement = gbo->placements;

	if (pl_flag & DRM_GEM_VRAM_PL_FLAG_VRAM) {
		gbo->placements[c].mem_type = TTM_PL_VRAM;
		gbo->placements[c++].flags = invariant_flags;
	}

	/* system memory if requested explicitly, or as fallback if !c */
	if (pl_flag & DRM_GEM_VRAM_PL_FLAG_SYSTEM || !c) {
		gbo->placements[c].mem_type = TTM_PL_SYSTEM;
		gbo->placements[c++].flags = invariant_flags;
	}

	gbo->placement.num_placement = c;
	gbo->placement.num_busy_placement = c;

	/* no address-range restriction on any placement */
	for (i = 0; i < c; ++i) {
		gbo->placements[i].fpfn = 0;
		gbo->placements[i].lpfn = 0;
	}
}
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
/**
 * drm_gem_vram_create() - Creates a VRAM-backed GEM object
 * @dev:		the DRM device
 * @size:		the buffer size in bytes
 * @pg_align:	the buffer's alignment in multiples of the page size
 *
 * GEM objects are allocated by calling struct drm_driver.gem_create_object,
 * if set. Otherwise kzalloc() will be used.
 *
 * Returns:
 * A new instance of &struct drm_gem_vram_object on success, or
 * an ERR_PTR()-encoded error code otherwise.
 */
struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
						size_t size,
						unsigned long pg_align)
{
	struct drm_gem_vram_object *gbo;
	struct drm_gem_object *gem;
	struct drm_vram_mm *vmm = dev->vram_mm;
	struct ttm_device *bdev;
	int ret;

	if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
		return ERR_PTR(-EINVAL);

	if (dev->driver->gem_create_object) {
		gem = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(gem))
			return ERR_CAST(gem);
		gbo = drm_gem_vram_of_gem(gem);
	} else {
		gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
		if (!gbo)
			return ERR_PTR(-ENOMEM);
		gem = &gbo->bo.base;
	}

	/* keep a driver-provided funcs table if one was set */
	if (!gem->funcs)
		gem->funcs = &drm_gem_vram_object_funcs;

	ret = drm_gem_object_init(dev, gem, size);
	if (ret) {
		kfree(gbo);
		return ERR_PTR(ret);
	}

	bdev = &vmm->bdev;

	gbo->bo.bdev = bdev;
	drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);

	/*
	 * A failing ttm_bo_init() invokes the destroy callback
	 * (ttm_buffer_object_destroy), which releases the GEM object
	 * and frees @gbo; hence no explicit cleanup on this path.
	 */
	ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
			  &gbo->placement, pg_align, false, NULL, NULL,
			  ttm_buffer_object_destroy);
	if (ret)
		return ERR_PTR(ret);

	return gbo;
}
EXPORT_SYMBOL(drm_gem_vram_create);
0238
0239
0240
0241
0242
0243
0244
/**
 * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
 * @gbo:	the GEM VRAM object
 */
void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
{
	ttm_bo_put(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_put);
0250
/*
 * Returns the buffer's page offset within its memory region. Only
 * meaningful for placements with a fixed offset (i.e. not TTM_PL_SYSTEM).
 */
static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
{
	/* no resource, or system placement, has no stable offset */
	if (WARN_ON_ONCE(!gbo->bo.resource ||
			 gbo->bo.resource->mem_type == TTM_PL_SYSTEM))
		return 0;

	return gbo->bo.resource->start;
}
0260
0261
0262
0263
0264
0265
0266
0267
0268
0269
0270
0271
0272
/**
 * drm_gem_vram_offset() - Returns a GEM VRAM object's offset in video memory
 * @gbo:	the GEM VRAM object
 *
 * The buffer must be pinned; otherwise its location may change at any
 * time and the returned value would be stale.
 *
 * Returns:
 * The buffer object's byte offset in video memory on success, or
 * a negative errno code otherwise.
 */
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
{
	if (WARN_ON_ONCE(!gbo->bo.pin_count))
		return (s64)-ENODEV;
	return drm_gem_vram_pg_offset(gbo) << PAGE_SHIFT;
}
EXPORT_SYMBOL(drm_gem_vram_offset);
0280
/*
 * Pin @gbo with the BO reserved. A pl_flag of 0 pins the buffer at its
 * current location without validating a new placement.
 */
static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
				   unsigned long pl_flag)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	/* already pinned: just take another pin reference */
	if (gbo->bo.pin_count)
		goto out;

	if (pl_flag)
		drm_gem_vram_placement(gbo, pl_flag);

	ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
	if (ret < 0)
		return ret;

out:
	ttm_bo_pin(&gbo->bo);

	return 0;
}
0302
0303
0304
0305
0306
0307
0308
0309
0310
0311
0312
0313
0314
0315
0316
0317
0318
0319
0320
0321
0322
0323
0324
0325
/**
 * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
 * @gbo:	the GEM VRAM object
 * @pl_flag:	a bitmask of possible memory regions
 *
 * Pinning a buffer object ensures that it is not evicted from a memory
 * region. A pinned buffer object has to be unpinned before it can be
 * pinned to another region. If @pl_flag is 0, the buffer is pinned at
 * its current location.
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */
int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
{
	int ret;

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret)
		return ret;
	ret = drm_gem_vram_pin_locked(gbo, pl_flag);
	ttm_bo_unreserve(&gbo->bo);

	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_pin);
0339
/* Drop one pin reference; the BO must be reserved by the caller. */
static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
{
	ttm_bo_unpin(&gbo->bo);
}
0344
0345
0346
0347
0348
0349
0350
0351
0352
/**
 * drm_gem_vram_unpin() - Unpins a GEM VRAM object
 * @gbo:	the GEM VRAM object
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */
int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
{
	int ret;

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret)
		return ret;

	drm_gem_vram_unpin_locked(gbo);
	ttm_bo_unreserve(&gbo->bo);

	return 0;
}
EXPORT_SYMBOL(drm_gem_vram_unpin);
0367
/*
 * Map @gbo into kernel address space with the BO reserved. The mapping
 * is reference-counted; it is created once and reused for subsequent
 * callers until the final drm_gem_vram_kunmap_locked().
 */
static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
				    struct iosys_map *map)
{
	int ret;

	if (gbo->vmap_use_count > 0)
		goto out;

	/*
	 * The use count may be 0 while a cached mapping still exists
	 * (see drm_gem_vram_kunmap_locked()); only create a new mapping
	 * when none is present.
	 */
	if (iosys_map_is_null(&gbo->map)) {
		ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
		if (ret)
			return ret;
	}

out:
	++gbo->vmap_use_count;
	*map = gbo->map;

	return 0;
}
0393
/*
 * Drop one kernel-mapping reference of @gbo with the BO reserved.
 * @map must be the mapping previously returned by
 * drm_gem_vram_kmap_locked().
 */
static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo,
				       struct iosys_map *map)
{
	struct drm_device *dev = gbo->bo.base.dev;

	if (drm_WARN_ON_ONCE(dev, !gbo->vmap_use_count))
		return;

	if (drm_WARN_ON_ONCE(dev, !iosys_map_is_equal(&gbo->map, map)))
		return;

	if (--gbo->vmap_use_count > 0)
		return;

	/*
	 * Intentionally no ttm_bo_vunmap() here: the mapping is kept
	 * cached to avoid repeated map/unmap cost. It is released when
	 * the buffer moves or is destroyed; see
	 * drm_gem_vram_bo_driver_move_notify().
	 */
}
0415
0416
0417
0418
0419
0420
0421
0422
0423
0424
0425
0426
0427
0428
0429
0430
0431
/**
 * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel
 *                       address space
 * @gbo: The GEM VRAM object to map
 * @map: Returns the kernel virtual address of the VRAM GEM object's
 *       backing store.
 *
 * The vmap function pins a GEM VRAM object to its current location,
 * either system or video memory, and maps its buffer into kernel address
 * space. Call drm_gem_vram_vunmap() with the returned mapping to unmap
 * and unpin the GEM VRAM object.
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */
int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map)
{
	int ret;

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret)
		return ret;

	/* pin at current location (pl_flag == 0) so the mapping stays valid */
	ret = drm_gem_vram_pin_locked(gbo, 0);
	if (ret)
		goto err_ttm_bo_unreserve;
	ret = drm_gem_vram_kmap_locked(gbo, map);
	if (ret)
		goto err_drm_gem_vram_unpin_locked;

	ttm_bo_unreserve(&gbo->bo);

	return 0;

err_drm_gem_vram_unpin_locked:
	drm_gem_vram_unpin_locked(gbo);
err_ttm_bo_unreserve:
	ttm_bo_unreserve(&gbo->bo);
	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_vmap);
0458
0459
0460
0461
0462
0463
0464
0465
0466
/**
 * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object
 * @gbo: The GEM VRAM object to unmap
 * @map: Kernel virtual address where the VRAM GEM object was mapped
 *
 * A call to this function is only valid if @gbo was previously mapped
 * with drm_gem_vram_vmap().
 */
void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo,
			 struct iosys_map *map)
{
	int ret;

	ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
	if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
		return;

	drm_gem_vram_kunmap_locked(gbo, map);
	drm_gem_vram_unpin_locked(gbo);

	ttm_bo_unreserve(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_vunmap);
0482
0483
0484
0485
0486
0487
0488
0489
0490
0491
0492
0493
0494
0495
0496
0497
0498
0499
0500
0501
/**
 * drm_gem_vram_fill_create_dumb() - Helper for implementing
 *				     &struct drm_driver.dumb_create
 * @file:		the DRM file
 * @dev:		the DRM device
 * @pg_align:		the buffer's alignment in multiples of the page size
 * @pitch_align:	the scanline's alignment in powers of 2; pass 0 for
 *			no alignment
 * @args:		the arguments as provided to
 *			&struct drm_driver.dumb_create
 *
 * Creates a buffer object suitable for dumb-buffer scanout and stores the
 * resulting handle, pitch and size in @args.
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */
int drm_gem_vram_fill_create_dumb(struct drm_file *file,
				  struct drm_device *dev,
				  unsigned long pg_align,
				  unsigned long pitch_align,
				  struct drm_mode_create_dumb *args)
{
	size_t pitch, size;
	struct drm_gem_vram_object *gbo;
	int ret;
	u32 handle;

	/*
	 * NOTE(review): no overflow checks on width * bytes-per-pixel or
	 * pitch * height here; presumably the dumb-buffer ioctl path has
	 * validated the dimensions — confirm against the caller.
	 */
	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	if (pitch_align) {
		if (WARN_ON_ONCE(!is_power_of_2(pitch_align)))
			return -EINVAL;
		pitch = ALIGN(pitch, pitch_align);
	}
	size = pitch * args->height;

	size = roundup(size, PAGE_SIZE);
	if (!size)
		return -EINVAL;

	gbo = drm_gem_vram_create(dev, size, pg_align);
	if (IS_ERR(gbo))
		return PTR_ERR(gbo);

	ret = drm_gem_handle_create(file, &gbo->bo.base, &handle);
	if (ret)
		goto err_drm_gem_object_put;

	/* handle now holds a reference; drop the creation reference */
	drm_gem_object_put(&gbo->bo.base);

	args->pitch = pitch;
	args->size = size;
	args->handle = handle;

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(&gbo->bo.base);
	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);
0546
0547
0548
0549
0550
/*
 * Returns true if @bo was created by the VRAM helpers, identified by
 * the destroy callback installed in drm_gem_vram_create().
 */
static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
{
	return (bo->destroy == ttm_buffer_object_destroy);
}
0555
/* On eviction, buffers are moved to system memory. */
static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo,
					       struct ttm_placement *pl)
{
	drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
	*pl = gbo->placement;
}
0562
/*
 * Releases the cached kernel mapping before the buffer moves or is
 * deleted; the mapping would become stale at its old location. A buffer
 * with active vmap users must not be moved, hence the WARN.
 */
static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo)
{
	struct ttm_buffer_object *bo = &gbo->bo;
	struct drm_device *dev = bo->base.dev;

	if (drm_WARN_ON_ONCE(dev, gbo->vmap_use_count))
		return;

	ttm_bo_vunmap(bo, &gbo->map);
	iosys_map_clear(&gbo->map); /* explicitly clear mapping for next vmap call */
}
0574
/* Move @gbo to @new_mem by memcpy, dropping the cached mapping first. */
static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
				       bool evict,
				       struct ttm_operation_ctx *ctx,
				       struct ttm_resource *new_mem)
{
	drm_gem_vram_bo_driver_move_notify(gbo);
	return ttm_bo_move_memcpy(&gbo->bo, ctx, new_mem);
}
0583
0584
0585
0586
0587
0588
0589
0590
0591
0592
/* Implements &struct drm_gem_object_funcs.free */
static void drm_gem_vram_object_free(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_put(gbo);
}
0599
0600
0601
0602
0603
0604
0605
0606
0607
0608
0609
0610
0611
0612
0613
0614
0615
0616
0617
0618
/**
 * drm_gem_vram_driver_dumb_create() - Implements
 *				       &struct drm_driver.dumb_create
 * @file:	the DRM file
 * @dev:	the DRM device
 * @args:	the arguments as provided to
 *		&struct drm_driver.dumb_create
 *
 * Wraps drm_gem_vram_fill_create_dumb() with default page and pitch
 * alignment. The device's VRAM MM must have been initialized.
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */
int drm_gem_vram_driver_dumb_create(struct drm_file *file,
				    struct drm_device *dev,
				    struct drm_mode_create_dumb *args)
{
	if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
		return -EINVAL;

	return drm_gem_vram_fill_create_dumb(file, dev, 0, 0, args);
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create);
0629
0630
0631
0632
0633
0634 static void __drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
0635 struct drm_plane_state *state,
0636 unsigned int num_planes)
0637 {
0638 struct drm_gem_object *obj;
0639 struct drm_gem_vram_object *gbo;
0640 struct drm_framebuffer *fb = state->fb;
0641
0642 while (num_planes) {
0643 --num_planes;
0644 obj = drm_gem_fb_get_obj(fb, num_planes);
0645 if (!obj)
0646 continue;
0647 gbo = drm_gem_vram_of_gem(obj);
0648 drm_gem_vram_unpin(gbo);
0649 }
0650 }
0651
0652
0653
0654
0655
0656
0657
0658
0659
0660
0661
0662
0663
0664
0665
/**
 * drm_gem_vram_plane_helper_prepare_fb() - Implements
 *	&struct drm_plane_helper_funcs.prepare_fb
 * @plane:	a DRM plane
 * @new_state:	the plane's new state
 *
 * During plane updates, this function sets the plane's fence and pins
 * the GEM VRAM objects of the plane's new framebuffer to VRAM. Call
 * drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise.
 */
int
drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
				     struct drm_plane_state *new_state)
{
	struct drm_framebuffer *fb = new_state->fb;
	struct drm_gem_vram_object *gbo;
	struct drm_gem_object *obj;
	unsigned int i;
	int ret;

	if (!fb)
		return 0;

	for (i = 0; i < fb->format->num_planes; ++i) {
		obj = drm_gem_fb_get_obj(fb, i);
		if (!obj) {
			ret = -EINVAL;
			goto err_drm_gem_vram_unpin;
		}
		gbo = drm_gem_vram_of_gem(obj);
		ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
		if (ret)
			goto err_drm_gem_vram_unpin;
	}

	ret = drm_gem_plane_helper_prepare_fb(plane, new_state);
	if (ret)
		goto err_drm_gem_vram_unpin;

	return 0;

err_drm_gem_vram_unpin:
	/* @i is the number of planes pinned so far; unpin exactly those */
	__drm_gem_vram_plane_helper_cleanup_fb(plane, new_state, i);
	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb);
0702
0703
0704
0705
0706
0707
0708
0709
0710
0711
0712
/**
 * drm_gem_vram_plane_helper_cleanup_fb() - Implements
 *	&struct drm_plane_helper_funcs.cleanup_fb
 * @plane:	a DRM plane
 * @old_state:	the plane's old state
 *
 * During plane updates, this function unpins the GEM VRAM objects of
 * the plane's old framebuffer from VRAM. Complements
 * drm_gem_vram_plane_helper_prepare_fb().
 */
void
drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
				     struct drm_plane_state *old_state)
{
	struct drm_framebuffer *fb = old_state->fb;

	if (!fb)
		return;

	__drm_gem_vram_plane_helper_cleanup_fb(plane, old_state, fb->format->num_planes);
}
EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);
0725
0726
0727
0728
0729
0730
0731
0732
0733
0734
0735
0736
0737
0738
0739
0740
0741
0742
0743
/**
 * drm_gem_vram_simple_display_pipe_prepare_fb() - Implements
 *	&struct drm_simple_display_pipe_funcs.prepare_fb
 * @pipe:	a simple display pipe
 * @new_state:	the plane's new state
 *
 * Forwards to drm_gem_vram_plane_helper_prepare_fb() for the pipe's plane.
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise.
 */
int drm_gem_vram_simple_display_pipe_prepare_fb(
	struct drm_simple_display_pipe *pipe,
	struct drm_plane_state *new_state)
{
	return drm_gem_vram_plane_helper_prepare_fb(&pipe->plane, new_state);
}
EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb);
0751
0752
0753
0754
0755
0756
0757
0758
0759
0760
0761
/**
 * drm_gem_vram_simple_display_pipe_cleanup_fb() - Implements
 *	&struct drm_simple_display_pipe_funcs.cleanup_fb
 * @pipe:	a simple display pipe
 * @old_state:	the plane's old state
 *
 * Forwards to drm_gem_vram_plane_helper_cleanup_fb() for the pipe's plane.
 */
void drm_gem_vram_simple_display_pipe_cleanup_fb(
	struct drm_simple_display_pipe *pipe,
	struct drm_plane_state *old_state)
{
	drm_gem_vram_plane_helper_cleanup_fb(&pipe->plane, old_state);
}
EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb);
0769
0770
0771
0772
0773
0774
0775
0776
0777
0778
0779
0780
0781
0782
/* Implements &struct drm_gem_object_funcs.pin */
static int drm_gem_vram_object_pin(struct drm_gem_vram_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	/*
	 * Pin at the buffer's current location (pl_flag == 0) rather
	 * than forcing a move; drm_gem_vram_pin_locked() skips placement
	 * changes in that case.
	 */
	return drm_gem_vram_pin(gbo, 0);
}
0797
0798
0799
0800
0801
0802
/* Implements &struct drm_gem_object_funcs.unpin */
static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_unpin(gbo);
}
0809
0810
0811
0812
0813
0814
0815
0816
0817
0818
0819
/*
 * Implements &struct drm_gem_object_funcs.vmap. Pins the buffer and
 * stores the kernel mapping in @map.
 */
static int drm_gem_vram_object_vmap(struct drm_gem_object *gem,
				    struct iosys_map *map)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	return drm_gem_vram_vmap(gbo, map);
}
0827
0828
0829
0830
0831
0832
0833
/*
 * Implements &struct drm_gem_object_funcs.vunmap. Unmaps and unpins
 * the buffer that was mapped by drm_gem_vram_object_vmap().
 */
static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
				       struct iosys_map *map)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_vunmap(gbo, map);
}
0841
0842
0843
0844
0845
/*
 * Default GEM object functions for VRAM-backed objects; installed by
 * drm_gem_vram_create() unless the driver provides its own table.
 */
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
	.free	= drm_gem_vram_object_free,
	.pin	= drm_gem_vram_object_pin,
	.unpin	= drm_gem_vram_object_unpin,
	.vmap	= drm_gem_vram_object_vmap,
	.vunmap	= drm_gem_vram_object_vunmap,
	.mmap   = drm_gem_ttm_mmap,
	.print_info = drm_gem_ttm_print_info,
};
0855
0856
0857
0858
0859
0860
0861
0862
0863
/* Implements &struct ttm_device_funcs.ttm_tt_destroy */
static void bo_driver_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}
0869
0870
0871
0872
0873
/*
 * Implements &struct ttm_device_funcs.ttm_tt_create. Allocates a plain,
 * cached TTM for system-memory backing pages. Returns NULL on failure,
 * as required by the TTM interface.
 */
static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
					      uint32_t page_flags)
{
	struct ttm_tt *tt;
	int ret;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	ret = ttm_tt_init(tt, bo, page_flags, ttm_cached, 0);
	if (ret < 0)
		goto err_ttm_tt_init;

	return tt;

err_ttm_tt_init:
	kfree(tt);
	return NULL;
}
0894
/* Implements &struct ttm_device_funcs.evict_flags */
static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
				  struct ttm_placement *placement)
{
	struct drm_gem_vram_object *gbo;

	/* TTM may pass BOs that are not GEM VRAM objects */
	if (!drm_is_gem_vram(bo))
		return;

	gbo = drm_gem_vram_of_bo(bo);

	drm_gem_vram_bo_driver_evict_flags(gbo, placement);
}
0908
/* Implements &struct ttm_device_funcs.delete_mem_notify */
static void bo_driver_delete_mem_notify(struct ttm_buffer_object *bo)
{
	struct drm_gem_vram_object *gbo;

	/* TTM may pass BOs that are not GEM VRAM objects */
	if (!drm_is_gem_vram(bo))
		return;

	gbo = drm_gem_vram_of_bo(bo);

	drm_gem_vram_bo_driver_move_notify(gbo);
}
0921
/* Implements &struct ttm_device_funcs.move; @hop is unused here. */
static int bo_driver_move(struct ttm_buffer_object *bo,
			  bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_resource *new_mem,
			  struct ttm_place *hop)
{
	struct drm_gem_vram_object *gbo;

	gbo = drm_gem_vram_of_bo(bo);

	return drm_gem_vram_bo_driver_move(gbo, evict, ctx, new_mem);
}
0934
/*
 * Implements &struct ttm_device_funcs.io_mem_reserve. Fills in the bus
 * address information for CPU access to the resource.
 */
static int bo_driver_io_mem_reserve(struct ttm_device *bdev,
				    struct ttm_resource *mem)
{
	struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:	/* nothing to do: not I/O memory */
		break;
	case TTM_PL_VRAM:
		/* VRAM is mapped write-combined at vram_base + page offset */
		mem->bus.offset = (mem->start << PAGE_SHIFT) + vmm->vram_base;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_write_combined;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
0954
/* TTM device callbacks for the VRAM memory manager */
static struct ttm_device_funcs bo_driver = {
	.ttm_tt_create = bo_driver_ttm_tt_create,
	.ttm_tt_destroy = bo_driver_ttm_tt_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = bo_driver_evict_flags,
	.move = bo_driver_move,
	.delete_mem_notify = bo_driver_delete_mem_notify,
	.io_mem_reserve = bo_driver_io_mem_reserve,
};
0964
0965
0966
0967
0968
/* Dumps the VRAM resource manager's state into a debugfs seq_file. */
static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
	struct ttm_resource_manager *man = ttm_manager_type(&vmm->bdev, TTM_PL_VRAM);
	struct drm_printer p = drm_seq_file_printer(m);

	ttm_resource_manager_debug(man, &p);
	return 0;
}
0979
/* debugfs entries registered by drm_vram_mm_debugfs_init() */
static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
	{ "vram-mm", drm_vram_mm_debugfs, 0, NULL },
};
0983
0984
0985
0986
0987
0988
0989
/**
 * drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file.
 * @minor: drm minor device.
 */
void drm_vram_mm_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(drm_vram_mm_debugfs_list,
				 ARRAY_SIZE(drm_vram_mm_debugfs_list),
				 minor->debugfs_root, minor);
}
EXPORT_SYMBOL(drm_vram_mm_debugfs_init);
0997
0998 static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
0999 uint64_t vram_base, size_t vram_size)
1000 {
1001 int ret;
1002
1003 vmm->vram_base = vram_base;
1004 vmm->vram_size = vram_size;
1005
1006 ret = ttm_device_init(&vmm->bdev, &bo_driver, dev->dev,
1007 dev->anon_inode->i_mapping,
1008 dev->vma_offset_manager,
1009 false, true);
1010 if (ret)
1011 return ret;
1012
1013 ret = ttm_range_man_init(&vmm->bdev, TTM_PL_VRAM,
1014 false, vram_size >> PAGE_SHIFT);
1015 if (ret)
1016 return ret;
1017
1018 return 0;
1019 }
1020
/* Tears down the state set up by drm_vram_mm_init(), in reverse order. */
static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
{
	ttm_range_man_fini(&vmm->bdev, TTM_PL_VRAM);
	ttm_device_fini(&vmm->bdev);
}
1026
1027
1028
1029
1030
/*
 * Allocates and initializes dev->vram_mm. Returns the new instance on
 * success, or an ERR_PTR()-encoded error code otherwise. Must only be
 * called once per device.
 */
static struct drm_vram_mm *drm_vram_helper_alloc_mm(struct drm_device *dev, uint64_t vram_base,
						    size_t vram_size)
{
	int ret;

	/* double initialization is a driver bug */
	if (WARN_ON(dev->vram_mm))
		return dev->vram_mm;

	dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL);
	if (!dev->vram_mm)
		return ERR_PTR(-ENOMEM);

	ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size);
	if (ret)
		goto err_kfree;

	return dev->vram_mm;

err_kfree:
	kfree(dev->vram_mm);
	dev->vram_mm = NULL;
	return ERR_PTR(ret);
}
1054
/* Cleans up and frees dev->vram_mm; safe to call if none was allocated. */
static void drm_vram_helper_release_mm(struct drm_device *dev)
{
	if (!dev->vram_mm)
		return;

	drm_vram_mm_cleanup(dev->vram_mm);
	kfree(dev->vram_mm);
	dev->vram_mm = NULL;
}
1064
/* drmm release action; registered by drmm_vram_helper_init(). */
static void drm_vram_mm_release(struct drm_device *dev, void *ptr)
{
	drm_vram_helper_release_mm(dev);
}
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
/**
 * drmm_vram_helper_init - Initializes a device's instance of
 *                         &struct drm_vram_mm
 * @dev:	the DRM device
 * @vram_base:	the base address of the video memory
 * @vram_size:	the size of the video memory in bytes
 *
 * Creates a new instance of &struct drm_vram_mm and stores it in
 * struct &drm_device.vram_mm. The instance is auto-managed and cleaned
 * up as part of device cleanup. Calling this function multiple times
 * will generate a warning.
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise.
 */
int drmm_vram_helper_init(struct drm_device *dev, uint64_t vram_base,
			  size_t vram_size)
{
	struct drm_vram_mm *vram_mm;

	if (drm_WARN_ON_ONCE(dev, dev->vram_mm))
		return 0;

	vram_mm = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
	if (IS_ERR(vram_mm))
		return PTR_ERR(vram_mm);
	/* on action-registration failure, the reset releases the MM again */
	return drmm_add_action_or_reset(dev, drm_vram_mm_release, NULL);
}
EXPORT_SYMBOL(drmm_vram_helper_init);
1099
1100
1101
1102
1103
/*
 * Tests whether a framebuffer for @mode, at @max_bpp bytes per pixel,
 * fits into half of the device's video memory; the other half is
 * reserved (e.g. for double buffering).
 */
static enum drm_mode_status
drm_vram_helper_mode_valid_internal(struct drm_device *dev,
				    const struct drm_display_mode *mode,
				    unsigned long max_bpp)
{
	struct drm_vram_mm *vmm = dev->vram_mm;
	unsigned long fbsize, fbpages, max_fbpages;

	if (WARN_ON(!dev->vram_mm))
		return MODE_BAD;

	max_fbpages = (vmm->vram_size / 2) >> PAGE_SHIFT;

	/*
	 * NOTE(review): hdisplay * vdisplay * max_bpp could overflow a
	 * 32-bit unsigned long for extreme mode dimensions — confirm
	 * mode sizes are bounded by the caller.
	 */
	fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
	fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);

	if (fbpages > max_fbpages)
		return MODE_MEM;

	return MODE_OK;
}
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151 enum drm_mode_status
1152 drm_vram_helper_mode_valid(struct drm_device *dev,
1153 const struct drm_display_mode *mode)
1154 {
1155 static const unsigned long max_bpp = 4;
1156
1157 return drm_vram_helper_mode_valid_internal(dev, mode, max_bpp);
1158 }
1159 EXPORT_SYMBOL(drm_vram_helper_mode_valid);
1160
1161 MODULE_DESCRIPTION("DRM VRAM memory-management helpers");
1162 MODULE_LICENSE("GPL");