0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032 #define pr_fmt(fmt) "[TTM] " fmt
0033
0034 #include <linux/sched.h>
0035 #include <linux/shmem_fs.h>
0036 #include <linux/file.h>
0037 #include <linux/module.h>
0038 #include <drm/drm_cache.h>
0039 #include <drm/ttm/ttm_bo_driver.h>
0040
0041 #include "ttm_module.h"
0042
/* Global page-count ceiling; 0 until ttm_tt_mgr_init() supplies a default. */
static unsigned long ttm_pages_limit;

MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
module_param_named(pages_limit, ttm_pages_limit, ulong, 0644);

/* Separate ceiling for pages that must come from the DMA32 zone. */
static unsigned long ttm_dma32_pages_limit;

MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);

/*
 * Running totals of pages currently populated through TTM; compared
 * against the limits above in ttm_tt_populate() to trigger swapout.
 */
static atomic_long_t ttm_pages_allocated;
static atomic_long_t ttm_dma32_pages_allocated;
0055
0056
0057
0058
0059 int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
0060 {
0061 struct ttm_device *bdev = bo->bdev;
0062 uint32_t page_flags = 0;
0063
0064 dma_resv_assert_held(bo->base.resv);
0065
0066 if (bo->ttm)
0067 return 0;
0068
0069 switch (bo->type) {
0070 case ttm_bo_type_device:
0071 if (zero_alloc)
0072 page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
0073 break;
0074 case ttm_bo_type_kernel:
0075 break;
0076 case ttm_bo_type_sg:
0077 page_flags |= TTM_TT_FLAG_EXTERNAL;
0078 break;
0079 default:
0080 pr_err("Illegal buffer object type\n");
0081 return -EINVAL;
0082 }
0083
0084 bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
0085 if (unlikely(bo->ttm == NULL))
0086 return -ENOMEM;
0087
0088 WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&
0089 !(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));
0090
0091 return 0;
0092 }
0093
0094
0095
0096
0097 static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
0098 {
0099 ttm->pages = kvcalloc(ttm->num_pages, sizeof(void*), GFP_KERNEL);
0100 if (!ttm->pages)
0101 return -ENOMEM;
0102
0103 return 0;
0104 }
0105
0106 static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
0107 {
0108 ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +
0109 sizeof(*ttm->dma_address), GFP_KERNEL);
0110 if (!ttm->pages)
0111 return -ENOMEM;
0112
0113 ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
0114 return 0;
0115 }
0116
0117 static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
0118 {
0119 ttm->dma_address = kvcalloc(ttm->num_pages, sizeof(*ttm->dma_address),
0120 GFP_KERNEL);
0121 if (!ttm->dma_address)
0122 return -ENOMEM;
0123
0124 return 0;
0125 }
0126
/* Destroy @ttm through the driver's callback; driver owns the teardown. */
void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	bdev->funcs->ttm_tt_destroy(bdev, ttm);
}
0131
0132 static void ttm_tt_init_fields(struct ttm_tt *ttm,
0133 struct ttm_buffer_object *bo,
0134 uint32_t page_flags,
0135 enum ttm_caching caching,
0136 unsigned long extra_pages)
0137 {
0138 ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;
0139 ttm->caching = ttm_cached;
0140 ttm->page_flags = page_flags;
0141 ttm->dma_address = NULL;
0142 ttm->swap_storage = NULL;
0143 ttm->sg = bo->sg;
0144 ttm->caching = caching;
0145 }
0146
0147 int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
0148 uint32_t page_flags, enum ttm_caching caching,
0149 unsigned long extra_pages)
0150 {
0151 ttm_tt_init_fields(ttm, bo, page_flags, caching, extra_pages);
0152
0153 if (ttm_tt_alloc_page_directory(ttm)) {
0154 pr_err("Failed allocating page table\n");
0155 return -ENOMEM;
0156 }
0157 return 0;
0158 }
0159 EXPORT_SYMBOL(ttm_tt_init);
0160
0161 void ttm_tt_fini(struct ttm_tt *ttm)
0162 {
0163 WARN_ON(ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);
0164
0165 if (ttm->swap_storage)
0166 fput(ttm->swap_storage);
0167 ttm->swap_storage = NULL;
0168
0169 if (ttm->pages)
0170 kvfree(ttm->pages);
0171 else
0172 kvfree(ttm->dma_address);
0173 ttm->pages = NULL;
0174 ttm->dma_address = NULL;
0175 }
0176 EXPORT_SYMBOL(ttm_tt_fini);
0177
0178 int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
0179 uint32_t page_flags, enum ttm_caching caching)
0180 {
0181 int ret;
0182
0183 ttm_tt_init_fields(ttm, bo, page_flags, caching, 0);
0184
0185 if (page_flags & TTM_TT_FLAG_EXTERNAL)
0186 ret = ttm_sg_tt_alloc_page_directory(ttm);
0187 else
0188 ret = ttm_dma_tt_alloc_page_directory(ttm);
0189 if (ret) {
0190 pr_err("Failed allocating page table\n");
0191 return -ENOMEM;
0192 }
0193 return 0;
0194 }
0195 EXPORT_SYMBOL(ttm_sg_tt_init);
0196
/**
 * ttm_tt_swapin - copy swapped-out content back into the tt's pages.
 * @ttm: the ttm_tt to restore; must have swap_storage set and its
 *       page array already populated.
 *
 * Copies every page from the shmem swap file into the corresponding
 * entry of ttm->pages, then drops the swap file and clears
 * TTM_TT_FLAG_SWAPPED. On error the swap storage is kept so the
 * operation can be retried.
 *
 * Return: 0 on success, negative errno on failure.
 */
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	gfp_t gfp_mask;
	int i, ret;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;
	gfp_mask = mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		/* Page i of the shmem file holds page i of the tt. */
		from_page = shmem_read_mapping_page_gfp(swap_space, i,
							gfp_mask);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	/* All pages restored - release the swap file and clear the flag. */
	fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;

	return 0;

out_err:
	return ret;
}
0238
0239
0240
0241
0242
0243
0244
0245
0246
0247
0248
/**
 * ttm_tt_swapout - copy the tt's pages out to shmem swap storage.
 * @bdev: the ttm device the tt belongs to.
 * @ttm: the ttm_tt to swap out.
 * @gfp_flags: allocation flags, intersected with the swap mapping's mask.
 *
 * Creates a shmem file, copies every populated page into it, then
 * unpopulates the tt and marks it TTM_TT_FLAG_SWAPPED. On error the
 * shmem file is dropped and the tt is left populated.
 *
 * Return: the number of pages swapped out on success, negative errno
 * on failure.
 */
int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
		   gfp_t gfp_flags)
{
	loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i, ret;

	swap_storage = shmem_file_setup("ttm swap", size, 0);
	if (IS_ERR(swap_storage)) {
		pr_err("Failed allocating swap storage\n");
		return PTR_ERR(swap_storage);
	}

	swap_space = swap_storage->f_mapping;
	/* Never allocate with flags the swap mapping doesn't allow. */
	gfp_flags &= mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		/* Holes in the page array are simply skipped. */
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_flags);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		/* Mark dirty so shmem actually writes the data back. */
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	/* Content is safe in shmem - the backing pages can go. */
	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_TT_FLAG_SWAPPED;

	return ttm->num_pages;

out_err:
	fput(swap_storage);

	return ret;
}
0295
/**
 * ttm_tt_populate - allocate backing pages for a ttm_tt.
 * @bdev: the ttm device the tt belongs to.
 * @ttm: the ttm_tt to populate.
 * @ctx: operation context (may allow/forbid waiting).
 *
 * Charges the global page counters, swaps out other BOs while the
 * configured limits are exceeded, then allocates pages via the driver
 * callback or the default pool. If the tt was previously swapped out,
 * its content is swapped back in. On error the counter charge is
 * reverted.
 *
 * Return: 0 on success, negative errno on failure.
 */
int ttm_tt_populate(struct ttm_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_populated(ttm))
		return 0;

	/* EXTERNAL tts don't own their pages, so they aren't accounted. */
	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_add(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	/*
	 * We charged first; swap out other BOs until we are back under
	 * both limits, or until there is nothing left to swap out.
	 */
	while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
	       atomic_long_read(&ttm_dma32_pages_allocated) >
	       ttm_dma32_pages_limit) {

		ret = ttm_global_swapout(ctx, GFP_KERNEL);
		if (ret == 0)
			break;
		if (ret < 0)
			goto error;
	}

	if (bdev->funcs->ttm_tt_populate)
		ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		goto error;

	ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
	if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			/* unpopulate also reverts the counter charge. */
			ttm_tt_unpopulate(bdev, ttm);
			return ret;
		}
	}

	return 0;

error:
	/* Revert the charge made above before the allocation failed. */
	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_tt_populate);
0353
/**
 * ttm_tt_unpopulate - free the backing pages of a ttm_tt.
 * @bdev: the ttm device the tt belongs to.
 * @ttm: the ttm_tt to unpopulate.
 *
 * No-op if the tt isn't populated. Frees the pages through the driver
 * callback or the default pool and uncharges the global counters,
 * mirroring ttm_tt_populate().
 */
void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	if (bdev->funcs->ttm_tt_unpopulate)
		bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_free(&bdev->pool, ttm);

	/* Uncharge what ttm_tt_populate() charged for this tt. */
	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}
0373
0374 #ifdef CONFIG_DEBUG_FS
0375
0376
/*
 * Debugfs hook: reading the file forces one global swapout pass and
 * prints its result (pages freed, or a negative errno).
 */
static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
{
	/* No interruptible waits, no GPU-memory reclaim allowed. */
	struct ttm_operation_ctx ctx = { false, false };

	seq_printf(m, "%d\n", ttm_global_swapout(&ctx, GFP_KERNEL));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);

#endif
0387
0388
0389
0390
0391
0392
0393
/**
 * ttm_tt_mgr_init - register debugfs entries and set default limits.
 * @num_pages: default global page limit.
 * @num_dma32_pages: default DMA32 page limit.
 *
 * Module parameters, if set by the user, take precedence over the
 * defaults passed in here.
 */
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_tt_debugfs_shrink_fops);
#endif

	if (!ttm_pages_limit)
		ttm_pages_limit = num_pages;

	if (!ttm_dma32_pages_limit)
		ttm_dma32_pages_limit = num_dma32_pages;
}
0407
/*
 * Map page @i of the tt with the iterator's page protection and store
 * the kernel virtual address in @dmap.
 */
static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
				       struct iosys_map *dmap,
				       pgoff_t i)
{
	struct ttm_kmap_iter_tt *iter_tt =
		container_of(iter, typeof(*iter_tt), base);

	iosys_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
						       iter_tt->prot));
}
0418
/* Undo the kmap_local mapping established by the map_local callback. */
static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
					 struct iosys_map *map)
{
	kunmap_local(map->vaddr);
}
0424
/* kmap iterator ops for tt-backed memory; maps_tt marks it as such. */
static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
	.map_local = ttm_kmap_iter_tt_map_local,
	.unmap_local = ttm_kmap_iter_tt_unmap_local,
	.maps_tt = true,
};
0430
0431
0432
0433
0434
0435
0436
0437
0438 struct ttm_kmap_iter *
0439 ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
0440 struct ttm_tt *tt)
0441 {
0442 iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
0443 iter_tt->tt = tt;
0444 if (tt)
0445 iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
0446 else
0447 iter_tt->prot = PAGE_KERNEL;
0448
0449 return &iter_tt->base;
0450 }
0451 EXPORT_SYMBOL(ttm_kmap_iter_tt_init);