0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028 #define pr_fmt(fmt) "[TTM DEVICE] " fmt
0029
0030 #include <linux/mm.h>
0031
0032 #include <drm/ttm/ttm_device.h>
0033 #include <drm/ttm/ttm_tt.h>
0034 #include <drm/ttm/ttm_placement.h>
0035 #include <drm/ttm/ttm_bo_api.h>
0036
0037 #include "ttm_module.h"
0038
0039
0040
0041
/* Serializes init/teardown of the shared global TTM state below. */
static DEFINE_MUTEX(ttm_global_mutex);
/* Number of live users of ttm_glob; guarded by ttm_global_mutex. */
static unsigned ttm_glob_use_count;
struct ttm_global ttm_glob;
EXPORT_SYMBOL(ttm_glob);

/* Root of TTM's debugfs tree; NULL when debugfs creation failed. */
struct dentry *ttm_debugfs_root;
0048
0049 static void ttm_global_release(void)
0050 {
0051 struct ttm_global *glob = &ttm_glob;
0052
0053 mutex_lock(&ttm_global_mutex);
0054 if (--ttm_glob_use_count > 0)
0055 goto out;
0056
0057 ttm_pool_mgr_fini();
0058 debugfs_remove(ttm_debugfs_root);
0059
0060 __free_page(glob->dummy_read_page);
0061 memset(glob, 0, sizeof(*glob));
0062 out:
0063 mutex_unlock(&ttm_global_mutex);
0064 }
0065
/*
 * Take one reference on the global TTM state, initializing it on first
 * use: debugfs root, pool/TT manager limits derived from system memory,
 * and the shared dummy read page. Balanced by ttm_global_release().
 *
 * Returns 0 on success or a negative errno; on failure the reference
 * taken here is dropped again before returning.
 */
static int ttm_global_init(void)
{
	struct ttm_global *glob = &ttm_glob;
	unsigned long num_pages, num_dma32;
	struct sysinfo si;
	int ret = 0;

	mutex_lock(&ttm_global_mutex);
	if (++ttm_glob_use_count > 1)
		goto out;

	si_meminfo(&si);

	/* A failed debugfs dir is non-fatal; later files are just skipped. */
	ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
	if (IS_ERR(ttm_debugfs_root)) {
		ttm_debugfs_root = NULL;
	}

	/*
	 * Limit the pool to about 50% of total system memory so TTM
	 * allocations cannot consume everything.
	 */
	num_pages = ((u64)si.totalram * si.mem_unit) >> PAGE_SHIFT;
	num_pages /= 2;

	/* For DMA32 pages, additionally cap usage at 2 GiB of lowmem. */
	num_dma32 = (u64)(si.totalram - si.totalhigh) * si.mem_unit
		>> PAGE_SHIFT;
	num_dma32 = min(num_dma32, 2UL << (30 - PAGE_SHIFT));

	ttm_pool_mgr_init(num_pages);
	ttm_tt_mgr_init(num_pages, num_dma32);

	/* Zero-filled page (__GFP_ZERO); released in ttm_global_release(). */
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&glob->device_list);
	atomic_set(&glob->bo_count, 0);

	debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root,
				&glob->bo_count);
out:
	/* On error, undo the debugfs dir and the use count taken above. */
	if (ret && ttm_debugfs_root)
		debugfs_remove(ttm_debugfs_root);
	if (ret)
		--ttm_glob_use_count;
	mutex_unlock(&ttm_global_mutex);
	return ret;
}
0118
0119
0120
0121
0122
0123 int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
0124 {
0125 struct ttm_global *glob = &ttm_glob;
0126 struct ttm_device *bdev;
0127 int ret = 0;
0128
0129 mutex_lock(&ttm_global_mutex);
0130 list_for_each_entry(bdev, &glob->device_list, device_list) {
0131 ret = ttm_device_swapout(bdev, ctx, gfp_flags);
0132 if (ret > 0) {
0133 list_move_tail(&bdev->device_list, &glob->device_list);
0134 break;
0135 }
0136 }
0137 mutex_unlock(&ttm_global_mutex);
0138 return ret;
0139 }
0140 EXPORT_SYMBOL(ttm_global_swapout);
0141
/*
 * Try to swap out one buffer object from @bdev's TT-backed resource
 * managers, starting with TTM_PL_SYSTEM.
 *
 * Returns the number of pages of the swapped-out BO on success, 0 if
 * nothing could be swapped, or a negative error code forwarded from
 * ttm_bo_swapout() (other than -EBUSY, which just continues the scan).
 */
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		       gfp_t gfp_flags)
{
	struct ttm_resource_cursor cursor;
	struct ttm_resource_manager *man;
	struct ttm_resource *res;
	unsigned i;
	int ret;

	spin_lock(&bdev->lru_lock);
	for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
		man = ttm_manager_type(bdev, i);
		/* Only TT-backed managers hold pages that can be swapped. */
		if (!man || !man->use_tt)
			continue;

		ttm_resource_manager_for_each_res(man, &cursor, res) {
			struct ttm_buffer_object *bo = res->bo;
			uint32_t num_pages;

			if (!bo)
				continue;

			num_pages = PFN_UP(bo->base.size);
			ret = ttm_bo_swapout(bo, ctx, gfp_flags);
			/*
			 * NOTE(review): the early returns below leave without
			 * a spin_unlock, which implies ttm_bo_swapout() drops
			 * lru_lock on these paths — confirm against its
			 * implementation before touching this locking.
			 */
			if (!ret)
				return num_pages;
			if (ret != -EBUSY)
				return ret;
		}
	}
	spin_unlock(&bdev->lru_lock);
	return 0;
}
EXPORT_SYMBOL(ttm_device_swapout);
0177
0178 static void ttm_device_delayed_workqueue(struct work_struct *work)
0179 {
0180 struct ttm_device *bdev =
0181 container_of(work, struct ttm_device, wq.work);
0182
0183 if (!ttm_bo_delayed_delete(bdev, false))
0184 schedule_delayed_work(&bdev->wq,
0185 ((HZ / 100) < 1) ? 1 : HZ / 100);
0186 }
0187
0188
0189
0190
0191
0192
0193
0194
0195
0196
0197
0198
0199
0200
0201
0202
/*
 * ttm_device_init - initialize a struct ttm_device supplied by the driver.
 * @bdev: device to initialize.
 * @funcs: driver callback table.
 * @dev: underlying struct device, forwarded to ttm_pool_init().
 * @mapping: address space used for mmap of buffer objects.
 * @vma_manager: mmap offset manager; must not be NULL.
 * @use_dma_alloc: forwarded to ttm_pool_init().
 * @use_dma32: forwarded to ttm_pool_init().
 *
 * Takes a reference on the global TTM state and links @bdev into the
 * global device list. Undone by ttm_device_fini().
 *
 * Returns 0 on success or a negative errno.
 */
int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
		    struct device *dev, struct address_space *mapping,
		    struct drm_vma_offset_manager *vma_manager,
		    bool use_dma_alloc, bool use_dma32)
{
	struct ttm_global *glob = &ttm_glob;
	int ret;

	if (WARN_ON(vma_manager == NULL))
		return -EINVAL;

	ret = ttm_global_init();
	if (ret)
		return ret;

	bdev->funcs = funcs;

	ttm_sys_man_init(bdev);
	ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);

	bdev->vma_manager = vma_manager;
	INIT_DELAYED_WORK(&bdev->wq, ttm_device_delayed_workqueue);
	spin_lock_init(&bdev->lru_lock);
	INIT_LIST_HEAD(&bdev->ddestroy);
	INIT_LIST_HEAD(&bdev->pinned);
	bdev->dev_mapping = mapping;
	mutex_lock(&ttm_global_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&ttm_global_mutex);

	return 0;
}
EXPORT_SYMBOL(ttm_device_init);
0236
0237 void ttm_device_fini(struct ttm_device *bdev)
0238 {
0239 struct ttm_resource_manager *man;
0240 unsigned i;
0241
0242 man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
0243 ttm_resource_manager_set_used(man, false);
0244 ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
0245
0246 mutex_lock(&ttm_global_mutex);
0247 list_del(&bdev->device_list);
0248 mutex_unlock(&ttm_global_mutex);
0249
0250 cancel_delayed_work_sync(&bdev->wq);
0251
0252 if (ttm_bo_delayed_delete(bdev, true))
0253 pr_debug("Delayed destroy list was clean\n");
0254
0255 spin_lock(&bdev->lru_lock);
0256 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
0257 if (list_empty(&man->lru[0]))
0258 pr_debug("Swap list %d was clean\n", i);
0259 spin_unlock(&bdev->lru_lock);
0260
0261 ttm_pool_fini(&bdev->pool);
0262 ttm_global_release();
0263 }
0264 EXPORT_SYMBOL(ttm_device_fini);
0265
/*
 * Drain one LRU list, unpopulating the ttm_tt of every buffer object
 * found on it so that any backing-page mappings are torn down.
 */
static void ttm_device_clear_lru_dma_mappings(struct ttm_device *bdev,
					      struct list_head *list)
{
	struct ttm_resource *res;

	spin_lock(&bdev->lru_lock);
	while ((res = list_first_entry_or_null(list, typeof(*res), lru))) {
		struct ttm_buffer_object *bo = res->bo;

		/*
		 * Take a reference so the BO cannot be freed while lru_lock
		 * is dropped below.
		 *
		 * NOTE(review): when the refcount is already zero we retry
		 * without removing res from the list; this relies on the
		 * concurrent release path removing the entry — confirm it
		 * can do so while we hold lru_lock, otherwise this can spin.
		 */
		if (!ttm_bo_get_unless_zero(bo))
			continue;

		list_del_init(&res->lru);
		spin_unlock(&bdev->lru_lock);

		/* Presumably releases DMA mappings of the backing pages. */
		if (bo->ttm)
			ttm_tt_unpopulate(bo->bdev, bo->ttm);

		ttm_bo_put(bo);
		spin_lock(&bdev->lru_lock);
	}
	spin_unlock(&bdev->lru_lock);
}
0290
0291 void ttm_device_clear_dma_mappings(struct ttm_device *bdev)
0292 {
0293 struct ttm_resource_manager *man;
0294 unsigned int i, j;
0295
0296 ttm_device_clear_lru_dma_mappings(bdev, &bdev->pinned);
0297
0298 for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
0299 man = ttm_manager_type(bdev, i);
0300 if (!man || !man->use_tt)
0301 continue;
0302
0303 for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j)
0304 ttm_device_clear_lru_dma_mappings(bdev, &man->lru[j]);
0305 }
0306 }
0307 EXPORT_SYMBOL(ttm_device_clear_dma_mappings);