#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>

static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				       struct vm_fault *vmf)
{
	long err = 0;

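	/*
	 * Quick non-stalling check for idle.
	 */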
	if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL))
		return 0;

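	/*
	 * If possible, avoid waiting for GPU with mmap_lock held. We
	 * only do this if the fault allows retry and this is the
	 * first attempt.
	 */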
	if (fault_flag_allow_retry_first(vmf->flags)) {
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			return VM_FAULT_RETRY;

		ttm_bo_get(bo);
		mmap_read_unlock(vmf->vma->vm_mm);
		(void)dma_resv_wait_timeout(bo->base.resv,
					    DMA_RESV_USAGE_KERNEL, true,
					    MAX_SCHEDULE_TIMEOUT);
		dma_resv_unlock(bo->base.resv);
		ttm_bo_put(bo);
		return VM_FAULT_RETRY;
	}

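	/*
	 * Ordinary wait.
	 */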
	err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true,
				    MAX_SCHEDULE_TIMEOUT);
	if (unlikely(err < 0)) {
		return (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;
	}

	return 0;
}

static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
				       unsigned long page_offset)
{
	struct ttm_device *bdev = bo->bdev;

	if (bdev->funcs->io_mem_pfn)
		return bdev->funcs->io_mem_pfn(bo, page_offset);

	return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset;
}

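/**
 * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
 * @bo: The buffer object
 * @vmf: The fault structure handed to the callback
 *
 * Tries to reserve @bo without blocking. If the reservation would block and
 * the fault allows retry, the mmap_lock is dropped and the fault is retried
 * after waiting for the buffer to become unreserved, so that other threads
 * sharing the address space are not stalled on this buffer object.
 *
 * Return:
 *    0 on success with the bo reserved,
 *    VM_FAULT_RETRY if the fault should be retried after a blocking wait,
 *    VM_FAULT_NOPAGE if the interruptible blocking wait was interrupted,
 *    VM_FAULT_SIGBUS if the bo is backed by non-mappable external pages.
 */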
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
			     struct vm_fault *vmf)
{
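	/*
	 * Work around the fault / reserve locking order: take the bo
	 * reservation with a trylock while holding the mmap_lock, and if
	 * that fails, retry the fault after waiting for the buffer to
	 * become unreserved.
	 */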
	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
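		/*
		 * If the fault allows retry and this is the first
		 * attempt, drop the mmap_lock before waiting for the
		 * reservation.
		 */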
		if (fault_flag_allow_retry_first(vmf->flags)) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				ttm_bo_get(bo);
				mmap_read_unlock(vmf->vma->vm_mm);
				if (!dma_resv_lock_interruptible(bo->base.resv,
								 NULL))
					dma_resv_unlock(bo->base.resv);
				ttm_bo_put(bo);
			}

			return VM_FAULT_RETRY;
		}

		if (dma_resv_lock_interruptible(bo->base.resv, NULL))
			return VM_FAULT_NOPAGE;
	}

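	/*
	 * Refuse to fault imported (external) pages unless the backing
	 * ttm_tt is explicitly marked as mappable.
	 */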
	if (bo->ttm && (bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		if (!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) {
			dma_resv_unlock(bo->base.resv);
			return VM_FAULT_SIGBUS;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vm_reserve);

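/**
 * ttm_bo_vm_fault_reserved - TTM fault helper
 * @vmf: The struct vm_fault given as argument to the fault callback
 * @prot: The page protection to be used for this memory area.
 * @num_prefault: Maximum number of pages to prefault.
 *
 * Inserts one or more page table entries pointing to the memory backing the
 * buffer object, and returns a code instructing the caller to retry the page
 * access. The bo reservation must be held by the caller.
 *
 * Return:
 *   VM_FAULT_NOPAGE on success,
 *   VM_FAULT_SIGBUS on error reserving io memory or on out-of-range access,
 *   VM_FAULT_OOM on out-of-memory,
 *   VM_FAULT_RETRY if a retryable wait was performed.
 */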
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
				    pgprot_t prot,
				    pgoff_t num_prefault)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	struct ttm_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int err;
	pgoff_t i;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	unsigned long address = vmf->address;

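	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */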
	ret = ttm_bo_vm_fault_idle(bo, vmf);
	if (unlikely(ret != 0))
		return ret;

	err = ttm_mem_io_reserve(bdev, bo->resource);
	if (unlikely(err != 0))
		return VM_FAULT_SIGBUS;

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->base.vma_node);

	if (unlikely(page_offset >= bo->resource->num_pages))
		return VM_FAULT_SIGBUS;

	prot = ttm_io_prot(bo, bo->resource, prot);
	if (!bo->resource->bus.is_iomem) {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false,
			.force_alloc = true
		};

		ttm = bo->ttm;
		if (ttm_tt_populate(bdev, bo->ttm, &ctx))
			return VM_FAULT_OOM;
	} else {
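		/* Iomem should not be marked encrypted */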
		prot = pgprot_decrypted(prot);
	}

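	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */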
	for (i = 0; i < num_prefault; ++i) {
		if (bo->resource->bus.is_iomem) {
			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
		} else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				return VM_FAULT_OOM;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

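		/*
		 * Note that the value of @prot at this point may differ
		 * from the value of @vma->vm_page_prot in the caching-
		 * and encryption bits, since the exact placement of the
		 * data may not be known at mmap() time and may change
		 * while the data is mmap'ed. See vmf_insert_pfn_prot().
		 */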
		ret = vmf_insert_pfn_prot(vma, address, pfn, prot);

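		/* Never error on prefaulted PTEs */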
		if (unlikely((ret & VM_FAULT_ERROR))) {
			if (i == 0)
				return VM_FAULT_NOPAGE;
			else
				break;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);

static void ttm_bo_release_dummy_page(struct drm_device *dev, void *res)
{
	struct page *dummy_page = (struct page *)res;

	__free_page(dummy_page);
}

vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	struct drm_device *ddev = bo->base.dev;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	unsigned long address;
	unsigned long pfn;
	struct page *page;

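	/* Allocate a new dummy page to map all the VA range in this VMA to it */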
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;

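	/* Set the page to be freed using a drmm release action */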
	if (drmm_add_action_or_reset(ddev, ttm_bo_release_dummy_page, page))
		return VM_FAULT_OOM;

	pfn = page_to_pfn(page);

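	/* Prefault the entire VMA range right away to avoid further faults */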
	for (address = vma->vm_start; address < vma->vm_end;
	     address += PAGE_SIZE)
		ret = vmf_insert_pfn_prot(vma, address, pfn, prot);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_dummy_page);

vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t prot;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	struct drm_device *ddev = bo->base.dev;
	vm_fault_t ret;
	int idx;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	prot = vma->vm_page_prot;
	if (drm_dev_enter(ddev, &idx)) {
		ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
		drm_dev_exit(idx);
	} else {
		ret = ttm_bo_vm_dummy_page(vmf, prot);
	}
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault);

void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;

	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

	ttm_bo_get(bo);
}
EXPORT_SYMBOL(ttm_bo_vm_open);

void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;

	ttm_bo_put(bo);
	vma->vm_private_data = NULL;
}
EXPORT_SYMBOL(ttm_bo_vm_close);

static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
				 unsigned long offset,
				 uint8_t *buf, int len, int write)
{
	unsigned long page = offset >> PAGE_SHIFT;
	unsigned long bytes_left = len;
	int ret;

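	/* Copy a page at a time, that way no extra virtual address
	 * mapping is needed
	 */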
	offset -= page << PAGE_SHIFT;
	do {
		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
		struct ttm_bo_kmap_obj map;
		void *ptr;
		bool is_iomem;

		ret = ttm_bo_kmap(bo, page, 1, &map);
		if (ret)
			return ret;

		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
		WARN_ON_ONCE(is_iomem);
		if (write)
			memcpy(ptr, buf, bytes);
		else
			memcpy(buf, ptr, bytes);
		ttm_bo_kunmap(&map);

		page++;
		buf += bytes;
		bytes_left -= bytes;
		offset = 0;
	} while (bytes_left);

	return len;
}

int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
		     void *buf, int len, int write)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;
	unsigned long offset = (addr) - vma->vm_start +
		((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
		 << PAGE_SHIFT);
	int ret;

	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->resource->num_pages)
		return -EIO;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	switch (bo->resource->mem_type) {
	case TTM_PL_SYSTEM:
		fallthrough;
	case TTM_PL_TT:
		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
		break;
	default:
		if (bo->bdev->funcs->access_memory)
			ret = bo->bdev->funcs->access_memory(
				bo, offset, buf, len, write);
		else
			ret = -EIO;
	}

	ttm_bo_unreserve(bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_access);

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access,
};

int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
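	/* Enforce no COW since it would have really strange behavior with it. */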
	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	ttm_bo_get(bo);

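	/*
	 * Drivers may want to override the vm_ops field. Otherwise we use
	 * TTM's default callbacks.
	 */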
	if (!vma->vm_ops)
		vma->vm_ops = &ttm_bo_vm_ops;

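	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */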
	vma->vm_private_data = bo;

	vma->vm_flags |= VM_PFNMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
EXPORT_SYMBOL(ttm_bo_mmap_obj);