/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>

static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
                struct vm_fault *vmf)
{
    long err = 0;

    /*
     * Quick non-stalling check for idle.
     */
    if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL))
        return 0;

    /*
     * If possible, avoid waiting for GPU with mmap_lock
     * held.  We only do this if the fault allows retry and this
     * is the first attempt.
     */
    if (fault_flag_allow_retry_first(vmf->flags)) {
        if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
            return VM_FAULT_RETRY;

        ttm_bo_get(bo);
        mmap_read_unlock(vmf->vma->vm_mm);
        (void)dma_resv_wait_timeout(bo->base.resv,
                        DMA_RESV_USAGE_KERNEL, true,
                        MAX_SCHEDULE_TIMEOUT);
        dma_resv_unlock(bo->base.resv);
        ttm_bo_put(bo);
        return VM_FAULT_RETRY;
    }

    /*
     * Ordinary wait.
     */
    err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true,
                    MAX_SCHEDULE_TIMEOUT);
    if (unlikely(err < 0)) {
        return (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
            VM_FAULT_NOPAGE;
    }

    return 0;
}

static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
                       unsigned long page_offset)
{
    struct ttm_device *bdev = bo->bdev;

    if (bdev->funcs->io_mem_pfn)
        return bdev->funcs->io_mem_pfn(bo, page_offset);

    return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset;
}
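
/*
 * Illustrative sketch (not part of TTM): a driver-specific ->io_mem_pfn()
 * callback for hardware whose io memory is not one contiguous range, so
 * the default bus.offset arithmetic above does not apply. All foo_ names
 * and the pfn_table field are hypothetical.
 *
 *   static unsigned long foo_io_mem_pfn(struct ttm_buffer_object *bo,
 *                                       unsigned long page_offset)
 *   {
 *       struct foo_bo *fbo = container_of(bo, struct foo_bo, base);
 *
 *       // Translate through a driver-private page table instead of
 *       // assuming a linear BAR mapping.
 *       return fbo->pfn_table[page_offset];
 *   }
 */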

/**
 * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
 * @bo: The buffer object
 * @vmf: The fault structure handed to the callback
 *
 * vm callbacks like fault() and *_mkwrite() allow the mmap_lock to be dropped
 * during long waits, and after the wait the callback will be restarted. This
 * allows other threads using the same virtual memory space to concurrently
 * map() and unmap() completely unrelated buffer objects. TTM buffer object
 * reservations sometimes wait for the GPU and should therefore be considered
 * long waits. This function reserves the buffer object interruptibly, taking
 * that into account. Starvation is avoided by the vm system not allowing too
 * many repeated restarts.
 * This function is intended to be used in customized fault() and _mkwrite()
 * handlers; see the illustrative sketch following this function below.
 *
 * Return:
 *    0 on success and the bo was reserved.
 *    VM_FAULT_RETRY if a blocking wait was performed and the fault must be
 *    retried.
 *    VM_FAULT_NOPAGE if a blocking wait was interrupted by a signal and
 *    retrying was not allowed.
 */
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
                 struct vm_fault *vmf)
{
    /*
     * Work around locking order reversal in fault / nopfn
     * between mmap_lock and bo_reserve: Perform a trylock operation
     * for reserve, and if it fails, retry the fault after waiting
     * for the buffer to become unreserved.
     */
    if (unlikely(!dma_resv_trylock(bo->base.resv))) {
        /*
         * If the fault allows retry and this is the first
         * fault attempt, we try to release the mmap_lock
         * before waiting
         */
        if (fault_flag_allow_retry_first(vmf->flags)) {
            if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
                ttm_bo_get(bo);
                mmap_read_unlock(vmf->vma->vm_mm);
                if (!dma_resv_lock_interruptible(bo->base.resv,
                                 NULL))
                    dma_resv_unlock(bo->base.resv);
                ttm_bo_put(bo);
            }

            return VM_FAULT_RETRY;
        }

        if (dma_resv_lock_interruptible(bo->base.resv, NULL))
            return VM_FAULT_NOPAGE;
    }

    /*
     * Refuse to fault imported pages. This should be handled
     * (if at all) by redirecting mmap to the exporter.
     */
    if (bo->ttm && (bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
        if (!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) {
            dma_resv_unlock(bo->base.resv);
            return VM_FAULT_SIGBUS;
        }
    }

    return 0;
}
EXPORT_SYMBOL(ttm_bo_vm_reserve);
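
/*
 * Illustrative sketch (not part of TTM; all foo_ names are hypothetical):
 * a customized fault() handler built on ttm_bo_vm_reserve(), mirroring the
 * structure of ttm_bo_vm_fault() further down in this file.
 *
 *   static vm_fault_t foo_vm_fault(struct vm_fault *vmf)
 *   {
 *       struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
 *       vm_fault_t ret;
 *
 *       // May return VM_FAULT_RETRY or VM_FAULT_NOPAGE without
 *       // holding the reservation.
 *       ret = ttm_bo_vm_reserve(bo, vmf);
 *       if (ret)
 *           return ret;
 *
 *       // ... driver-specific work under the reservation, e.g.
 *       // validating the bo into a mappable placement ...
 *
 *       ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
 *                                      TTM_BO_VM_NUM_PREFAULT);
 *       if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 *           return ret;
 *
 *       dma_resv_unlock(bo->base.resv);
 *       return ret;
 *   }
 */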

/**
 * ttm_bo_vm_fault_reserved - TTM fault helper
 * @vmf: The struct vm_fault given as argument to the fault callback
 * @prot: The page protection to be used for this memory area.
 * @num_prefault: Maximum number of prefault pages. The caller may want to
 * specify this based on madvise settings and the size of the GPU object
 * backed by the memory.
 *
 * This function inserts one or more page table entries pointing to the
 * memory backing the buffer object, and then returns a code instructing
 * the caller to retry the page access.
 *
 * Return:
 *   VM_FAULT_NOPAGE on success or pending signal
 *   VM_FAULT_SIGBUS on unspecified error
 *   VM_FAULT_OOM on out-of-memory
 *   VM_FAULT_RETRY if retryable wait
 */
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
                    pgprot_t prot,
                    pgoff_t num_prefault)
{
    struct vm_area_struct *vma = vmf->vma;
    struct ttm_buffer_object *bo = vma->vm_private_data;
    struct ttm_device *bdev = bo->bdev;
    unsigned long page_offset;
    unsigned long page_last;
    unsigned long pfn;
    struct ttm_tt *ttm = NULL;
    struct page *page;
    int err;
    pgoff_t i;
    vm_fault_t ret = VM_FAULT_NOPAGE;
    unsigned long address = vmf->address;

    /*
     * Wait for buffer data in transit, due to a pipelined
     * move.
     */
    ret = ttm_bo_vm_fault_idle(bo, vmf);
    if (unlikely(ret != 0))
        return ret;

    err = ttm_mem_io_reserve(bdev, bo->resource);
    if (unlikely(err != 0))
        return VM_FAULT_SIGBUS;

    page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
        vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
    page_last = vma_pages(vma) + vma->vm_pgoff -
        drm_vma_node_start(&bo->base.vma_node);

    if (unlikely(page_offset >= bo->resource->num_pages))
        return VM_FAULT_SIGBUS;

    prot = ttm_io_prot(bo, bo->resource, prot);
    if (!bo->resource->bus.is_iomem) {
        struct ttm_operation_ctx ctx = {
            .interruptible = false,
            .no_wait_gpu = false,
            .force_alloc = true
        };

        ttm = bo->ttm;
        if (ttm_tt_populate(bdev, bo->ttm, &ctx))
            return VM_FAULT_OOM;
    } else {
        /* Iomem should not be marked encrypted */
        prot = pgprot_decrypted(prot);
    }

    /*
     * Speculatively prefault a number of pages. Only error on
     * first page.
     */
    for (i = 0; i < num_prefault; ++i) {
        if (bo->resource->bus.is_iomem) {
            pfn = ttm_bo_io_mem_pfn(bo, page_offset);
        } else {
            page = ttm->pages[page_offset];
            if (unlikely(!page && i == 0)) {
                return VM_FAULT_OOM;
            } else if (unlikely(!page)) {
                break;
            }
            pfn = page_to_pfn(page);
        }

        /*
         * Note that the value of @prot at this point may differ from
         * the value of @vma->vm_page_prot in the caching- and
         * encryption bits. This is because the exact location of the
         * data may not be known at mmap() time and may also change
         * at arbitrary times while the data is mmap'ed.
         * See vmf_insert_mixed_prot() for a discussion.
         */
        ret = vmf_insert_pfn_prot(vma, address, pfn, prot);

        /* Never error on prefaulted PTEs */
        if (unlikely((ret & VM_FAULT_ERROR))) {
            if (i == 0)
                return VM_FAULT_NOPAGE;
            else
                break;
        }

        address += PAGE_SIZE;
        if (unlikely(++page_offset >= page_last))
            break;
    }
    return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
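
/*
 * Note on choosing @num_prefault: most callers pass TTM_BO_VM_NUM_PREFAULT,
 * but a driver that needs per-page accounting (e.g. dirty tracking) may
 * deliberately fault a single page at a time. An illustrative sketch, with
 * foo_bo_is_dirty_tracked() purely hypothetical:
 *
 *   pgoff_t num_prefault = foo_bo_is_dirty_tracked(bo) ?
 *                              1 : TTM_BO_VM_NUM_PREFAULT;
 *
 *   ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault);
 */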

static void ttm_bo_release_dummy_page(struct drm_device *dev, void *res)
{
    struct page *dummy_page = (struct page *)res;

    __free_page(dummy_page);
}

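/**
 * ttm_bo_vm_dummy_page - Fault in a zeroed dummy page for the whole VMA
 * @vmf: The struct vm_fault given as argument to the fault callback
 * @prot: The page protection to be used for the inserted PTEs
 *
 * Allocates a single zeroed page, schedules it to be freed with the
 * drm_device through a drmm release action, and points every PTE in the
 * faulting VMA at it. Used by ttm_bo_vm_fault() below when the underlying
 * device has been unplugged, so that user-space accesses keep succeeding
 * harmlessly instead of receiving SIGBUS.
 *
 * Return: VM_FAULT_NOPAGE on success, VM_FAULT_OOM if the page or the
 * drmm action could not be allocated.
 */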
vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot)
{
    struct vm_area_struct *vma = vmf->vma;
    struct ttm_buffer_object *bo = vma->vm_private_data;
    struct drm_device *ddev = bo->base.dev;
    vm_fault_t ret = VM_FAULT_NOPAGE;
    unsigned long address;
    unsigned long pfn;
    struct page *page;

    /* Allocate a new dummy page to map all of the VA range in this VMA to it */
    page = alloc_page(GFP_KERNEL | __GFP_ZERO);
    if (!page)
        return VM_FAULT_OOM;

    /* Set the page to be freed using drmm release action */
    if (drmm_add_action_or_reset(ddev, ttm_bo_release_dummy_page, page))
        return VM_FAULT_OOM;

    pfn = page_to_pfn(page);

    /* Prefault the entire VMA range right away to avoid further faults */
    for (address = vma->vm_start; address < vma->vm_end;
         address += PAGE_SIZE)
        ret = vmf_insert_pfn_prot(vma, address, pfn, prot);

    return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_dummy_page);

vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
    struct vm_area_struct *vma = vmf->vma;
    pgprot_t prot;
    struct ttm_buffer_object *bo = vma->vm_private_data;
    struct drm_device *ddev = bo->base.dev;
    vm_fault_t ret;
    int idx;

    ret = ttm_bo_vm_reserve(bo, vmf);
    if (ret)
        return ret;

    prot = vma->vm_page_prot;
    if (drm_dev_enter(ddev, &idx)) {
        ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
        drm_dev_exit(idx);
    } else {
        ret = ttm_bo_vm_dummy_page(vmf, prot);
    }
    if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
        return ret;

    dma_resv_unlock(bo->base.resv);

    return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault);
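
/*
 * Drivers that only need the default behavior can leave vma->vm_ops NULL
 * and let ttm_bo_mmap_obj() below install ttm_bo_vm_ops. A driver with a
 * customized fault handler can still reuse the other exported helpers in
 * its own table; an illustrative sketch (foo_vm_fault is the hypothetical
 * handler sketched after ttm_bo_vm_reserve() above):
 *
 *   static const struct vm_operations_struct foo_vm_ops = {
 *       .fault = foo_vm_fault,
 *       .open = ttm_bo_vm_open,
 *       .close = ttm_bo_vm_close,
 *       .access = ttm_bo_vm_access,
 *   };
 */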

void ttm_bo_vm_open(struct vm_area_struct *vma)
{
    struct ttm_buffer_object *bo = vma->vm_private_data;

    WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

    ttm_bo_get(bo);
}
EXPORT_SYMBOL(ttm_bo_vm_open);

void ttm_bo_vm_close(struct vm_area_struct *vma)
{
    struct ttm_buffer_object *bo = vma->vm_private_data;

    ttm_bo_put(bo);
    vma->vm_private_data = NULL;
}
EXPORT_SYMBOL(ttm_bo_vm_close);

static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
                 unsigned long offset,
                 uint8_t *buf, int len, int write)
{
    unsigned long page = offset >> PAGE_SHIFT;
    unsigned long bytes_left = len;
    int ret;

    /* Copy a page at a time, that way no extra virtual address
     * mapping is needed
     */
    offset -= page << PAGE_SHIFT;
    do {
        unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
        struct ttm_bo_kmap_obj map;
        void *ptr;
        bool is_iomem;

        ret = ttm_bo_kmap(bo, page, 1, &map);
        if (ret)
            return ret;

        ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
        WARN_ON_ONCE(is_iomem);
        if (write)
            memcpy(ptr, buf, bytes);
        else
            memcpy(buf, ptr, bytes);
        ttm_bo_kunmap(&map);

        page++;
        buf += bytes;
        bytes_left -= bytes;
        offset = 0;
    } while (bytes_left);

    return len;
}

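/**
 * ttm_bo_vm_access - Helper for the vm_operations_struct ->access() callback
 * @vma: The vma where the access happens
 * @addr: The user virtual address being accessed
 * @buf: Kernel-space buffer to copy to or from
 * @len: Number of bytes to access
 * @write: Whether this is a write to the buffer object
 *
 * Backs access_process_vm(), i.e. ptrace and /proc/<pid>/mem accesses to
 * the mapping, which cannot go through the regular fault path. Reserves
 * the bo, then copies through a kernel mapping for system and TT
 * placements, or through the driver's access_memory() callback for
 * anything else.
 *
 * Return: Number of bytes accessed, or a negative error code.
 */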
int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
             void *buf, int len, int write)
{
    struct ttm_buffer_object *bo = vma->vm_private_data;
    unsigned long offset = addr - vma->vm_start +
        ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
         << PAGE_SHIFT);
    int ret;

    if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->resource->num_pages)
        return -EIO;

    ret = ttm_bo_reserve(bo, true, false, NULL);
    if (ret)
        return ret;

    switch (bo->resource->mem_type) {
    case TTM_PL_SYSTEM:
        fallthrough;
    case TTM_PL_TT:
        ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
        break;
    default:
        if (bo->bdev->funcs->access_memory)
            ret = bo->bdev->funcs->access_memory(
                bo, offset, buf, len, write);
        else
            ret = -EIO;
    }

    ttm_bo_unreserve(bo);

    return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_access);

static const struct vm_operations_struct ttm_bo_vm_ops = {
    .fault = ttm_bo_vm_fault,
    .open = ttm_bo_vm_open,
    .close = ttm_bo_vm_close,
    .access = ttm_bo_vm_access,
};

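/**
 * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object
 * @vma: The vma handed to the driver's mmap callback
 * @bo: The buffer object backing the address space
 *
 * Sets up @vma to map @bo: rejects COW mappings, takes a bo reference
 * that is transferred to vma->vm_private_data, installs the default
 * ttm_bo_vm_ops unless the driver has already set vma->vm_ops, and marks
 * the vma as an unexpandable, non-dumpable PFN mapping.
 *
 * Return: 0 on success, or -EINVAL for COW mappings.
 */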
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
    /* Enforce no COW, since it would lead to really strange behavior. */
    if (is_cow_mapping(vma->vm_flags))
        return -EINVAL;

    ttm_bo_get(bo);

    /*
     * Drivers may want to override the vm_ops field. Otherwise we
     * use TTM's default callbacks.
     */
    if (!vma->vm_ops)
        vma->vm_ops = &ttm_bo_vm_ops;

    /*
     * Note: We're transferring the bo reference to
     * vma->vm_private_data here.
     */

    vma->vm_private_data = bo;

    vma->vm_flags |= VM_PFNMAP;
    vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
    return 0;
}
EXPORT_SYMBOL(ttm_bo_mmap_obj);
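
/*
 * Illustrative sketch (not part of TTM; foo_ names are hypothetical):
 * a driver mmap file operation handing the vma to ttm_bo_mmap_obj().
 * How the bo is looked up from the mmap offset is driver-specific.
 *
 *   static int foo_mmap(struct file *filp, struct vm_area_struct *vma)
 *   {
 *       struct ttm_buffer_object *bo;
 *       int ret;
 *
 *       bo = foo_bo_lookup(filp, vma->vm_pgoff);  // hypothetical lookup
 *       if (!bo)
 *           return -EINVAL;
 *
 *       // ttm_bo_mmap_obj() takes its own reference on success.
 *       ret = ttm_bo_mmap_obj(vma, bo);
 *       foo_bo_put(bo);                           // drop the lookup ref
 *       return ret;
 *   }
 */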