0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (C) 2013 Red Hat
0004  * Author: Rob Clark <robdclark@gmail.com>
0005  */
0006 
0007 #include <linux/dma-map-ops.h>
0008 #include <linux/vmalloc.h>
0009 #include <linux/spinlock.h>
0010 #include <linux/shmem_fs.h>
0011 #include <linux/dma-buf.h>
0012 #include <linux/pfn_t.h>
0013 
0014 #include <drm/drm_prime.h>
0015 
0016 #include "msm_drv.h"
0017 #include "msm_fence.h"
0018 #include "msm_gem.h"
0019 #include "msm_gpu.h"
0020 #include "msm_mmu.h"
0021 
0022 static void update_inactive(struct msm_gem_object *msm_obj);
0023 
0024 static dma_addr_t physaddr(struct drm_gem_object *obj)
0025 {
0026     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0027     struct msm_drm_private *priv = obj->dev->dev_private;
0028     return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
0029             priv->vram.paddr;
0030 }
0031 
0032 static bool use_pages(struct drm_gem_object *obj)
0033 {
0034     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0035     return !msm_obj->vram_node;
0036 }
0037 
0038 /*
0039  * Cache sync.. this is a bit over-complicated, to fit dma-mapping
0040  * API.  Really GPU cache is out of scope here (handled on cmdstream)
0041  * and all we need to do is invalidate newly allocated pages before
0042  * mapping to CPU as uncached/writecombine.
0043  *
0044  * On top of this, we have the added headache that, depending on
0045  * display generation, the display's iommu may be wired up to either
0046  * the toplevel drm device (mdss), or to the mdp sub-node, meaning
0047  * that here we either have dma-direct or iommu ops.
0048  *
0049  * Let this be a cautionary tale of abstraction gone wrong.
0050  */
0051 
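/*
 * Concretely, sync_for_device() maps the object's sg-table with
 * dma_map_sgtable() and sync_for_cpu() unmaps it again; the mapping is
 * used purely for the cache maintenance described above, and both
 * helpers are only called for MSM_BO_WC objects (see get_pages() and
 * put_pages() below).
 */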
0052 static void sync_for_device(struct msm_gem_object *msm_obj)
0053 {
0054     struct device *dev = msm_obj->base.dev->dev;
0055 
0056     dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
0057 }
0058 
0059 static void sync_for_cpu(struct msm_gem_object *msm_obj)
0060 {
0061     struct device *dev = msm_obj->base.dev->dev;
0062 
0063     dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
0064 }
0065 
0066 /* allocate pages from VRAM carveout, used when no IOMMU: */
0067 static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
0068 {
0069     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0070     struct msm_drm_private *priv = obj->dev->dev_private;
0071     dma_addr_t paddr;
0072     struct page **p;
0073     int ret, i;
0074 
0075     p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
0076     if (!p)
0077         return ERR_PTR(-ENOMEM);
0078 
0079     spin_lock(&priv->vram.lock);
0080     ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
0081     spin_unlock(&priv->vram.lock);
0082     if (ret) {
0083         kvfree(p);
0084         return ERR_PTR(ret);
0085     }
0086 
0087     paddr = physaddr(obj);
0088     for (i = 0; i < npages; i++) {
0089         p[i] = pfn_to_page(__phys_to_pfn(paddr));
0090         paddr += PAGE_SIZE;
0091     }
0092 
0093     return p;
0094 }
0095 
0096 static struct page **get_pages(struct drm_gem_object *obj)
0097 {
0098     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0099 
0100     GEM_WARN_ON(!msm_gem_is_locked(obj));
0101 
0102     if (!msm_obj->pages) {
0103         struct drm_device *dev = obj->dev;
0104         struct page **p;
0105         int npages = obj->size >> PAGE_SHIFT;
0106 
0107         if (use_pages(obj))
0108             p = drm_gem_get_pages(obj);
0109         else
0110             p = get_pages_vram(obj, npages);
0111 
0112         if (IS_ERR(p)) {
0113             DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
0114                     PTR_ERR(p));
0115             return p;
0116         }
0117 
0118         msm_obj->pages = p;
0119 
0120         msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
0121         if (IS_ERR(msm_obj->sgt)) {
0122             void *ptr = ERR_CAST(msm_obj->sgt);
0123 
0124             DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
0125             msm_obj->sgt = NULL;
0126             return ptr;
0127         }
0128 
0129         /* For non-cached buffers, ensure the new pages are clean
0130          * because the display controller, GPU, etc. are not coherent:
0131          */
0132         if (msm_obj->flags & MSM_BO_WC)
0133             sync_for_device(msm_obj);
0134 
0135         update_inactive(msm_obj);
0136     }
0137 
0138     return msm_obj->pages;
0139 }
0140 
0141 static void put_pages_vram(struct drm_gem_object *obj)
0142 {
0143     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0144     struct msm_drm_private *priv = obj->dev->dev_private;
0145 
0146     spin_lock(&priv->vram.lock);
0147     drm_mm_remove_node(msm_obj->vram_node);
0148     spin_unlock(&priv->vram.lock);
0149 
0150     kvfree(msm_obj->pages);
0151 }
0152 
0153 static void put_pages(struct drm_gem_object *obj)
0154 {
0155     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0156 
0157     if (msm_obj->pages) {
0158         if (msm_obj->sgt) {
0159             /* For non-cached buffers, sync the pages back
0160              * for the CPU before freeing them, because the
0161              * display controller, GPU, etc. are not coherent:
0162              */
0163             if (msm_obj->flags & MSM_BO_WC)
0164                 sync_for_cpu(msm_obj);
0165 
0166             sg_free_table(msm_obj->sgt);
0167             kfree(msm_obj->sgt);
0168             msm_obj->sgt = NULL;
0169         }
0170 
0171         if (use_pages(obj))
0172             drm_gem_put_pages(obj, msm_obj->pages, true, false);
0173         else
0174             put_pages_vram(obj);
0175 
0176         msm_obj->pages = NULL;
0177     }
0178 }
0179 
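/*
 * Pin the backing pages and return the page array.  A minimal usage
 * sketch (not taken from a real caller):
 *
 *     struct page **pages = msm_gem_get_pages(obj);
 *     if (IS_ERR(pages))
 *         return PTR_ERR(pages);
 *     ... access pages[n] ...
 *     msm_gem_put_pages(obj);
 *
 * Each successful call increments pin_count; msm_gem_put_pages() drops
 * the pin again.
 */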
0180 struct page **msm_gem_get_pages(struct drm_gem_object *obj)
0181 {
0182     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0183     struct page **p;
0184 
0185     msm_gem_lock(obj);
0186 
0187     if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
0188         msm_gem_unlock(obj);
0189         return ERR_PTR(-EBUSY);
0190     }
0191 
0192     p = get_pages(obj);
0193 
0194     if (!IS_ERR(p)) {
0195         msm_obj->pin_count++;
0196         update_inactive(msm_obj);
0197     }
0198 
0199     msm_gem_unlock(obj);
0200     return p;
0201 }
0202 
0203 void msm_gem_put_pages(struct drm_gem_object *obj)
0204 {
0205     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0206 
0207     msm_gem_lock(obj);
0208     msm_obj->pin_count--;
0209     GEM_WARN_ON(msm_obj->pin_count < 0);
0210     update_inactive(msm_obj);
0211     msm_gem_unlock(obj);
0212 }
0213 
0214 static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
0215 {
0216     if (msm_obj->flags & MSM_BO_WC)
0217         return pgprot_writecombine(prot);
0218     return prot;
0219 }
0220 
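/*
 * Fault handler for CPU mmaps of the object.  The mapping is VM_PFNMAP
 * (set up in msm_gem_object_mmap() below), so each fault inserts a
 * single pfn with vmf_insert_pfn() after making sure the backing pages
 * are attached.
 */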
0221 static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
0222 {
0223     struct vm_area_struct *vma = vmf->vma;
0224     struct drm_gem_object *obj = vma->vm_private_data;
0225     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0226     struct page **pages;
0227     unsigned long pfn;
0228     pgoff_t pgoff;
0229     int err;
0230     vm_fault_t ret;
0231 
0232     /*
0233      * vm_ops.open/drm_gem_mmap_obj and close get and put
0234      * a reference on obj. So, we don't need to hold one here.
0235      */
0236     err = msm_gem_lock_interruptible(obj);
0237     if (err) {
0238         ret = VM_FAULT_NOPAGE;
0239         goto out;
0240     }
0241 
0242     if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
0243         msm_gem_unlock(obj);
0244         return VM_FAULT_SIGBUS;
0245     }
0246 
0247     /* make sure we have pages attached now */
0248     pages = get_pages(obj);
0249     if (IS_ERR(pages)) {
0250         ret = vmf_error(PTR_ERR(pages));
0251         goto out_unlock;
0252     }
0253 
0254     /* We don't use vmf->pgoff since that has the fake offset: */
0255     pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
0256 
0257     pfn = page_to_pfn(pages[pgoff]);
0258 
0259     VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
0260             pfn, pfn << PAGE_SHIFT);
0261 
0262     ret = vmf_insert_pfn(vma, vmf->address, pfn);
0263 
0264 out_unlock:
0265     msm_gem_unlock(obj);
0266 out:
0267     return ret;
0268 }
0269 
0270 /* get mmap offset */
0271 static uint64_t mmap_offset(struct drm_gem_object *obj)
0272 {
0273     struct drm_device *dev = obj->dev;
0274     int ret;
0275 
0276     GEM_WARN_ON(!msm_gem_is_locked(obj));
0277 
0278     /* Make it mmapable */
0279     ret = drm_gem_create_mmap_offset(obj);
0280 
0281     if (ret) {
0282         DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
0283         return 0;
0284     }
0285 
0286     return drm_vma_node_offset_addr(&obj->vma_node);
0287 }
0288 
0289 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
0290 {
0291     uint64_t offset;
0292 
0293     msm_gem_lock(obj);
0294     offset = mmap_offset(obj);
0295     msm_gem_unlock(obj);
0296     return offset;
0297 }
0298 
0299 static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
0300         struct msm_gem_address_space *aspace)
0301 {
0302     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0303     struct msm_gem_vma *vma;
0304 
0305     GEM_WARN_ON(!msm_gem_is_locked(obj));
0306 
0307     vma = kzalloc(sizeof(*vma), GFP_KERNEL);
0308     if (!vma)
0309         return ERR_PTR(-ENOMEM);
0310 
0311     vma->aspace = aspace;
0312 
0313     list_add_tail(&vma->list, &msm_obj->vmas);
0314 
0315     return vma;
0316 }
0317 
0318 static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
0319         struct msm_gem_address_space *aspace)
0320 {
0321     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0322     struct msm_gem_vma *vma;
0323 
0324     GEM_WARN_ON(!msm_gem_is_locked(obj));
0325 
0326     list_for_each_entry(vma, &msm_obj->vmas, list) {
0327         if (vma->aspace == aspace)
0328             return vma;
0329     }
0330 
0331     return NULL;
0332 }
0333 
0334 static void del_vma(struct msm_gem_vma *vma)
0335 {
0336     if (!vma)
0337         return;
0338 
0339     list_del(&vma->list);
0340     kfree(vma);
0341 }
0342 
0343 /*
0344  * If close is true, this also closes the VMA (releasing the allocated
0345  * iova range) in addition to removing the iommu mapping.  In the eviction
0346  * case (!close), we keep the iova allocated, but only remove the iommu
0347  * mapping.
0348  */
0349 static void
0350 put_iova_spaces(struct drm_gem_object *obj, bool close)
0351 {
0352     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0353     struct msm_gem_vma *vma;
0354 
0355     GEM_WARN_ON(!msm_gem_is_locked(obj));
0356 
0357     list_for_each_entry(vma, &msm_obj->vmas, list) {
0358         if (vma->aspace) {
0359             msm_gem_purge_vma(vma->aspace, vma);
0360             if (close)
0361                 msm_gem_close_vma(vma->aspace, vma);
0362         }
0363     }
0364 }
0365 
0366 /* Called with msm_obj locked */
0367 static void
0368 put_iova_vmas(struct drm_gem_object *obj)
0369 {
0370     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0371     struct msm_gem_vma *vma, *tmp;
0372 
0373     GEM_WARN_ON(!msm_gem_is_locked(obj));
0374 
0375     list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
0376         del_vma(vma);
0377     }
0378 }
0379 
0380 static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
0381         struct msm_gem_address_space *aspace,
0382         u64 range_start, u64 range_end)
0383 {
0384     struct msm_gem_vma *vma;
0385 
0386     GEM_WARN_ON(!msm_gem_is_locked(obj));
0387 
0388     vma = lookup_vma(obj, aspace);
0389 
0390     if (!vma) {
0391         int ret;
0392 
0393         vma = add_vma(obj, aspace);
0394         if (IS_ERR(vma))
0395             return vma;
0396 
0397         ret = msm_gem_init_vma(aspace, vma, obj->size,
0398             range_start, range_end);
0399         if (ret) {
0400             del_vma(vma);
0401             return ERR_PTR(ret);
0402         }
0403     } else {
0404         GEM_WARN_ON(vma->iova < range_start);
0405         GEM_WARN_ON((vma->iova + obj->size) > range_end);
0406     }
0407 
0408     return vma;
0409 }
0410 
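/*
 * Pin the object's backing pages and map them into the given vma.  BO
 * flags translate to IOMMU prot bits: IOMMU_READ is always set,
 * IOMMU_WRITE unless MSM_BO_GPU_READONLY, IOMMU_PRIV for
 * MSM_BO_MAP_PRIV, and IOMMU_CACHE for MSM_BO_CACHED_COHERENT.
 */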
0411 int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
0412 {
0413     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0414     struct page **pages;
0415     int ret, prot = IOMMU_READ;
0416 
0417     if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
0418         prot |= IOMMU_WRITE;
0419 
0420     if (msm_obj->flags & MSM_BO_MAP_PRIV)
0421         prot |= IOMMU_PRIV;
0422 
0423     if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
0424         prot |= IOMMU_CACHE;
0425 
0426     GEM_WARN_ON(!msm_gem_is_locked(obj));
0427 
0428     if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
0429         return -EBUSY;
0430 
0431     pages = get_pages(obj);
0432     if (IS_ERR(pages))
0433         return PTR_ERR(pages);
0434 
0435     ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);
0436 
0437     if (!ret)
0438         msm_obj->pin_count++;
0439 
0440     return ret;
0441 }
0442 
0443 void msm_gem_unpin_locked(struct drm_gem_object *obj)
0444 {
0445     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0446 
0447     GEM_WARN_ON(!msm_gem_is_locked(obj));
0448 
0449     msm_obj->pin_count--;
0450     GEM_WARN_ON(msm_obj->pin_count < 0);
0451 
0452     update_inactive(msm_obj);
0453 }
0454 
0455 struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
0456                        struct msm_gem_address_space *aspace)
0457 {
0458     return get_vma_locked(obj, aspace, 0, U64_MAX);
0459 }
0460 
0461 static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
0462         struct msm_gem_address_space *aspace, uint64_t *iova,
0463         u64 range_start, u64 range_end)
0464 {
0465     struct msm_gem_vma *vma;
0466     int ret;
0467 
0468     GEM_WARN_ON(!msm_gem_is_locked(obj));
0469 
0470     vma = get_vma_locked(obj, aspace, range_start, range_end);
0471     if (IS_ERR(vma))
0472         return PTR_ERR(vma);
0473 
0474     ret = msm_gem_pin_vma_locked(obj, vma);
0475     if (!ret)
0476         *iova = vma->iova;
0477 
0478     return ret;
0479 }
0480 
0481 /*
0482  * Get the iova and pin it.  Should have a matching put.
0483  * Limits the iova to the specified range (in pages).
0484  */
0485 int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
0486         struct msm_gem_address_space *aspace, uint64_t *iova,
0487         u64 range_start, u64 range_end)
0488 {
0489     int ret;
0490 
0491     msm_gem_lock(obj);
0492     ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
0493     msm_gem_unlock(obj);
0494 
0495     return ret;
0496 }
0497 
0498 /* get iova and pin it. Should have a matching put */
0499 int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
0500         struct msm_gem_address_space *aspace, uint64_t *iova)
0501 {
0502     return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
0503 }
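
/*
 * A minimal usage sketch (not taken from a real caller): pin an iova for
 * the GPU and drop the pin again when the buffer is no longer needed:
 *
 *     uint64_t iova;
 *     int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *     if (ret)
 *         return ret;
 *     ... hand iova to the hardware ...
 *     msm_gem_unpin_iova(obj, aspace);
 */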
0504 
0505 /*
0506  * Get an iova but don't pin it. Doesn't need a put because iovas are currently
0507  * valid for the life of the object
0508  */
0509 int msm_gem_get_iova(struct drm_gem_object *obj,
0510         struct msm_gem_address_space *aspace, uint64_t *iova)
0511 {
0512     struct msm_gem_vma *vma;
0513     int ret = 0;
0514 
0515     msm_gem_lock(obj);
0516     vma = get_vma_locked(obj, aspace, 0, U64_MAX);
0517     if (IS_ERR(vma)) {
0518         ret = PTR_ERR(vma);
0519     } else {
0520         *iova = vma->iova;
0521     }
0522     msm_gem_unlock(obj);
0523 
0524     return ret;
0525 }
0526 
0527 static int clear_iova(struct drm_gem_object *obj,
0528               struct msm_gem_address_space *aspace)
0529 {
0530     struct msm_gem_vma *vma = lookup_vma(obj, aspace);
0531 
0532     if (!vma)
0533         return 0;
0534 
0535     if (msm_gem_vma_inuse(vma))
0536         return -EBUSY;
0537 
0538     msm_gem_purge_vma(vma->aspace, vma);
0539     msm_gem_close_vma(vma->aspace, vma);
0540     del_vma(vma);
0541 
0542     return 0;
0543 }
0544 
0545 /*
0546  * Get the requested iova but don't pin it.  Fails if the requested iova is
0547  * not available.  Doesn't need a put because iovas are currently valid for
0548  * the life of the object.
0549  *
0550  * Setting an iova of zero will clear the vma.
0551  */
0552 int msm_gem_set_iova(struct drm_gem_object *obj,
0553              struct msm_gem_address_space *aspace, uint64_t iova)
0554 {
0555     int ret = 0;
0556 
0557     msm_gem_lock(obj);
0558     if (!iova) {
0559         ret = clear_iova(obj, aspace);
0560     } else {
0561         struct msm_gem_vma *vma;
0562         vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
0563         if (IS_ERR(vma)) {
0564             ret = PTR_ERR(vma);
0565         } else if (GEM_WARN_ON(vma->iova != iova)) {
0566             clear_iova(obj, aspace);
0567             ret = -EBUSY;
0568         }
0569     }
0570     msm_gem_unlock(obj);
0571 
0572     return ret;
0573 }
0574 
0575 /*
0576  * Unpin an iova by updating the reference counts. The memory isn't actually
0577  * purged until something else (shrinker, mm_notifier, destroy, etc.) decides
0578  * to get rid of it.
0579  */
0580 void msm_gem_unpin_iova(struct drm_gem_object *obj,
0581         struct msm_gem_address_space *aspace)
0582 {
0583     struct msm_gem_vma *vma;
0584 
0585     msm_gem_lock(obj);
0586     vma = lookup_vma(obj, aspace);
0587     if (!GEM_WARN_ON(!vma)) {
0588         msm_gem_unpin_vma(vma);
0589         msm_gem_unpin_locked(obj);
0590     }
0591     msm_gem_unlock(obj);
0592 }
0593 
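/*
 * Dumb-buffer allocation for KMS.  As a worked example: a 1920x1080,
 * 32 bpp buffer gets a pitch of 1920 * 4 = 7680 bytes (assuming
 * align_pitch() adds no extra padding for an already aligned width) and
 * a size of PAGE_ALIGN(7680 * 1080) = 8294400 bytes.
 */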
0594 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
0595         struct drm_mode_create_dumb *args)
0596 {
0597     args->pitch = align_pitch(args->width, args->bpp);
0598     args->size  = PAGE_ALIGN(args->pitch * args->height);
0599     return msm_gem_new_handle(dev, file, args->size,
0600             MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
0601 }
0602 
0603 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
0604         uint32_t handle, uint64_t *offset)
0605 {
0606     struct drm_gem_object *obj;
0607     int ret = 0;
0608 
0609     /* GEM does all our handle to object mapping */
0610     obj = drm_gem_object_lookup(file, handle);
0611     if (obj == NULL) {
0612         ret = -ENOENT;
0613         goto fail;
0614     }
0615 
0616     *offset = msm_gem_mmap_offset(obj);
0617 
0618     drm_gem_object_put(obj);
0619 
0620 fail:
0621     return ret;
0622 }
0623 
0624 static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
0625 {
0626     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0627     int ret = 0;
0628 
0629     GEM_WARN_ON(!msm_gem_is_locked(obj));
0630 
0631     if (obj->import_attach)
0632         return ERR_PTR(-ENODEV);
0633 
0634     if (GEM_WARN_ON(msm_obj->madv > madv)) {
0635         DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
0636             msm_obj->madv, madv);
0637         return ERR_PTR(-EBUSY);
0638     }
0639 
0640     /* increment vmap_count *before* the vmap() call, so the shrinker
0641      * can check vmap_count (is_vunmapable()) outside of the msm_obj
0642      * lock.  This guarantees that we won't try to msm_gem_vunmap()
0643      * this same object from within the vmap() call (while we already
0644      * hold the msm_obj lock)
0645      */
0646     msm_obj->vmap_count++;
0647 
0648     if (!msm_obj->vaddr) {
0649         struct page **pages = get_pages(obj);
0650         if (IS_ERR(pages)) {
0651             ret = PTR_ERR(pages);
0652             goto fail;
0653         }
0654         msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
0655                 VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
0656         if (msm_obj->vaddr == NULL) {
0657             ret = -ENOMEM;
0658             goto fail;
0659         }
0660 
0661         update_inactive(msm_obj);
0662     }
0663 
0664     return msm_obj->vaddr;
0665 
0666 fail:
0667     msm_obj->vmap_count--;
0668     return ERR_PTR(ret);
0669 }
0670 
0671 void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
0672 {
0673     return get_vaddr(obj, MSM_MADV_WILLNEED);
0674 }
0675 
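/*
 * Kernel virtual mapping of the object.  A minimal usage sketch (not
 * taken from a real caller):
 *
 *     void *vaddr = msm_gem_get_vaddr(obj);
 *     if (IS_ERR(vaddr))
 *         return PTR_ERR(vaddr);
 *     memcpy(vaddr, data, len);
 *     msm_gem_put_vaddr(obj);
 *
 * The mapping is created lazily by get_vaddr() and reference counted via
 * vmap_count; it is only torn down later by msm_gem_vunmap().
 */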
0676 void *msm_gem_get_vaddr(struct drm_gem_object *obj)
0677 {
0678     void *ret;
0679 
0680     msm_gem_lock(obj);
0681     ret = msm_gem_get_vaddr_locked(obj);
0682     msm_gem_unlock(obj);
0683 
0684     return ret;
0685 }
0686 
0687 /*
0688  * Don't use this!  It is for the very special case of dumping
0689  * submits from GPU hangs or faults, where the bo may already
0690  * be MSM_MADV_DONTNEED, but we know the buffer is still on the
0691  * active list.
0692  */
0693 void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
0694 {
0695     return get_vaddr(obj, __MSM_MADV_PURGED);
0696 }
0697 
0698 void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
0699 {
0700     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0701 
0702     GEM_WARN_ON(!msm_gem_is_locked(obj));
0703     GEM_WARN_ON(msm_obj->vmap_count < 1);
0704 
0705     msm_obj->vmap_count--;
0706 }
0707 
0708 void msm_gem_put_vaddr(struct drm_gem_object *obj)
0709 {
0710     msm_gem_lock(obj);
0711     msm_gem_put_vaddr_locked(obj);
0712     msm_gem_unlock(obj);
0713 }
0714 
0715 /* Update madvise status, returns true if not purged, else
0716  * false or -errno.
0717  */
0718 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
0719 {
0720     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0721 
0722     msm_gem_lock(obj);
0723 
0724     if (msm_obj->madv != __MSM_MADV_PURGED)
0725         msm_obj->madv = madv;
0726 
0727     madv = msm_obj->madv;
0728 
0729     /* If the obj is inactive, we might need to move it
0730      * between inactive lists
0731      */
0732     if (msm_obj->active_count == 0)
0733         update_inactive(msm_obj);
0734 
0735     msm_gem_unlock(obj);
0736 
0737     return (madv != __MSM_MADV_PURGED);
0738 }
0739 
0740 void msm_gem_purge(struct drm_gem_object *obj)
0741 {
0742     struct drm_device *dev = obj->dev;
0743     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0744 
0745     GEM_WARN_ON(!msm_gem_is_locked(obj));
0746     GEM_WARN_ON(!is_purgeable(msm_obj));
0747 
0748     /* Get rid of any iommu mapping(s): */
0749     put_iova_spaces(obj, true);
0750 
0751     msm_gem_vunmap(obj);
0752 
0753     drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
0754 
0755     put_pages(obj);
0756 
0757     put_iova_vmas(obj);
0758 
0759     msm_obj->madv = __MSM_MADV_PURGED;
0760     update_inactive(msm_obj);
0761 
0762     drm_gem_free_mmap_offset(obj);
0763 
0764     /* Our goal here is to return as much of the memory as possible
0765      * back to the system, since we are called from OOM.
0766      * To do this we must instruct the shmfs to drop all of its
0767      * backing pages, *now*.
0768      */
0769     shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
0770 
0771     invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
0772             0, (loff_t)-1);
0773 }
0774 
0775 /*
0776  * Unpin the backing pages and make them available to be swapped out.
0777  */
0778 void msm_gem_evict(struct drm_gem_object *obj)
0779 {
0780     struct drm_device *dev = obj->dev;
0781     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0782 
0783     GEM_WARN_ON(!msm_gem_is_locked(obj));
0784     GEM_WARN_ON(is_unevictable(msm_obj));
0785     GEM_WARN_ON(!msm_obj->evictable);
0786     GEM_WARN_ON(msm_obj->active_count);
0787 
0788     /* Get rid of any iommu mapping(s): */
0789     put_iova_spaces(obj, false);
0790 
0791     drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
0792 
0793     put_pages(obj);
0794 
0795     update_inactive(msm_obj);
0796 }
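
/*
 * Note the difference from msm_gem_purge() above: eviction keeps the
 * iova allocations (put_iova_spaces(obj, false)) and leaves madv alone,
 * so the shmem backing can be swapped out and faulted back in later via
 * get_pages(), whereas purging truncates the shmem file and the contents
 * are gone for good.
 */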
0797 
0798 void msm_gem_vunmap(struct drm_gem_object *obj)
0799 {
0800     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0801 
0802     GEM_WARN_ON(!msm_gem_is_locked(obj));
0803 
0804     if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
0805         return;
0806 
0807     vunmap(msm_obj->vaddr);
0808     msm_obj->vaddr = NULL;
0809 }
0810 
0811 void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
0812 {
0813     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0814     struct msm_drm_private *priv = obj->dev->dev_private;
0815 
0816     might_sleep();
0817     GEM_WARN_ON(!msm_gem_is_locked(obj));
0818     GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
0819     GEM_WARN_ON(msm_obj->dontneed);
0820 
0821     if (msm_obj->active_count++ == 0) {
0822         mutex_lock(&priv->mm_lock);
0823         if (msm_obj->evictable)
0824             mark_unevictable(msm_obj);
0825         list_move_tail(&msm_obj->mm_list, &gpu->active_list);
0826         mutex_unlock(&priv->mm_lock);
0827     }
0828 }
0829 
0830 void msm_gem_active_put(struct drm_gem_object *obj)
0831 {
0832     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0833 
0834     might_sleep();
0835     GEM_WARN_ON(!msm_gem_is_locked(obj));
0836 
0837     if (--msm_obj->active_count == 0) {
0838         update_inactive(msm_obj);
0839     }
0840 }
0841 
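/*
 * Re-file an inactive object on the appropriate mm_list: WILLNEED
 * objects with backing pages go to inactive_willneed (evictable),
 * DONTNEED objects to inactive_dontneed (purgeable), and purged or
 * unbacked objects to inactive_unpinned.
 */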
0842 static void update_inactive(struct msm_gem_object *msm_obj)
0843 {
0844     struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
0845 
0846     GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
0847 
0848     if (msm_obj->active_count != 0)
0849         return;
0850 
0851     mutex_lock(&priv->mm_lock);
0852 
0853     if (msm_obj->dontneed)
0854         mark_unpurgeable(msm_obj);
0855     if (msm_obj->evictable)
0856         mark_unevictable(msm_obj);
0857 
0858     list_del(&msm_obj->mm_list);
0859     if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
0860         list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
0861         mark_evictable(msm_obj);
0862     } else if (msm_obj->madv == MSM_MADV_DONTNEED) {
0863         list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
0864         mark_purgeable(msm_obj);
0865     } else {
0866         GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
0867         list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
0868     }
0869 
0870     mutex_unlock(&priv->mm_lock);
0871 }
0872 
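/*
 * Wait for any GPU access to finish before the CPU touches the buffer.
 * MSM_PREP_WRITE widens the set of fences waited on (dma_resv_usage_rw()),
 * and MSM_PREP_NOSYNC turns this into a non-blocking check that returns
 * -EBUSY if the buffer is still busy.
 */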
0873 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
0874 {
0875     bool write = !!(op & MSM_PREP_WRITE);
0876     unsigned long remain =
0877         op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
0878     long ret;
0879 
0880     ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
0881                     true,  remain);
0882     if (ret == 0)
0883         return remain == 0 ? -EBUSY : -ETIMEDOUT;
0884     else if (ret < 0)
0885         return ret;
0886 
0887     /* TODO cache maintenance */
0888 
0889     return 0;
0890 }
0891 
0892 int msm_gem_cpu_fini(struct drm_gem_object *obj)
0893 {
0894     /* TODO cache maintenance */
0895     return 0;
0896 }
0897 
0898 #ifdef CONFIG_DEBUG_FS
0899 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
0900         struct msm_gem_stats *stats)
0901 {
0902     struct msm_gem_object *msm_obj = to_msm_bo(obj);
0903     struct dma_resv *robj = obj->resv;
0904     struct msm_gem_vma *vma;
0905     uint64_t off = drm_vma_node_start(&obj->vma_node);
0906     const char *madv;
0907 
0908     msm_gem_lock(obj);
0909 
0910     stats->all.count++;
0911     stats->all.size += obj->size;
0912 
0913     if (is_active(msm_obj)) {
0914         stats->active.count++;
0915         stats->active.size += obj->size;
0916     }
0917 
0918     if (msm_obj->pages) {
0919         stats->resident.count++;
0920         stats->resident.size += obj->size;
0921     }
0922 
0923     switch (msm_obj->madv) {
0924     case __MSM_MADV_PURGED:
0925         stats->purged.count++;
0926         stats->purged.size += obj->size;
0927         madv = " purged";
0928         break;
0929     case MSM_MADV_DONTNEED:
0930         stats->purgeable.count++;
0931         stats->purgeable.size += obj->size;
0932         madv = " purgeable";
0933         break;
0934     case MSM_MADV_WILLNEED:
0935     default:
0936         madv = "";
0937         break;
0938     }
0939 
0940     seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
0941             msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
0942             obj->name, kref_read(&obj->refcount),
0943             off, msm_obj->vaddr);
0944 
0945     seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
0946 
0947     if (!list_empty(&msm_obj->vmas)) {
0948 
0949         seq_puts(m, "      vmas:");
0950 
0951         list_for_each_entry(vma, &msm_obj->vmas, list) {
0952             const char *name, *comm;
0953             if (vma->aspace) {
0954                 struct msm_gem_address_space *aspace = vma->aspace;
0955                 struct task_struct *task =
0956                     get_pid_task(aspace->pid, PIDTYPE_PID);
0957                 if (task) {
0958                     comm = kstrdup(task->comm, GFP_KERNEL);
0959                     put_task_struct(task);
0960                 } else {
0961                     comm = NULL;
0962                 }
0963                 name = aspace->name;
0964             } else {
0965                 name = comm = NULL;
0966             }
0967             seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
0968                 name, comm ? ":" : "", comm ? comm : "",
0969                 vma->aspace, vma->iova,
0970                 vma->mapped ? "mapped" : "unmapped",
0971                 msm_gem_vma_inuse(vma));
0972             kfree(comm);
0973         }
0974 
0975         seq_puts(m, "\n");
0976     }
0977 
0978     dma_resv_describe(robj, m);
0979     msm_gem_unlock(obj);
0980 }
0981 
0982 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
0983 {
0984     struct msm_gem_stats stats = {};
0985     struct msm_gem_object *msm_obj;
0986 
0987     seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
0988     list_for_each_entry(msm_obj, list, node) {
0989         struct drm_gem_object *obj = &msm_obj->base;
0990         seq_puts(m, "   ");
0991         msm_gem_describe(obj, m, &stats);
0992     }
0993 
0994     seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
0995             stats.all.count, stats.all.size);
0996     seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
0997             stats.active.count, stats.active.size);
0998     seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
0999             stats.resident.count, stats.resident.size);
1000     seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
1001             stats.purgeable.count, stats.purgeable.size);
1002     seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
1003             stats.purged.count, stats.purged.size);
1004 }
1005 #endif
1006 
1007 /* don't call directly!  Use drm_gem_object_put() */
1008 static void msm_gem_free_object(struct drm_gem_object *obj)
1009 {
1010     struct msm_gem_object *msm_obj = to_msm_bo(obj);
1011     struct drm_device *dev = obj->dev;
1012     struct msm_drm_private *priv = dev->dev_private;
1013 
1014     mutex_lock(&priv->obj_lock);
1015     list_del(&msm_obj->node);
1016     mutex_unlock(&priv->obj_lock);
1017 
1018     mutex_lock(&priv->mm_lock);
1019     if (msm_obj->dontneed)
1020         mark_unpurgeable(msm_obj);
1021     list_del(&msm_obj->mm_list);
1022     mutex_unlock(&priv->mm_lock);
1023 
1024     /* object should not be on active list: */
1025     GEM_WARN_ON(is_active(msm_obj));
1026 
1027     put_iova_spaces(obj, true);
1028 
1029     if (obj->import_attach) {
1030         GEM_WARN_ON(msm_obj->vaddr);
1031 
1032         /* Don't drop the pages for imported dmabuf, as they are not
1033          * ours; just free the array we allocated:
1034          */
1035         kvfree(msm_obj->pages);
1036 
1037         put_iova_vmas(obj);
1038 
1039         drm_prime_gem_destroy(obj, msm_obj->sgt);
1040     } else {
1041         msm_gem_vunmap(obj);
1042         put_pages(obj);
1043         put_iova_vmas(obj);
1044     }
1045 
1046     drm_gem_object_release(obj);
1047 
1048     kfree(msm_obj);
1049 }
1050 
1051 static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1052 {
1053     struct msm_gem_object *msm_obj = to_msm_bo(obj);
1054 
1055     vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1056     vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
1057 
1058     return 0;
1059 }
1060 
1061 /* convenience method to construct a GEM buffer object and a userspace handle */
1062 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1063         uint32_t size, uint32_t flags, uint32_t *handle,
1064         char *name)
1065 {
1066     struct drm_gem_object *obj;
1067     int ret;
1068 
1069     obj = msm_gem_new(dev, size, flags);
1070 
1071     if (IS_ERR(obj))
1072         return PTR_ERR(obj);
1073 
1074     if (name)
1075         msm_gem_object_set_name(obj, "%s", name);
1076 
1077     ret = drm_gem_handle_create(file, obj, handle);
1078 
1079     /* drop reference from allocate - handle holds it now */
1080     drm_gem_object_put(obj);
1081 
1082     return ret;
1083 }
1084 
1085 static const struct vm_operations_struct vm_ops = {
1086     .fault = msm_gem_fault,
1087     .open = drm_gem_vm_open,
1088     .close = drm_gem_vm_close,
1089 };
1090 
1091 static const struct drm_gem_object_funcs msm_gem_object_funcs = {
1092     .free = msm_gem_free_object,
1093     .pin = msm_gem_prime_pin,
1094     .unpin = msm_gem_prime_unpin,
1095     .get_sg_table = msm_gem_prime_get_sg_table,
1096     .vmap = msm_gem_prime_vmap,
1097     .vunmap = msm_gem_prime_vunmap,
1098     .mmap = msm_gem_object_mmap,
1099     .vm_ops = &vm_ops,
1100 };
1101 
1102 static int msm_gem_new_impl(struct drm_device *dev,
1103         uint32_t size, uint32_t flags,
1104         struct drm_gem_object **obj)
1105 {
1106     struct msm_drm_private *priv = dev->dev_private;
1107     struct msm_gem_object *msm_obj;
1108 
1109     switch (flags & MSM_BO_CACHE_MASK) {
1110     case MSM_BO_CACHED:
1111     case MSM_BO_WC:
1112         break;
1113     case MSM_BO_CACHED_COHERENT:
1114         if (priv->has_cached_coherent)
1115             break;
1116         fallthrough;
1117     default:
1118         DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
1119                 (flags & MSM_BO_CACHE_MASK));
1120         return -EINVAL;
1121     }
1122 
1123     msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
1124     if (!msm_obj)
1125         return -ENOMEM;
1126 
1127     msm_obj->flags = flags;
1128     msm_obj->madv = MSM_MADV_WILLNEED;
1129 
1130     INIT_LIST_HEAD(&msm_obj->node);
1131     INIT_LIST_HEAD(&msm_obj->vmas);
1132 
1133     *obj = &msm_obj->base;
1134     (*obj)->funcs = &msm_gem_object_funcs;
1135 
1136     return 0;
1137 }
1138 
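/*
 * Allocate a new GEM object.  When there is no IOMMU (or for
 * scanout/stolen buffers with a VRAM carveout configured), backing
 * comes from the VRAM carveout via get_pages_vram(); otherwise the
 * object is shmem-backed through drm_gem_object_init().
 */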
1139 struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
1140 {
1141     struct msm_drm_private *priv = dev->dev_private;
1142     struct msm_gem_object *msm_obj;
1143     struct drm_gem_object *obj = NULL;
1144     bool use_vram = false;
1145     int ret;
1146 
1147     size = PAGE_ALIGN(size);
1148 
1149     if (!msm_use_mmu(dev))
1150         use_vram = true;
1151     else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
1152         use_vram = true;
1153 
1154     if (GEM_WARN_ON(use_vram && !priv->vram.size))
1155         return ERR_PTR(-EINVAL);
1156 
0157     /* Disallow zero-sized objects as they make the underlying
1158      * infrastructure grumpy
1159      */
1160     if (size == 0)
1161         return ERR_PTR(-EINVAL);
1162 
1163     ret = msm_gem_new_impl(dev, size, flags, &obj);
1164     if (ret)
1165         return ERR_PTR(ret);
1166 
1167     msm_obj = to_msm_bo(obj);
1168 
1169     if (use_vram) {
1170         struct msm_gem_vma *vma;
1171         struct page **pages;
1172 
1173         drm_gem_private_object_init(dev, obj, size);
1174 
1175         msm_gem_lock(obj);
1176 
1177         vma = add_vma(obj, NULL);
1178         msm_gem_unlock(obj);
1179         if (IS_ERR(vma)) {
1180             ret = PTR_ERR(vma);
1181             goto fail;
1182         }
1183 
1184         to_msm_bo(obj)->vram_node = &vma->node;
1185 
1186         /* Call chain get_pages() -> update_inactive() tries to
1187          * access msm_obj->mm_list, but it is not initialized yet.
1188          * To avoid a NULL pointer dereference, initialize
1189          * mm_list to be empty.
1190          */
1191         INIT_LIST_HEAD(&msm_obj->mm_list);
1192 
1193         msm_gem_lock(obj);
1194         pages = get_pages(obj);
1195         msm_gem_unlock(obj);
1196         if (IS_ERR(pages)) {
1197             ret = PTR_ERR(pages);
1198             goto fail;
1199         }
1200 
1201         vma->iova = physaddr(obj);
1202     } else {
1203         ret = drm_gem_object_init(dev, obj, size);
1204         if (ret)
1205             goto fail;
1206         /*
1207          * Our buffers are kept pinned, so allocating them from the
1208          * MOVABLE zone is a really bad idea, and conflicts with CMA.
1209          * See the comments above new_inode() for why this is required _and_
1210          * expected if you're going to pin these pages.
1211          */
1212         mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1213     }
1214 
1215     mutex_lock(&priv->mm_lock);
1216     list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
1217     mutex_unlock(&priv->mm_lock);
1218 
1219     mutex_lock(&priv->obj_lock);
1220     list_add_tail(&msm_obj->node, &priv->objects);
1221     mutex_unlock(&priv->obj_lock);
1222 
1223     return obj;
1224 
1225 fail:
1226     drm_gem_object_put(obj);
1227     return ERR_PTR(ret);
1228 }
1229 
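/*
 * Import a dma-buf.  The exporter's sg_table is kept as-is and only a
 * page array is built on top of it; note that imported objects cannot
 * be vmapped through msm_gem_get_vaddr() (get_vaddr() returns -ENODEV
 * when obj->import_attach is set).
 */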
1230 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
1231         struct dma_buf *dmabuf, struct sg_table *sgt)
1232 {
1233     struct msm_drm_private *priv = dev->dev_private;
1234     struct msm_gem_object *msm_obj;
1235     struct drm_gem_object *obj;
1236     uint32_t size;
1237     int ret, npages;
1238 
1239     /* if we don't have IOMMU, don't bother pretending we can import: */
1240     if (!msm_use_mmu(dev)) {
1241         DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
1242         return ERR_PTR(-EINVAL);
1243     }
1244 
1245     size = PAGE_ALIGN(dmabuf->size);
1246 
1247     ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
1248     if (ret)
1249         return ERR_PTR(ret);
1250 
1251     drm_gem_private_object_init(dev, obj, size);
1252 
1253     npages = size / PAGE_SIZE;
1254 
1255     msm_obj = to_msm_bo(obj);
1256     msm_gem_lock(obj);
1257     msm_obj->sgt = sgt;
1258     msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1259     if (!msm_obj->pages) {
1260         msm_gem_unlock(obj);
1261         ret = -ENOMEM;
1262         goto fail;
1263     }
1264 
1265     ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
1266     if (ret) {
1267         msm_gem_unlock(obj);
1268         goto fail;
1269     }
1270 
1271     msm_gem_unlock(obj);
1272 
1273     mutex_lock(&priv->mm_lock);
1274     list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
1275     mutex_unlock(&priv->mm_lock);
1276 
1277     mutex_lock(&priv->obj_lock);
1278     list_add_tail(&msm_obj->node, &priv->objects);
1279     mutex_unlock(&priv->obj_lock);
1280 
1281     return obj;
1282 
1283 fail:
1284     drm_gem_object_put(obj);
1285     return ERR_PTR(ret);
1286 }
1287 
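/*
 * Convenience helper for kernel-internal buffers: allocates a BO, pins
 * an iova (if requested) and returns a kernel vaddr.  A minimal usage
 * sketch (not taken from a real caller):
 *
 *     struct drm_gem_object *bo;
 *     uint64_t iova;
 *     void *ptr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, aspace,
 *             &bo, &iova);
 *     if (IS_ERR(ptr))
 *         return PTR_ERR(ptr);
 *     ...
 *     msm_gem_kernel_put(bo, aspace);
 */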
1288 void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1289         uint32_t flags, struct msm_gem_address_space *aspace,
1290         struct drm_gem_object **bo, uint64_t *iova)
1291 {
1292     void *vaddr;
1293     struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
1294     int ret;
1295 
1296     if (IS_ERR(obj))
1297         return ERR_CAST(obj);
1298 
1299     if (iova) {
1300         ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
1301         if (ret)
1302             goto err;
1303     }
1304 
1305     vaddr = msm_gem_get_vaddr(obj);
1306     if (IS_ERR(vaddr)) {
1307         msm_gem_unpin_iova(obj, aspace);
1308         ret = PTR_ERR(vaddr);
1309         goto err;
1310     }
1311 
1312     if (bo)
1313         *bo = obj;
1314 
1315     return vaddr;
1316 err:
1317     drm_gem_object_put(obj);
1318 
1319     return ERR_PTR(ret);
1320 
1321 }
1322 
1323 void msm_gem_kernel_put(struct drm_gem_object *bo,
1324         struct msm_gem_address_space *aspace)
1325 {
1326     if (IS_ERR_OR_NULL(bo))
1327         return;
1328 
1329     msm_gem_put_vaddr(bo);
1330     msm_gem_unpin_iova(bo, aspace);
1331     drm_gem_object_put(bo);
1332 }
1333 
1334 void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
1335 {
1336     struct msm_gem_object *msm_obj = to_msm_bo(bo);
1337     va_list ap;
1338 
1339     if (!fmt)
1340         return;
1341 
1342     va_start(ap, fmt);
1343     vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1344     va_end(ap);
1345 }