// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_mmu.h"

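/*
 * kref release callback: runs once the last reference to the address
 * space is dropped, tearing down the drm_mm allocator and the backing
 * MMU and dropping the pid reference before freeing the structure.
 */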
static void
msm_gem_address_space_destroy(struct kref *kref)
{
    struct msm_gem_address_space *aspace = container_of(kref,
            struct msm_gem_address_space, kref);

    drm_mm_takedown(&aspace->mm);
    if (aspace->mmu)
        aspace->mmu->funcs->destroy(aspace->mmu);
    put_pid(aspace->pid);
    kfree(aspace);
}

void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
    if (aspace)
        kref_put(&aspace->kref, msm_gem_address_space_destroy);
}

struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace)
{
    if (!IS_ERR_OR_NULL(aspace))
        kref_get(&aspace->kref);

    return aspace;
}

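/*
 * Returns true while the vma still has active pins or unsignalled
 * fences; fences that have already completed are cleared from
 * fence_mask along the way.
 */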
bool msm_gem_vma_inuse(struct msm_gem_vma *vma)
{
    if (vma->inuse > 0)
        return true;

    while (vma->fence_mask) {
        unsigned idx = ffs(vma->fence_mask) - 1;

        if (!msm_fence_completed(vma->fctx[idx], vma->fence[idx]))
            return true;

        vma->fence_mask &= ~BIT(idx);
    }

    return false;
}

/* Actually unmap memory for the vma */
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
        struct msm_gem_vma *vma)
{
    unsigned size = vma->node.size;

    /* Print a message if we try to purge a vma in use */
    GEM_WARN_ON(msm_gem_vma_inuse(vma));

    /* Don't do anything if the memory isn't mapped */
    if (!vma->mapped)
        return;

    if (aspace->mmu)
        aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);

    vma->mapped = false;
}

/* Remove reference counts for the mapping */
void msm_gem_unpin_vma(struct msm_gem_vma *vma)
{
    if (GEM_WARN_ON(!vma->inuse))
        return;
    if (!GEM_WARN_ON(!vma->iova))
        vma->inuse--;
}

/* Replace pin reference with fence: */
void msm_gem_unpin_vma_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx)
{
    vma->fctx[fctx->index] = fctx;
    vma->fence[fctx->index] = fctx->last_fence;
    vma->fence_mask |= BIT(fctx->index);
    msm_gem_unpin_vma(vma);
}

/* Map and pin vma: */
int
msm_gem_map_vma(struct msm_gem_address_space *aspace,
        struct msm_gem_vma *vma, int prot,
        struct sg_table *sgt, int size)
{
    int ret = 0;

    if (GEM_WARN_ON(!vma->iova))
        return -EINVAL;

    /* Increase the usage counter */
    vma->inuse++;

    if (vma->mapped)
        return 0;

    vma->mapped = true;

    if (aspace && aspace->mmu)
        ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
                size, prot);

    if (ret) {
        vma->mapped = false;
        vma->inuse--;
    }

    return ret;
}

/* Close an iova.  Warn if it is still in use */
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
        struct msm_gem_vma *vma)
{
    GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped);

    spin_lock(&aspace->lock);
    if (vma->iova)
        drm_mm_remove_node(&vma->node);
    spin_unlock(&aspace->lock);

    vma->iova = 0;

    msm_gem_address_space_put(aspace);
}

/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
        struct msm_gem_vma *vma, int size,
        u64 range_start, u64 range_end)
{
    int ret;

    if (GEM_WARN_ON(vma->iova))
        return -EBUSY;

    spin_lock(&aspace->lock);
    ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node,
                      size, PAGE_SIZE, 0,
                      range_start, range_end, 0);
    spin_unlock(&aspace->lock);

    if (ret)
        return ret;

    vma->iova = vma->node.start;
    vma->mapped = false;

    kref_get(&aspace->kref);

    return 0;
}

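/*
 * Allocate an address space backed by @mmu, with an iova allocator
 * managing [@va_start, @va_start + @size).  The caller owns the initial
 * reference, which is dropped with msm_gem_address_space_put().
 */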
struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
        u64 va_start, u64 size)
{
    struct msm_gem_address_space *aspace;

    if (IS_ERR(mmu))
        return ERR_CAST(mmu);

    aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
    if (!aspace)
        return ERR_PTR(-ENOMEM);

    spin_lock_init(&aspace->lock);
    aspace->name = name;
    aspace->mmu = mmu;
    aspace->va_start = va_start;
    aspace->va_size  = size;

    drm_mm_init(&aspace->mm, va_start, size);

    kref_init(&aspace->kref);

    return aspace;
}
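
/*
 * Illustrative only, not part of this file: a minimal sketch of how a
 * caller might drive the helpers above.  It assumes the caller already
 * has a struct msm_mmu, a zero-initialized struct msm_gem_vma of its
 * own, and a populated struct sg_table (in the msm driver this is
 * normally handled from msm_gem.c under the GEM object lock).  The
 * function and variable names (example_map_buffer, gpu_mmu) and the
 * iova range below are hypothetical.
 */
#if 0	/* usage sketch, not compiled */
static int example_map_buffer(struct msm_mmu *gpu_mmu,
		struct msm_gem_vma *vma, struct sg_table *sgt, int size)
{
	struct msm_gem_address_space *aspace;
	int ret;

	/* Create an address space covering [0x100000, 0x100000 + SZ_1G) */
	aspace = msm_gem_address_space_create(gpu_mmu, "example",
			0x100000, SZ_1G);
	if (IS_ERR(aspace))
		return PTR_ERR(aspace);

	/* Carve an iova range out of the address space for this buffer */
	ret = msm_gem_init_vma(aspace, vma, size, 0x100000, 0x100000 + SZ_1G);
	if (ret)
		goto out_put;

	/* Map the backing pages and take a pin on the mapping */
	ret = msm_gem_map_vma(aspace, vma, IOMMU_READ | IOMMU_WRITE, sgt, size);
	if (ret)
		goto out_close;

	/* ... submit GPU work using vma->iova ... */

	msm_gem_unpin_vma(vma);		/* drop the pin once idle */
	msm_gem_purge_vma(aspace, vma);	/* unmap the backing pages */
out_close:
	msm_gem_close_vma(aspace, vma);	/* release the iova range */
out_put:
	msm_gem_address_space_put(aspace);
	return ret;
}
#endif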