#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/panfrost_drm.h>
#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
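/* Called by the DRM core when the last reference to the BO is dropped. */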
static void panfrost_gem_free_object(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_device *pfdev = obj->dev->dev_private;

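	/*
	 * Make sure the BO is no longer on the shrinker's madvise list
	 * before tearing it down, so the shrinker cannot race with the
	 * destruction below.
	 */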
	mutex_lock(&pfdev->shrinker_lock);
	list_del_init(&bo->base.madv_list);
	mutex_unlock(&pfdev->shrinker_lock);

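	/*
	 * All per-file mappings should have been released by now; a
	 * non-empty list here indicates a refcounting bug.
	 */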
	WARN_ON_ONCE(!list_empty(&bo->mappings.list));

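	/*
	 * Heap BOs keep one sg_table per 2MB chunk, filled in lazily by
	 * the GPU fault handler; unmap and free whatever was populated.
	 */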
	if (bo->sgts) {
		int i;
		int n_sgt = bo->base.base.size / SZ_2M;

		for (i = 0; i < n_sgt; i++) {
			if (bo->sgts[i].sgl) {
				dma_unmap_sgtable(pfdev->dev, &bo->sgts[i],
						  DMA_BIDIRECTIONAL, 0);
				sg_free_table(&bo->sgts[i]);
			}
		}
		kvfree(bo->sgts);
	}

	drm_gem_shmem_free(&bo->base);
}

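/*
 * Look up the mapping of @bo in the address space of @priv and take a
 * reference on it. Returns NULL if the BO is not mapped in that context.
 */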
struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
			 struct panfrost_file_priv *priv)
{
	struct panfrost_gem_mapping *iter, *mapping = NULL;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == priv->mmu) {
			kref_get(&iter->refcount);
			mapping = iter;
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	return mapping;
}

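/* Unmap the BO from the GPU MMU (if mapped) and release its VA range. */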
static void
panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
{
	if (mapping->active)
		panfrost_mmu_unmap(mapping);

	spin_lock(&mapping->mmu->mm_lock);
	if (drm_mm_node_allocated(&mapping->mmnode))
		drm_mm_remove_node(&mapping->mmnode);
	spin_unlock(&mapping->mmu->mm_lock);
}

static void panfrost_gem_mapping_release(struct kref *kref)
{
	struct panfrost_gem_mapping *mapping;

	mapping = container_of(kref, struct panfrost_gem_mapping, refcount);

	panfrost_gem_teardown_mapping(mapping);
	drm_gem_object_put(&mapping->obj->base.base);
	panfrost_mmu_ctx_put(mapping->mmu);
	kfree(mapping);
}

void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
{
	if (!mapping)
		return;

	kref_put(&mapping->refcount, panfrost_gem_mapping_release);
}

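/* Tear down every mapping of @bo. Caller must hold bo->mappings.lock. */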
void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo)
{
	struct panfrost_gem_mapping *mapping;

	list_for_each_entry(mapping, &bo->mappings.list, node)
		panfrost_gem_teardown_mapping(mapping);
}

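/*
 * GEM open callback: create a per-file mapping for the BO, reserve a GPU
 * VA range in the file's MMU context and, for non-heap BOs, map the pages
 * right away. Heap BOs are mapped on demand by the GPU fault handler.
 */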
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	int ret;
	size_t size = obj->size;
	u64 align;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	INIT_LIST_HEAD(&mapping->node);
	kref_init(&mapping->refcount);
	drm_gem_object_get(obj);
	mapping->obj = bo;

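	/*
	 * Executable buffers cannot cross a 16MB boundary as the program
	 * counter is 24 bits; aligning them to their size keeps them inside
	 * a single 16MB window. Non-executable buffers of 2MB or more get
	 * 2MB alignment so larger MMU page mappings can be used.
	 */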
	if (!bo->noexec)
		align = size >> PAGE_SHIFT;
	else
		align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;

	mapping->mmu = panfrost_mmu_ctx_get(priv->mmu);
	spin_lock(&mapping->mmu->mm_lock);
	ret = drm_mm_insert_node_generic(&mapping->mmu->mm, &mapping->mmnode,
					 size >> PAGE_SHIFT, align, color, 0);
	spin_unlock(&mapping->mmu->mm_lock);
	if (ret)
		goto err;

	if (!bo->is_heap) {
		ret = panfrost_mmu_map(mapping);
		if (ret)
			goto err;
	}

	mutex_lock(&bo->mappings.lock);
	WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
	list_add_tail(&mapping->node, &bo->mappings.list);
	mutex_unlock(&bo->mappings.lock);

err:
	if (ret)
		panfrost_gem_mapping_put(mapping);
	return ret;
}

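/*
 * GEM close callback: remove this file's mapping from the BO's mapping
 * list and drop the reference taken on it in panfrost_gem_open().
 */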
void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_gem_mapping *mapping = NULL, *iter;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == priv->mmu) {
			mapping = iter;
			list_del(&iter->node);
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	panfrost_gem_mapping_put(mapping);
}

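/*
 * Heap BOs have no backing pages until the GPU faults them in, so they
 * cannot be pinned.
 */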
static int panfrost_gem_pin(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);

	if (bo->is_heap)
		return -EINVAL;

	return drm_gem_shmem_pin(&bo->base);
}

static const struct drm_gem_object_funcs panfrost_gem_funcs = {
	.free = panfrost_gem_free_object,
	.open = panfrost_gem_open,
	.close = panfrost_gem_close,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = panfrost_gem_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

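/**
 * panfrost_gem_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the GEM shmem helpers allocate a panfrost_gem_object instead of
 * a plain drm_gem_shmem_object, so the mapping list, lock and object funcs
 * can be set up here.
 */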
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
	struct panfrost_device *pfdev = dev->dev_private;
	struct panfrost_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&obj->mappings.list);
	mutex_init(&obj->mappings.lock);
	obj->base.base.funcs = &panfrost_gem_funcs;
	obj->base.map_wc = !pfdev->coherent;

	return &obj->base.base;
}

struct panfrost_gem_object *
panfrost_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *dev, size_t size,
				u32 flags,
				uint32_t *handle)
{
	int ret;
	struct drm_gem_shmem_object *shmem;
	struct panfrost_gem_object *bo;

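	/*
	 * Heap BOs grow in 2MB chunks when the GPU faults on them, so round
	 * the requested size up to a 2MB multiple.
	 */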
	if (flags & PANFROST_BO_HEAP)
		size = roundup(size, SZ_2M);

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	bo = to_panfrost_bo(&shmem->base);
	bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
	bo->is_heap = !!(flags & PANFROST_BO_HEAP);

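	/*
	 * Publish the BO to userspace through a GEM handle. The handle now
	 * holds a reference, so the allocation reference is dropped below
	 * whether or not handle creation succeeded.
	 */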
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return bo;
}

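/*
 * dma-buf import: wrap the scatterlist in a shmem GEM object and mark the
 * resulting BO non-executable.
 */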
struct drm_gem_object *
panfrost_gem_prime_import_sg_table(struct drm_device *dev,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct panfrost_gem_object *bo;

	obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	bo = to_panfrost_bo(obj);
	bo->noexec = true;

	return obj;
}