/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * While no substantial code is shared, the prime code is inspired by
 * drm_prime.c, with
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 */
/** @file ttm_ref_object.c
 *
 * Base- and reference object implementation for the various
 * ttm objects. Implements reference counting, minimal security checks
 * and release on file close.
 */


#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include "ttm_object.h"
#include "vmwgfx_drv.h"

MODULE_IMPORT_NS(DMA_BUF);

/**
 * struct ttm_object_file
 *
 * @tdev: Pointer to the ttm_object_device.
 *
 * @lock: Lock that protects the ref_list list and the
 * ref_hash hash table.
 *
 * @ref_list: List of ttm_ref_objects to be destroyed at
 * file release.
 *
 * @ref_hash: Hash table of ref objects, keyed by base-object handle,
 * for fast lookup of ref objects given a base object.
 *
 * @refcount: reference/usage count
 */
struct ttm_object_file {
    struct ttm_object_device *tdev;
    spinlock_t lock;
    struct list_head ref_list;
    struct vmwgfx_open_hash ref_hash;
    struct kref refcount;
};
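
/*
 * Illustrative sketch (not part of this file): a driver typically creates
 * one ttm_object_file per DRM file at open time and drops it again at
 * postclose, using ttm_object_file_init() / ttm_object_file_release()
 * declared in ttm_object.h. The my_fpriv / my_priv names below are
 * hypothetical placeholders for the driver's private structures.
 *
 *     static int my_driver_open(struct drm_device *dev, struct drm_file *file)
 *     {
 *         struct my_fpriv *fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
 *
 *         if (!fpriv)
 *             return -ENOMEM;
 *
 *         fpriv->tfile = ttm_object_file_init(my_priv(dev)->tdev, 10);
 *         if (!fpriv->tfile) {
 *             kfree(fpriv);
 *             return -ENOMEM;
 *         }
 *         file->driver_priv = fpriv;
 *         return 0;
 *     }
 *
 *     static void my_driver_postclose(struct drm_device *dev,
 *                                     struct drm_file *file)
 *     {
 *         struct my_fpriv *fpriv = file->driver_priv;
 *
 *         ttm_object_file_release(&fpriv->tfile);
 *         kfree(fpriv);
 *     }
 */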

/*
 * struct ttm_object_device
 *
 * @object_lock: lock that protects the object_hash hash table.
 *
 * @object_hash: hash table for fast lookup of object global names.
 *
 * @object_count: Per device object count.
 *
 * @ops: Copy of the driver's dma_buf_ops used for exported prime objects,
 * with the release method wrapped by ttm_prime_dmabuf_release().
 *
 * @dmabuf_release: The driver's original dma_buf release method.
 *
 * @idr: Per-device handle namespace for base objects.
 *
 * This is the per-device data structure needed for ttm object management.
 */

struct ttm_object_device {
    spinlock_t object_lock;
    struct vmwgfx_open_hash object_hash;
    atomic_t object_count;
    struct dma_buf_ops ops;
    void (*dmabuf_release)(struct dma_buf *dma_buf);
    struct idr idr;
};
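
/*
 * Illustrative sketch (not part of this file): the device-wide state is
 * created once at driver load, with the driver's dma_buf_ops, and torn
 * down at unload. my_priv and my_prime_dmabuf_ops are hypothetical
 * driver-defined names.
 *
 *     int my_driver_load(struct my_priv *priv)
 *     {
 *         priv->tdev = ttm_object_device_init(12, &my_prime_dmabuf_ops);
 *         return priv->tdev ? 0 : -ENOMEM;
 *     }
 *
 *     void my_driver_unload(struct my_priv *priv)
 *     {
 *         ttm_object_device_release(&priv->tdev);
 *     }
 */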

/*
 * struct ttm_ref_object
 *
 * @rcu_head: RCU head used to free the ref object after a grace period.
 *
 * @hash: Hash entry for the per-file object reference hash.
 *
 * @head: List entry for the per-file list of ref-objects.
 *
 * @kref: Ref count.
 *
 * @obj: Base object this ref object is referencing.
 *
 * @tfile: The ttm_object_file holding this reference.
 *
 * This is similar to an idr object, but it also has a hash table entry
 * that allows lookup with a pointer to the referenced object as a key. In
 * that way, one can easily detect whether a base object is referenced by
 * a particular ttm_object_file. It also carries a ref count to avoid creating
 * multiple ref objects if a ttm_object_file references the same base
 * object more than once.
 */

struct ttm_ref_object {
    struct rcu_head rcu_head;
    struct vmwgfx_hash_item hash;
    struct list_head head;
    struct kref kref;
    struct ttm_base_object *obj;
    struct ttm_object_file *tfile;
};
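
/*
 * Illustrative sketch (not part of this file): an ioctl that wants a
 * per-file reference on a shareable base object pairs ttm_ref_object_add()
 * with ttm_ref_object_base_unref(); any reference user space never drops
 * is cleaned up in ttm_object_file_release(). my_take_ref()/my_drop_ref()
 * are hypothetical wrappers.
 *
 *     static int my_take_ref(struct ttm_object_device *tdev,
 *                            struct ttm_object_file *tfile, u32 handle)
 *     {
 *         struct ttm_base_object *base;
 *         bool existed;
 *         int ret;
 *
 *         base = ttm_base_object_lookup_for_ref(tdev, handle);
 *         if (!base)
 *             return -ESRCH;
 *
 *         ret = ttm_ref_object_add(tfile, base, &existed, false);
 *         ttm_base_object_unref(&base);
 *         return ret;
 *     }
 *
 *     static int my_drop_ref(struct ttm_object_file *tfile, u32 handle)
 *     {
 *         return ttm_ref_object_base_unref(tfile, handle);
 *     }
 */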

static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);

static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
    kref_get(&tfile->refcount);
    return tfile;
}

static void ttm_object_file_destroy(struct kref *kref)
{
    struct ttm_object_file *tfile =
        container_of(kref, struct ttm_object_file, refcount);

    kfree(tfile);
}


static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
    struct ttm_object_file *tfile = *p_tfile;

    *p_tfile = NULL;
    kref_put(&tfile->refcount, ttm_object_file_destroy);
}


int ttm_base_object_init(struct ttm_object_file *tfile,
             struct ttm_base_object *base,
             bool shareable,
             enum ttm_object_type object_type,
             void (*refcount_release) (struct ttm_base_object **))
{
    struct ttm_object_device *tdev = tfile->tdev;
    int ret;

    base->shareable = shareable;
    base->tfile = ttm_object_file_ref(tfile);
    base->refcount_release = refcount_release;
    base->object_type = object_type;
    kref_init(&base->refcount);
    idr_preload(GFP_KERNEL);
    spin_lock(&tdev->object_lock);
    ret = idr_alloc(&tdev->idr, base, 1, 0, GFP_NOWAIT);
    spin_unlock(&tdev->object_lock);
    idr_preload_end();
    if (ret < 0)
        return ret;

    base->handle = ret;
    ret = ttm_ref_object_add(tfile, base, NULL, false);
    if (unlikely(ret != 0))
        goto out_err1;

    ttm_base_object_unref(&base);

    return 0;
out_err1:
    spin_lock(&tdev->object_lock);
    idr_remove(&tdev->idr, base->handle);
    spin_unlock(&tdev->object_lock);
    return ret;
}

static void ttm_release_base(struct kref *kref)
{
    struct ttm_base_object *base =
        container_of(kref, struct ttm_base_object, refcount);
    struct ttm_object_device *tdev = base->tfile->tdev;

    spin_lock(&tdev->object_lock);
    idr_remove(&tdev->idr, base->handle);
    spin_unlock(&tdev->object_lock);

    /*
     * Note: We don't use synchronize_rcu() here because it's far
     * too slow. It's up to the user to free the object using
     * call_rcu() or ttm_base_object_kfree().
     */

    ttm_object_file_unref(&base->tfile);
    if (base->refcount_release)
        base->refcount_release(&base);
}
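
/*
 * Illustrative sketch (not part of this file): a refcount_release callback
 * for an object that embeds its ttm_base_object directly, assuming the
 * ttm_base_object_kfree() helper from ttm_object.h, which kfree_rcu()s the
 * containing object so the RCU lookups in this file stay safe. struct
 * my_object is a hypothetical driver type.
 *
 *     struct my_object {
 *         struct ttm_base_object base;
 *         u32 payload;
 *     };
 *
 *     static void my_object_release(struct ttm_base_object **p_base)
 *     {
 *         struct ttm_base_object *base = *p_base;
 *         struct my_object *obj = container_of(base, struct my_object, base);
 *
 *         *p_base = NULL;
 *         ttm_base_object_kfree(obj, base);
 *     }
 */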

void ttm_base_object_unref(struct ttm_base_object **p_base)
{
    struct ttm_base_object *base = *p_base;

    *p_base = NULL;

    kref_put(&base->refcount, ttm_release_base);
}

/**
 * ttm_base_object_noref_lookup - look up a base object without reference
 * @tfile: The struct ttm_object_file the object is registered with.
 * @key: The object handle.
 *
 * This function looks up a ttm base object and returns a pointer to it
 * without refcounting the pointer. The returned pointer is only valid
 * until ttm_base_object_noref_release() is called, and the object
 * pointed to by the returned pointer may be doomed. Any persistent usage
 * of the object requires a refcount to be taken using kref_get_unless_zero().
 * Iff this function returns successfully it needs to be paired with
 * ttm_base_object_noref_release(), and no sleeping or scheduling functions
 * may be called in between these two function calls.
 *
 * Return: A pointer to the object if successful or NULL otherwise.
 */
struct ttm_base_object *
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
{
    struct vmwgfx_hash_item *hash;
    struct vmwgfx_open_hash *ht = &tfile->ref_hash;
    int ret;

    rcu_read_lock();
    ret = vmwgfx_ht_find_item_rcu(ht, key, &hash);
    if (ret) {
        rcu_read_unlock();
        return NULL;
    }

    __release(RCU);
    return drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
}
EXPORT_SYMBOL(ttm_base_object_noref_lookup);
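
/*
 * Illustrative sketch (not part of this file): the noref lookup is meant
 * for short, atomic peeks at an object the caller does not need to keep.
 * It must be paired with ttm_base_object_noref_release() from ttm_object.h,
 * with no sleeping in between. my_peek_type() is a hypothetical example.
 *
 *     static int my_peek_type(struct ttm_object_file *tfile, u32 handle,
 *                             enum ttm_object_type *type)
 *     {
 *         struct ttm_base_object *base;
 *
 *         base = ttm_base_object_noref_lookup(tfile, handle);
 *         if (!base)
 *             return -ESRCH;
 *
 *         *type = base->object_type;
 *         ttm_base_object_noref_release();
 *         return 0;
 *     }
 */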

struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
                           uint32_t key)
{
    struct ttm_base_object *base = NULL;
    struct vmwgfx_hash_item *hash;
    struct vmwgfx_open_hash *ht = &tfile->ref_hash;
    int ret;

    rcu_read_lock();
    ret = vmwgfx_ht_find_item_rcu(ht, key, &hash);

    if (likely(ret == 0)) {
        base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
        if (!kref_get_unless_zero(&base->refcount))
            base = NULL;
    }
    rcu_read_unlock();

    return base;
}

struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
{
    struct ttm_base_object *base;

    rcu_read_lock();
    base = idr_find(&tdev->idr, key);

    if (base && !kref_get_unless_zero(&base->refcount))
        base = NULL;
    rcu_read_unlock();

    return base;
}

int ttm_ref_object_add(struct ttm_object_file *tfile,
               struct ttm_base_object *base,
               bool *existed,
               bool require_existed)
{
    struct vmwgfx_open_hash *ht = &tfile->ref_hash;
    struct ttm_ref_object *ref;
    struct vmwgfx_hash_item *hash;
    int ret = -EINVAL;

    if (base->tfile != tfile && !base->shareable)
        return -EPERM;

    if (existed != NULL)
        *existed = true;

    while (ret == -EINVAL) {
        rcu_read_lock();
        ret = vmwgfx_ht_find_item_rcu(ht, base->handle, &hash);

        if (ret == 0) {
            ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
            if (kref_get_unless_zero(&ref->kref)) {
                rcu_read_unlock();
                break;
            }
        }

        rcu_read_unlock();
        if (require_existed)
            return -EPERM;

        ref = kmalloc(sizeof(*ref), GFP_KERNEL);
        if (unlikely(ref == NULL)) {
            return -ENOMEM;
        }

        ref->hash.key = base->handle;
        ref->obj = base;
        ref->tfile = tfile;
        kref_init(&ref->kref);

        spin_lock(&tfile->lock);
        ret = vmwgfx_ht_insert_item_rcu(ht, &ref->hash);

        if (likely(ret == 0)) {
            list_add_tail(&ref->head, &tfile->ref_list);
            kref_get(&base->refcount);
            spin_unlock(&tfile->lock);
            if (existed != NULL)
                *existed = false;
            break;
        }

        spin_unlock(&tfile->lock);
        BUG_ON(ret != -EINVAL);

        kfree(ref);
    }

    return ret;
}

static void __releases(tfile->lock) __acquires(tfile->lock)
ttm_ref_object_release(struct kref *kref)
{
    struct ttm_ref_object *ref =
        container_of(kref, struct ttm_ref_object, kref);
    struct ttm_object_file *tfile = ref->tfile;
    struct vmwgfx_open_hash *ht;

    ht = &tfile->ref_hash;
    (void)vmwgfx_ht_remove_item_rcu(ht, &ref->hash);
    list_del(&ref->head);
    spin_unlock(&tfile->lock);

    ttm_base_object_unref(&ref->obj);
    kfree_rcu(ref, rcu_head);
    spin_lock(&tfile->lock);
}

int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
                  unsigned long key)
{
    struct vmwgfx_open_hash *ht = &tfile->ref_hash;
    struct ttm_ref_object *ref;
    struct vmwgfx_hash_item *hash;
    int ret;

    spin_lock(&tfile->lock);
    ret = vmwgfx_ht_find_item(ht, key, &hash);
    if (unlikely(ret != 0)) {
        spin_unlock(&tfile->lock);
        return -EINVAL;
    }
    ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
    kref_put(&ref->kref, ttm_ref_object_release);
    spin_unlock(&tfile->lock);
    return 0;
}

void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
    struct ttm_ref_object *ref;
    struct list_head *list;
    struct ttm_object_file *tfile = *p_tfile;

    *p_tfile = NULL;
    spin_lock(&tfile->lock);

    /*
     * Since we release the lock within the loop, we have to
     * restart it from the beginning each time.
     */

    while (!list_empty(&tfile->ref_list)) {
        list = tfile->ref_list.next;
        ref = list_entry(list, struct ttm_ref_object, head);
        ttm_ref_object_release(&ref->kref);
    }

    spin_unlock(&tfile->lock);
    vmwgfx_ht_remove(&tfile->ref_hash);

    ttm_object_file_unref(&tfile);
}

struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
                         unsigned int hash_order)
{
    struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
    int ret;

    if (unlikely(tfile == NULL))
        return NULL;

    spin_lock_init(&tfile->lock);
    tfile->tdev = tdev;
    kref_init(&tfile->refcount);
    INIT_LIST_HEAD(&tfile->ref_list);

    ret = vmwgfx_ht_create(&tfile->ref_hash, hash_order);
    if (ret)
        goto out_err;

    return tfile;
out_err:
    vmwgfx_ht_remove(&tfile->ref_hash);

    kfree(tfile);

    return NULL;
}

struct ttm_object_device *
ttm_object_device_init(unsigned int hash_order,
               const struct dma_buf_ops *ops)
{
    struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
    int ret;

    if (unlikely(tdev == NULL))
        return NULL;

    spin_lock_init(&tdev->object_lock);
    atomic_set(&tdev->object_count, 0);
    ret = vmwgfx_ht_create(&tdev->object_hash, hash_order);
    if (ret != 0)
        goto out_no_object_hash;

    /*
     * Our base is at VMWGFX_NUM_MOB + 1 because we want to create
     * a separate namespace for GEM handles (which are
     * 1..VMWGFX_NUM_MOB) and the surface handles. Some ioctls
     * can take either handle as an argument so we want to
     * easily be able to tell whether the handle refers to a
     * GEM buffer or a surface.
     */
    idr_init_base(&tdev->idr, VMWGFX_NUM_MOB + 1);
    tdev->ops = *ops;
    tdev->dmabuf_release = tdev->ops.release;
    tdev->ops.release = ttm_prime_dmabuf_release;
    return tdev;

out_no_object_hash:
    kfree(tdev);
    return NULL;
}
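
/*
 * Illustrative sketch (not part of this file): with the idr base set to
 * VMWGFX_NUM_MOB + 1, the numeric range of a handle alone is enough to
 * tell the two namespaces apart, e.g. with a hypothetical helper such as:
 *
 *     static inline bool my_handle_is_gem(u32 handle)
 *     {
 *         return handle >= 1 && handle <= VMWGFX_NUM_MOB;
 *     }
 */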

void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
    struct ttm_object_device *tdev = *p_tdev;

    *p_tdev = NULL;

    WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
    idr_destroy(&tdev->idr);
    vmwgfx_ht_remove(&tdev->object_hash);

    kfree(tdev);
}

/**
 * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
 *
 * @dmabuf: Non-refcounted pointer to a struct dma-buf.
 *
 * Obtain a file reference from a lookup structure that doesn't refcount
 * the file, but synchronizes with its release method to make sure it has
 * not been freed yet. See for example kref_get_unless_zero documentation.
 * Returns true if refcounting succeeds, false otherwise.
 *
 * Nobody really wants this as a public API yet, so let it mature here
 * for some time...
 */
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
{
    return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
}

/**
 * ttm_prime_refcount_release - refcount release method for a prime object.
 *
 * @p_base: Pointer to ttm_base_object pointer.
 *
 * This is a wrapper that calls the refcount_release function of the
 * underlying object. At the same time it cleans up the prime object.
 * This function is called when all references to the base object we
 * derive from are gone.
 */
static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
{
    struct ttm_base_object *base = *p_base;
    struct ttm_prime_object *prime;

    *p_base = NULL;
    prime = container_of(base, struct ttm_prime_object, base);
    BUG_ON(prime->dma_buf != NULL);
    mutex_destroy(&prime->mutex);
    if (prime->refcount_release)
        prime->refcount_release(&base);
}

/**
 * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
 *
 * @dma_buf: The struct dma_buf being released.
 *
 * This function first calls the dma_buf release method the driver
 * provides. Then it cleans up our dma_buf pointer used for lookup,
 * and finally releases the reference the dma_buf has on our base
 * object.
 */
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
{
    struct ttm_prime_object *prime =
        (struct ttm_prime_object *) dma_buf->priv;
    struct ttm_base_object *base = &prime->base;
    struct ttm_object_device *tdev = base->tfile->tdev;

    if (tdev->dmabuf_release)
        tdev->dmabuf_release(dma_buf);
    mutex_lock(&prime->mutex);
    if (prime->dma_buf == dma_buf)
        prime->dma_buf = NULL;
    mutex_unlock(&prime->mutex);
    ttm_base_object_unref(&base);
}

/**
 * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @fd: The prime / dmabuf fd.
 * @handle: The returned handle.
 *
 * This function returns a handle to an object that previously exported
 * a dma-buf. Note that we don't handle imports yet, because we simply
 * have no consumers of that implementation.
 */
int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
               int fd, u32 *handle)
{
    struct ttm_object_device *tdev = tfile->tdev;
    struct dma_buf *dma_buf;
    struct ttm_prime_object *prime;
    struct ttm_base_object *base;
    int ret;

    dma_buf = dma_buf_get(fd);
    if (IS_ERR(dma_buf))
        return PTR_ERR(dma_buf);

    if (dma_buf->ops != &tdev->ops) {
        /* Not one of ours; drop the reference taken by dma_buf_get(). */
        dma_buf_put(dma_buf);
        return -ENOSYS;
    }

    prime = (struct ttm_prime_object *) dma_buf->priv;
    base = &prime->base;
    *handle = base->handle;
    ret = ttm_ref_object_add(tfile, base, NULL, false);

    dma_buf_put(dma_buf);

    return ret;
}

/**
 * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
 *
 * @tfile: Struct ttm_object_file identifying the caller.
 * @handle: Handle to the object we're exporting from.
 * @flags: flags for dma-buf creation. We just pass them on.
 * @prime_fd: The returned file descriptor.
 *
 */
int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
               uint32_t handle, uint32_t flags,
               int *prime_fd)
{
    struct ttm_object_device *tdev = tfile->tdev;
    struct ttm_base_object *base;
    struct dma_buf *dma_buf;
    struct ttm_prime_object *prime;
    int ret;

    base = ttm_base_object_lookup(tfile, handle);
    if (unlikely(base == NULL ||
             base->object_type != ttm_prime_type)) {
        ret = -ENOENT;
        goto out_unref;
    }

    prime = container_of(base, struct ttm_prime_object, base);
    if (unlikely(!base->shareable)) {
        ret = -EPERM;
        goto out_unref;
    }

    ret = mutex_lock_interruptible(&prime->mutex);
    if (unlikely(ret != 0)) {
        ret = -ERESTARTSYS;
        goto out_unref;
    }

    dma_buf = prime->dma_buf;
    if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        exp_info.ops = &tdev->ops;
        exp_info.size = prime->size;
        exp_info.flags = flags;
        exp_info.priv = prime;

        /*
         * Need to create a new dma_buf
         */

        dma_buf = dma_buf_export(&exp_info);
        if (IS_ERR(dma_buf)) {
            ret = PTR_ERR(dma_buf);
            mutex_unlock(&prime->mutex);
            goto out_unref;
        }

        /*
         * dma_buf has taken the base object reference
         */
        base = NULL;
        prime->dma_buf = dma_buf;
    }
    mutex_unlock(&prime->mutex);

    ret = dma_buf_fd(dma_buf, flags);
    if (ret >= 0) {
        *prime_fd = ret;
        ret = 0;
    } else
        dma_buf_put(dma_buf);

out_unref:
    if (base)
        ttm_base_object_unref(&base);
    return ret;
}
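
/*
 * Illustrative sketch (not part of this file): the two functions above are
 * normally exposed through the DRM PRIME hooks with thin per-device
 * wrappers along these lines; my_fpriv() is a hypothetical accessor for
 * the per-file ttm_object_file.
 *
 *     int my_prime_fd_to_handle(struct drm_device *dev,
 *                               struct drm_file *file_priv,
 *                               int fd, u32 *handle)
 *     {
 *         struct ttm_object_file *tfile = my_fpriv(file_priv)->tfile;
 *
 *         return ttm_prime_fd_to_handle(tfile, fd, handle);
 *     }
 *
 *     int my_prime_handle_to_fd(struct drm_device *dev,
 *                               struct drm_file *file_priv,
 *                               uint32_t handle, uint32_t flags,
 *                               int *prime_fd)
 *     {
 *         struct ttm_object_file *tfile = my_fpriv(file_priv)->tfile;
 *
 *         return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
 *     }
 */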

/**
 * ttm_prime_object_init - Initialize a ttm_prime_object
 *
 * @tfile: struct ttm_object_file identifying the caller
 * @size: The size of the dma_bufs we export.
 * @prime: The object to be initialized.
 * @shareable: See ttm_base_object_init
 * @type: See ttm_base_object_init
 * @refcount_release: See ttm_base_object_init
 *
 * Initializes an object which is compatible with the drm_prime model
 * for data sharing between processes and devices.
 */
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
              struct ttm_prime_object *prime, bool shareable,
              enum ttm_object_type type,
              void (*refcount_release) (struct ttm_base_object **))
{
    mutex_init(&prime->mutex);
    prime->size = PAGE_ALIGN(size);
    prime->real_type = type;
    prime->dma_buf = NULL;
    prime->refcount_release = refcount_release;
    return ttm_base_object_init(tfile, &prime->base, shareable,
                    ttm_prime_type,
                    ttm_prime_refcount_release);
}
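
/*
 * Illustrative sketch (not part of this file): an exportable driver object
 * embeds a ttm_prime_object and registers it with ttm_prime_object_init(),
 * passing a release callback that frees the container after an RCU grace
 * period (compare the sketch after ttm_release_base() above, and the
 * ttm_base_object_kfree() helper assumed from ttm_object.h). struct
 * my_surface is a hypothetical driver type and ttm_driver_type0 is used
 * here only as an example object type.
 *
 *     struct my_surface {
 *         struct ttm_prime_object prime;
 *         u32 payload;
 *     };
 *
 *     static void my_surface_release(struct ttm_base_object **p_base)
 *     {
 *         struct ttm_base_object *base = *p_base;
 *         struct ttm_prime_object *prime =
 *             container_of(base, struct ttm_prime_object, base);
 *         struct my_surface *srf =
 *             container_of(prime, struct my_surface, prime);
 *
 *         *p_base = NULL;
 *         ttm_base_object_kfree(srf, prime.base);
 *     }
 *
 *     static int my_surface_register(struct ttm_object_file *tfile,
 *                                    struct my_surface *srf, size_t size)
 *     {
 *         return ttm_prime_object_init(tfile, size, &srf->prime,
 *                                      true, ttm_driver_type0,
 *                                      &my_surface_release);
 *     }
 */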