/*
 * Copyright 2017 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Xiaoguang Chen
 *    Tina Zhang <tina.zhang@intel.com>
 */

#include <linux/dma-buf.h>
#include <linux/mdev.h>

#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>

#include "gem/i915_gem_dmabuf.h"

#include "i915_drv.h"
#include "i915_reg.h"
#include "gvt.h"

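/*
 * A gen8+ GGTT PTE carries the page frame address in bits 63:12; mask
 * off the low flag bits to recover the DMA address of the guest page.
 */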
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))

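/*
 * Back the proxy GEM object with the guest framebuffer: walk the GGTT
 * entries covering the plane, decode each PTE, pin the guest page for
 * DMA and record its address in the scatterlist. If pinning any page
 * fails, every page mapped so far is unmapped again.
 */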
static int vgpu_gem_get_pages(
        struct drm_i915_gem_object *obj)
{
    struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
    struct intel_vgpu *vgpu;
    struct sg_table *st;
    struct scatterlist *sg;
    int i, j, ret;
    gen8_pte_t __iomem *gtt_entries;
    struct intel_vgpu_fb_info *fb_info;
    u32 page_num;

    fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
    if (drm_WARN_ON(&dev_priv->drm, !fb_info))
        return -ENODEV;

    vgpu = fb_info->obj->vgpu;
    if (drm_WARN_ON(&dev_priv->drm, !vgpu))
        return -ENODEV;

    st = kmalloc(sizeof(*st), GFP_KERNEL);
    if (unlikely(!st))
        return -ENOMEM;

    page_num = obj->base.size >> PAGE_SHIFT;
    ret = sg_alloc_table(st, page_num, GFP_KERNEL);
    if (ret) {
        kfree(st);
        return ret;
    }
    gtt_entries = (gen8_pte_t __iomem *)to_gt(dev_priv)->ggtt->gsm +
        (fb_info->start >> PAGE_SHIFT);
    for_each_sg(st->sgl, sg, page_num, i) {
        dma_addr_t dma_addr =
            GEN8_DECODE_PTE(readq(&gtt_entries[i]));
        if (intel_gvt_dma_pin_guest_page(vgpu, dma_addr)) {
            ret = -EINVAL;
            goto out;
        }

        sg->offset = 0;
        sg->length = PAGE_SIZE;
        sg_dma_len(sg) = PAGE_SIZE;
        sg_dma_address(sg) = dma_addr;
    }

    __i915_gem_object_set_pages(obj, st, PAGE_SIZE);
out:
    if (ret) {
        dma_addr_t dma_addr;

        for_each_sg(st->sgl, sg, i, j) {
            dma_addr = sg_dma_address(sg);
            if (dma_addr)
                intel_gvt_dma_unmap_guest_page(vgpu, dma_addr);
        }
        sg_free_table(st);
        kfree(st);
    }

    return ret;
}

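/*
 * Counterpart of vgpu_gem_get_pages(): while the object is still
 * exported as a dma-buf, unmap every guest page recorded in the
 * scatterlist before the sg_table itself is freed.
 */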
static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
        struct sg_table *pages)
{
    struct scatterlist *sg;

    if (obj->base.dma_buf) {
        struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
        struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
        struct intel_vgpu *vgpu = obj->vgpu;
        int i;

        for_each_sg(pages->sgl, sg, fb_info->size, i)
            intel_gvt_dma_unmap_guest_page(vgpu,
                           sg_dma_address(sg));
    }

    sg_free_table(pages);
    kfree(pages);
}

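/*
 * Final kref release callback. If the owning vGPU is still active, the
 * dmabuf_obj is unlinked from the vGPU's list and IDR before being
 * freed; otherwise it is an orphan (the vGPU has already been removed)
 * and only its memory needs to be released.
 */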
static void dmabuf_gem_object_free(struct kref *kref)
{
    struct intel_vgpu_dmabuf_obj *obj =
        container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
    struct intel_vgpu *vgpu = obj->vgpu;
    struct list_head *pos;
    struct intel_vgpu_dmabuf_obj *dmabuf_obj;

    if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
        list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
            dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
            if (dmabuf_obj == obj) {
                list_del(pos);
                idr_remove(&vgpu->object_idr,
                       dmabuf_obj->dmabuf_id);
                kfree(dmabuf_obj->info);
                kfree(dmabuf_obj);
                break;
            }
        }
    } else {
        /* Free the orphan dmabuf_objs here */
        kfree(obj->info);
        kfree(obj);
    }
}

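/* Reference helpers; the last put frees the object via dmabuf_gem_object_free(). */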
static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
{
    kref_get(&obj->kref);
}

static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
{
    kref_put(&obj->kref, dmabuf_gem_object_free);
}

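/*
 * GEM release hook: drop the reference the exposed dma-buf held on the
 * dmabuf_obj. dmabuf_lock is only taken when the vGPU still exists;
 * once the vGPU has been removed there is nothing left to serialize
 * against.
 */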
static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
{
    struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
    struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
    struct intel_vgpu *vgpu = obj->vgpu;

    if (vgpu) {
        mutex_lock(&vgpu->dmabuf_lock);
        gem_obj->base.dma_buf = NULL;
        dmabuf_obj_put(obj);
        mutex_unlock(&vgpu->dmabuf_lock);
    } else {
        /* vgpu is NULL, as it has been removed already */
        gem_obj->base.dma_buf = NULL;
        dmabuf_obj_put(obj);
    }
}

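/*
 * I915_GEM_OBJECT_IS_PROXY marks the object as a wrapper around memory
 * that i915 does not own (here, guest framebuffer pages).
 */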
static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
    .name = "i915_gem_object_vgpu",
    .flags = I915_GEM_OBJECT_IS_PROXY,
    .get_pages = vgpu_gem_get_pages,
    .put_pages = vgpu_gem_put_pages,
    .release = vgpu_gem_release,
};

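/*
 * Wrap the decoded guest framebuffer in a read-only proxy GEM object.
 * On Gen9+, the tiling mode is derived from the plane's DRM format
 * modifier so importers see the correct layout. Returns NULL on
 * allocation failure.
 */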
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
        struct intel_vgpu_fb_info *info)
{
    static struct lock_class_key lock_class;
    struct drm_i915_private *dev_priv = to_i915(dev);
    struct drm_i915_gem_object *obj;

    obj = i915_gem_object_alloc();
    if (obj == NULL)
        return NULL;

    drm_gem_private_object_init(dev, &obj->base,
        roundup(info->size, PAGE_SIZE));
    i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class, 0);
    i915_gem_object_set_readonly(obj);

    obj->read_domains = I915_GEM_DOMAIN_GTT;
    obj->write_domain = 0;
    if (GRAPHICS_VER(dev_priv) >= 9) {
        unsigned int tiling_mode = 0;
        unsigned int stride = 0;

        switch (info->drm_format_mod) {
        case DRM_FORMAT_MOD_LINEAR:
            tiling_mode = I915_TILING_NONE;
            break;
        case I915_FORMAT_MOD_X_TILED:
            tiling_mode = I915_TILING_X;
            stride = info->stride;
            break;
        case I915_FORMAT_MOD_Y_TILED:
        case I915_FORMAT_MOD_Yf_TILED:
            tiling_mode = I915_TILING_Y;
            stride = info->stride;
            break;
        default:
            gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
                     info->drm_format_mod);
        }
        obj->tiling_and_stride = tiling_mode | stride;
    } else {
        obj->tiling_and_stride = info->drm_format_mod ?
                    I915_TILING_X : 0;
    }

    return obj;
}

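/* A cursor hotspot is only valid if it lies within the cursor plane. */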
static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
{
    return c && c->x_hot <= c->width && c->y_hot <= c->height;
}

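/*
 * Decode the vGPU's current primary or cursor plane into @info and
 * sanity-check the result: the framebuffer must have a non-zero size,
 * start on a page boundary and lie entirely within the vGPU's GGTT
 * range. An invalid cursor hotspot is reported as UINT_MAX so that
 * userspace can detect it.
 */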
static int vgpu_get_plane_info(struct drm_device *dev,
        struct intel_vgpu *vgpu,
        struct intel_vgpu_fb_info *info,
        int plane_id)
{
    struct intel_vgpu_primary_plane_format p;
    struct intel_vgpu_cursor_plane_format c;
    int ret, tile_height = 1;

    memset(info, 0, sizeof(*info));

    if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
        ret = intel_vgpu_decode_primary_plane(vgpu, &p);
        if (ret)
            return ret;
        info->start = p.base;
        info->start_gpa = p.base_gpa;
        info->width = p.width;
        info->height = p.height;
        info->stride = p.stride;
        info->drm_format = p.drm_format;

        switch (p.tiled) {
        case PLANE_CTL_TILED_LINEAR:
            info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
            break;
        case PLANE_CTL_TILED_X:
            info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
            tile_height = 8;
            break;
        case PLANE_CTL_TILED_Y:
            info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
            tile_height = 32;
            break;
        case PLANE_CTL_TILED_YF:
            info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
            tile_height = 32;
            break;
        default:
            gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
        }
    } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
        ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
        if (ret)
            return ret;
        info->start = c.base;
        info->start_gpa = c.base_gpa;
        info->width = c.width;
        info->height = c.height;
        info->stride = c.width * (c.bpp / 8);
        info->drm_format = c.drm_format;
        info->drm_format_mod = 0;
        info->x_pos = c.x_pos;
        info->y_pos = c.y_pos;

        if (validate_hotspot(&c)) {
            info->x_hot = c.x_hot;
            info->y_hot = c.y_hot;
        } else {
            info->x_hot = UINT_MAX;
            info->y_hot = UINT_MAX;
        }
    } else {
        gvt_vgpu_err("invalid plane id:%d\n", plane_id);
        return -EINVAL;
    }

    info->size = info->stride * roundup(info->height, tile_height);
    if (info->size == 0) {
        gvt_vgpu_err("fb size is zero\n");
        return -EINVAL;
    }

    if (info->start & (PAGE_SIZE - 1)) {
        gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
        return -EFAULT;
    }

    if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
        gvt_vgpu_err("invalid gma addr\n");
        return -EFAULT;
    }

    return 0;
}

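/*
 * Find an already-exposed dmabuf_obj whose decoded plane parameters
 * match @latest_info, so an unchanged framebuffer is re-used instead of
 * being exported a second time. Caller holds dmabuf_lock.
 */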
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_info(struct intel_vgpu *vgpu,
            struct intel_vgpu_fb_info *latest_info)
{
    struct list_head *pos;
    struct intel_vgpu_fb_info *fb_info;
    struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
    struct intel_vgpu_dmabuf_obj *ret = NULL;

    list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
        dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
        if (!dmabuf_obj->info)
            continue;

        fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
        if ((fb_info->start == latest_info->start) &&
            (fb_info->start_gpa == latest_info->start_gpa) &&
            (fb_info->size == latest_info->size) &&
            (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
            (fb_info->drm_format == latest_info->drm_format) &&
            (fb_info->width == latest_info->width) &&
            (fb_info->height == latest_info->height)) {
            ret = dmabuf_obj;
            break;
        }
    }

    return ret;
}

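/* Find an exposed dmabuf_obj by its dmabuf id. Caller holds dmabuf_lock. */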
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
{
    struct list_head *pos;
    struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
    struct intel_vgpu_dmabuf_obj *ret = NULL;

    list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
        dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
        if (dmabuf_obj->dmabuf_id == id) {
            ret = dmabuf_obj;
            break;
        }
    }

    return ret;
}

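/* Copy the decoded framebuffer description into the VFIO plane-info reply. */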
static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
              struct intel_vgpu_fb_info *fb_info)
{
    gvt_dmabuf->drm_format = fb_info->drm_format;
    gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
    gvt_dmabuf->width = fb_info->width;
    gvt_dmabuf->height = fb_info->height;
    gvt_dmabuf->stride = fb_info->stride;
    gvt_dmabuf->size = fb_info->size;
    gvt_dmabuf->x_pos = fb_info->x_pos;
    gvt_dmabuf->y_pos = fb_info->y_pos;
    gvt_dmabuf->x_hot = fb_info->x_hot;
    gvt_dmabuf->y_hot = fb_info->y_hot;
}

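/*
 * Service the VFIO_DEVICE_QUERY_GFX_PLANE ioctl: decode the requested
 * plane, re-use a matching exposed dmabuf_obj if one exists, otherwise
 * allocate a new one and publish its id through the vGPU's IDR. The
 * initref keeps the object alive between this ioctl and the following
 * VFIO_DEVICE_GET_GFX_DMABUF.
 *
 * A rough sketch of the expected userspace flow (error handling
 * omitted; see struct vfio_device_gfx_plane_info in <linux/vfio.h>):
 *
 *    struct vfio_device_gfx_plane_info plane = {
 *        .argsz = sizeof(plane),
 *        .flags = VFIO_GFX_PLANE_TYPE_DMABUF,
 *        .drm_plane_type = DRM_PLANE_TYPE_PRIMARY,
 *    };
 *    ioctl(vfio_device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane);
 *    int fd = ioctl(vfio_device_fd, VFIO_DEVICE_GET_GFX_DMABUF,
 *                   &plane.dmabuf_id);
 */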
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
    struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
    struct vfio_device_gfx_plane_info *gfx_plane_info = args;
    struct intel_vgpu_dmabuf_obj *dmabuf_obj;
    struct intel_vgpu_fb_info fb_info;
    int ret = 0;

    if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
                       VFIO_GFX_PLANE_TYPE_PROBE))
        return ret;
    else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
            (!gfx_plane_info->flags))
        return -EINVAL;

    ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
                    gfx_plane_info->drm_plane_type);
    if (ret != 0)
        goto out;

    mutex_lock(&vgpu->dmabuf_lock);
    /* If exists, pick up the exposed dmabuf_obj */
    dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
    if (dmabuf_obj) {
        update_fb_info(gfx_plane_info, &fb_info);
        gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;

        /* This buffer may be released between query_plane ioctl and
         * get_dmabuf ioctl. Add the refcount to make sure it won't
         * be released between the two ioctls.
         */
        if (!dmabuf_obj->initref) {
            dmabuf_obj->initref = true;
            dmabuf_obj_get(dmabuf_obj);
        }
        ret = 0;
        gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
                vgpu->id, kref_read(&dmabuf_obj->kref),
                gfx_plane_info->dmabuf_id);
        mutex_unlock(&vgpu->dmabuf_lock);
        goto out;
    }

    mutex_unlock(&vgpu->dmabuf_lock);

    /* Need to allocate a new one */
    dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
    if (unlikely(!dmabuf_obj)) {
        gvt_vgpu_err("alloc dmabuf_obj failed\n");
        ret = -ENOMEM;
        goto out;
    }

    dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
                   GFP_KERNEL);
    if (unlikely(!dmabuf_obj->info)) {
        gvt_vgpu_err("allocate intel vgpu fb info failed\n");
        ret = -ENOMEM;
        goto out_free_dmabuf;
    }
    memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));

    ((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;

    dmabuf_obj->vgpu = vgpu;

    ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
    if (ret < 0)
        goto out_free_info;
    gfx_plane_info->dmabuf_id = ret;
    dmabuf_obj->dmabuf_id = ret;

    dmabuf_obj->initref = true;

    kref_init(&dmabuf_obj->kref);

    update_fb_info(gfx_plane_info, &fb_info);

    INIT_LIST_HEAD(&dmabuf_obj->list);
    mutex_lock(&vgpu->dmabuf_lock);
    list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
    mutex_unlock(&vgpu->dmabuf_lock);

    gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
            __func__, kref_read(&dmabuf_obj->kref), ret);

    return 0;

out_free_info:
    kfree(dmabuf_obj->info);
out_free_dmabuf:
    kfree(dmabuf_obj);
out:
    /* ENODEV means plane isn't ready, which might be a normal case. */
    return (ret == -ENODEV) ? 0 : ret;
}

/* To associate an exposed dmabuf with the dmabuf_obj */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
    struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
    struct intel_vgpu_dmabuf_obj *dmabuf_obj;
    struct drm_i915_gem_object *obj;
    struct dma_buf *dmabuf;
    int dmabuf_fd;
    int ret = 0;

    mutex_lock(&vgpu->dmabuf_lock);

    dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
    if (dmabuf_obj == NULL) {
        gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
        ret = -EINVAL;
        goto out;
    }

    obj = vgpu_create_gem(dev, dmabuf_obj->info);
    if (obj == NULL) {
        gvt_vgpu_err("create gvt gem obj failed\n");
        ret = -ENOMEM;
        goto out;
    }

    obj->gvt_info = dmabuf_obj->info;

    dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR);
    if (IS_ERR(dmabuf)) {
        gvt_vgpu_err("export dma-buf failed\n");
        ret = PTR_ERR(dmabuf);
        goto out_free_gem;
    }

    ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
    if (ret < 0) {
        gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
        goto out_free_dmabuf;
    }
    dmabuf_fd = ret;

    dmabuf_obj_get(dmabuf_obj);

    if (dmabuf_obj->initref) {
        dmabuf_obj->initref = false;
        dmabuf_obj_put(dmabuf_obj);
    }

    mutex_unlock(&vgpu->dmabuf_lock);

    gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
            "        file count: %ld, GEM ref: %d\n",
            vgpu->id, dmabuf_obj->dmabuf_id,
            kref_read(&dmabuf_obj->kref),
            dmabuf_fd,
            file_count(dmabuf->file),
            kref_read(&obj->base.refcount));

    i915_gem_object_put(obj);

    return dmabuf_fd;

out_free_dmabuf:
    dma_buf_put(dmabuf);
out_free_gem:
    i915_gem_object_put(obj);
out:
    mutex_unlock(&vgpu->dmabuf_lock);
    return ret;
}

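/*
 * Called on vGPU teardown: detach every dmabuf_obj from the vGPU and
 * drop the initial reference. Objects still pinned by an exported
 * dma-buf survive as orphans and are freed once the last GEM reference
 * is dropped (see vgpu_gem_release()).
 */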
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
{
    struct list_head *pos, *n;
    struct intel_vgpu_dmabuf_obj *dmabuf_obj;

    mutex_lock(&vgpu->dmabuf_lock);
    list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
        dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
        dmabuf_obj->vgpu = NULL;

        idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
        list_del(pos);

        /* dmabuf_obj might be freed in dmabuf_obj_put */
        if (dmabuf_obj->initref) {
            dmabuf_obj->initref = false;
            dmabuf_obj_put(dmabuf_obj);
        }
    }
    mutex_unlock(&vgpu->dmabuf_lock);
}