// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

struct vmw_user_context {
    struct ttm_base_object base;
    struct vmw_resource res;
    struct vmw_ctx_binding_state *cbs;
    struct vmw_cmdbuf_res_manager *man;
    struct vmw_resource *cotables[SVGA_COTABLE_MAX];
    spinlock_t cotable_lock;
    struct vmw_buffer_object *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
                   struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
                 bool readback,
                 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
                   struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
                 bool readback,
                 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_context_conv = {
    .object_type = VMW_RES_CONTEXT,
    .base_obj_to_res = vmw_user_context_base_to_res,
    .res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
    &user_context_conv;


static const struct vmw_res_func vmw_legacy_context_func = {
    .res_type = vmw_res_context,
    .needs_backup = false,
    .may_evict = false,
    .type_name = "legacy contexts",
    .backup_placement = NULL,
    .create = NULL,
    .destroy = NULL,
    .bind = NULL,
    .unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
    .res_type = vmw_res_context,
    .needs_backup = true,
    .may_evict = true,
    .prio = 3,
    .dirty_prio = 3,
    .type_name = "guest backed contexts",
    .backup_placement = &vmw_mob_placement,
    .create = vmw_gb_context_create,
    .destroy = vmw_gb_context_destroy,
    .bind = vmw_gb_context_bind,
    .unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
    .res_type = vmw_res_dx_context,
    .needs_backup = true,
    .may_evict = true,
    .prio = 3,
    .dirty_prio = 3,
    .type_name = "dx contexts",
    .backup_placement = &vmw_mob_placement,
    .create = vmw_dx_context_create,
    .destroy = vmw_dx_context_destroy,
    .bind = vmw_dx_context_bind,
    .unbind = vmw_dx_context_unbind
};

/*
 * Context management:
 */

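/**
 * vmw_context_cotables_unref - Clear a context's cotable pointers and drop
 * the corresponding resource references
 *
 * @dev_priv: Pointer to the device private structure.
 * @uctx: Pointer to the user context whose cotables are released.
 *
 * Each cotable pointer is cleared under the cotable lock so that readers
 * racing with this teardown see either a valid resource or NULL; the
 * reference is dropped outside the lock.
 */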
static void vmw_context_cotables_unref(struct vmw_private *dev_priv,
                       struct vmw_user_context *uctx)
{
    struct vmw_resource *res;
    int i;
    u32 cotable_max = has_sm5_context(dev_priv) ?
        SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

    for (i = 0; i < cotable_max; ++i) {
        spin_lock(&uctx->cotable_lock);
        res = uctx->cotables[i];
        uctx->cotables[i] = NULL;
        spin_unlock(&uctx->cotable_lock);

        if (res)
            vmw_resource_unreference(&res);
    }
}

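/**
 * vmw_hw_context_destroy - Destroy the device context backing a resource
 *
 * @res: Pointer to the context resource.
 *
 * For guest-backed and DX contexts this kills all bindings, destroys the
 * command buffer resource manager and releases the cotables. For legacy
 * contexts a SVGA_3D_CMD_CONTEXT_DESTROY command is submitted instead.
 */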
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
    struct vmw_user_context *uctx =
        container_of(res, struct vmw_user_context, res);
    struct vmw_private *dev_priv = res->dev_priv;
    struct {
        SVGA3dCmdHeader header;
        SVGA3dCmdDestroyContext body;
    } *cmd;


    if (res->func->destroy == vmw_gb_context_destroy ||
        res->func->destroy == vmw_dx_context_destroy) {
        mutex_lock(&dev_priv->cmdbuf_mutex);
        vmw_cmdbuf_res_man_destroy(uctx->man);
        mutex_lock(&dev_priv->binding_mutex);
        vmw_binding_state_kill(uctx->cbs);
        (void) res->func->destroy(res);
        mutex_unlock(&dev_priv->binding_mutex);
        if (dev_priv->pinned_bo != NULL &&
            !dev_priv->query_cid_valid)
            __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
        mutex_unlock(&dev_priv->cmdbuf_mutex);
        vmw_context_cotables_unref(dev_priv, uctx);
        return;
    }

    vmw_execbuf_release_pinned_bo(dev_priv);
    cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
    if (unlikely(cmd == NULL))
        return;

    cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
    cmd->header.size = sizeof(cmd->body);
    cmd->body.cid = res->id;

    vmw_cmd_commit(dev_priv, sizeof(*cmd));
    vmw_fifo_resource_dec(dev_priv);
}

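/**
 * vmw_gb_context_init - Initialize a guest-backed or DX context resource
 *
 * @dev_priv: Pointer to the device private structure.
 * @dx: Whether to initialize a DX context rather than a GB context.
 * @res: Pointer to the embedded resource to initialize.
 * @res_free: Destructor to call on error, or NULL to use kfree().
 *
 * Sets up the backup buffer size, the command buffer resource manager,
 * the binding state and, for DX contexts, the per-context cotables.
 * On error the resource is freed via @res_free or kfree().
 */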
static int vmw_gb_context_init(struct vmw_private *dev_priv,
                   bool dx,
                   struct vmw_resource *res,
                   void (*res_free)(struct vmw_resource *res))
{
    int ret, i;
    struct vmw_user_context *uctx =
        container_of(res, struct vmw_user_context, res);

    res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
                 sizeof(SVGAGBContextData));
    ret = vmw_resource_init(dev_priv, res, true,
                res_free,
                dx ? &vmw_dx_context_func :
                &vmw_gb_context_func);
    if (unlikely(ret != 0))
        goto out_err;

    if (dev_priv->has_mob) {
        uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
        if (IS_ERR(uctx->man)) {
            ret = PTR_ERR(uctx->man);
            uctx->man = NULL;
            goto out_err;
        }
    }

    uctx->cbs = vmw_binding_state_alloc(dev_priv);
    if (IS_ERR(uctx->cbs)) {
        ret = PTR_ERR(uctx->cbs);
        goto out_err;
    }

    spin_lock_init(&uctx->cotable_lock);

    if (dx) {
        u32 cotable_max = has_sm5_context(dev_priv) ?
            SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
        for (i = 0; i < cotable_max; ++i) {
            uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
                                  &uctx->res, i);
            if (IS_ERR(uctx->cotables[i])) {
                ret = PTR_ERR(uctx->cotables[i]);
                goto out_cotables;
            }
        }
    }

    res->hw_destroy = vmw_hw_context_destroy;
    return 0;

out_cotables:
    vmw_context_cotables_unref(dev_priv, uctx);
out_err:
    if (res_free)
        res_free(res);
    else
        kfree(res);
    return ret;
}

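/**
 * vmw_context_init - Initialize a context resource
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: Pointer to the embedded resource to initialize.
 * @res_free: Destructor to call on error, or NULL to use kfree().
 * @dx: Whether the context should be a DX context.
 *
 * Dispatches to vmw_gb_context_init() on devices with MOB support;
 * otherwise defines a legacy context directly in the command stream.
 */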
static int vmw_context_init(struct vmw_private *dev_priv,
                struct vmw_resource *res,
                void (*res_free)(struct vmw_resource *res),
                bool dx)
{
    int ret;

    struct {
        SVGA3dCmdHeader header;
        SVGA3dCmdDefineContext body;
    } *cmd;

    if (dev_priv->has_mob)
        return vmw_gb_context_init(dev_priv, dx, res, res_free);

    ret = vmw_resource_init(dev_priv, res, false,
                res_free, &vmw_legacy_context_func);

    if (unlikely(ret != 0)) {
        DRM_ERROR("Failed to allocate a resource id.\n");
        goto out_early;
    }

    if (unlikely(res->id >= SVGA3D_HB_MAX_CONTEXT_IDS)) {
        DRM_ERROR("Out of hw context ids.\n");
        vmw_resource_unreference(&res);
        return -ENOMEM;
    }

    cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
    if (unlikely(cmd == NULL)) {
        vmw_resource_unreference(&res);
        return -ENOMEM;
    }

    cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
    cmd->header.size = sizeof(cmd->body);
    cmd->body.cid = res->id;

    vmw_cmd_commit(dev_priv, sizeof(*cmd));
    vmw_fifo_resource_inc(dev_priv);
    res->hw_destroy = vmw_hw_context_destroy;
    return 0;

out_early:
    if (res_free == NULL)
        kfree(res);
    else
        res_free(res);
    return ret;
}


/*
 * GB context.
 */

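/**
 * vmw_gb_context_create - Allocate a context id and define the context on
 * the device
 *
 * @res: Pointer to the context resource.
 */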
static int vmw_gb_context_create(struct vmw_resource *res)
{
    struct vmw_private *dev_priv = res->dev_priv;
    int ret;
    struct {
        SVGA3dCmdHeader header;
        SVGA3dCmdDefineGBContext body;
    } *cmd;

    if (likely(res->id != -1))
        return 0;

    ret = vmw_resource_alloc_id(res);
    if (unlikely(ret != 0)) {
        DRM_ERROR("Failed to allocate a context id.\n");
        goto out_no_id;
    }

    if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
        ret = -EBUSY;
        goto out_no_fifo;
    }

    cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
    if (unlikely(cmd == NULL)) {
        ret = -ENOMEM;
        goto out_no_fifo;
    }

    cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
    cmd->header.size = sizeof(cmd->body);
    cmd->body.cid = res->id;
    vmw_cmd_commit(dev_priv, sizeof(*cmd));
    vmw_fifo_resource_inc(dev_priv);

    return 0;

out_no_fifo:
    vmw_resource_release_id(res);
out_no_id:
    return ret;
}

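/**
 * vmw_gb_context_bind - Bind the context backup buffer to the device
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer holding the reserved backup MOB.
 */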
static int vmw_gb_context_bind(struct vmw_resource *res,
                   struct ttm_validate_buffer *val_buf)
{
    struct vmw_private *dev_priv = res->dev_priv;
    struct {
        SVGA3dCmdHeader header;
        SVGA3dCmdBindGBContext body;
    } *cmd;
    struct ttm_buffer_object *bo = val_buf->bo;

    BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

    cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
    if (unlikely(cmd == NULL))
        return -ENOMEM;

    cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
    cmd->header.size = sizeof(cmd->body);
    cmd->body.cid = res->id;
    cmd->body.mobid = bo->resource->start;
    cmd->body.validContents = res->backup_dirty;
    res->backup_dirty = false;
    vmw_cmd_commit(dev_priv, sizeof(*cmd));

    return 0;
}

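/**
 * vmw_gb_context_unbind - Unbind the context backup buffer from the device
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to issue a readback before unbinding.
 * @val_buf: Validation buffer holding the backup MOB.
 *
 * Scrubs all bindings, optionally reads the context state back into the
 * backup buffer, and fences the buffer against the submitted commands.
 */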
static int vmw_gb_context_unbind(struct vmw_resource *res,
                 bool readback,
                 struct ttm_validate_buffer *val_buf)
{
    struct vmw_private *dev_priv = res->dev_priv;
    struct ttm_buffer_object *bo = val_buf->bo;
    struct vmw_fence_obj *fence;
    struct vmw_user_context *uctx =
        container_of(res, struct vmw_user_context, res);

    struct {
        SVGA3dCmdHeader header;
        SVGA3dCmdReadbackGBContext body;
    } *cmd1;
    struct {
        SVGA3dCmdHeader header;
        SVGA3dCmdBindGBContext body;
    } *cmd2;
    uint32_t submit_size;
    uint8_t *cmd;


    BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

    mutex_lock(&dev_priv->binding_mutex);
    vmw_binding_state_scrub(uctx->cbs);

    submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

    cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
    if (unlikely(cmd == NULL)) {
        mutex_unlock(&dev_priv->binding_mutex);
        return -ENOMEM;
    }

    cmd2 = (void *) cmd;
    if (readback) {
        cmd1 = (void *) cmd;
        cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
        cmd1->header.size = sizeof(cmd1->body);
        cmd1->body.cid = res->id;
        cmd2 = (void *) (&cmd1[1]);
    }
    cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
    cmd2->header.size = sizeof(cmd2->body);
    cmd2->body.cid = res->id;
    cmd2->body.mobid = SVGA3D_INVALID_ID;

    vmw_cmd_commit(dev_priv, submit_size);
    mutex_unlock(&dev_priv->binding_mutex);

    /*
     * Create a fence object and fence the backup buffer.
     */

    (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                      &fence, NULL);

    vmw_bo_fence_single(bo, fence);

    if (likely(fence != NULL))
        vmw_fence_obj_unreference(&fence);

    return 0;
}

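/**
 * vmw_gb_context_destroy - Destroy the device context and release its id
 *
 * @res: Pointer to the context resource.
 */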
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
    struct vmw_private *dev_priv = res->dev_priv;
    struct {
        SVGA3dCmdHeader header;
        SVGA3dCmdDestroyGBContext body;
    } *cmd;

    if (likely(res->id == -1))
        return 0;

    cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
    if (unlikely(cmd == NULL))
        return -ENOMEM;

    cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
    cmd->header.size = sizeof(cmd->body);
    cmd->body.cid = res->id;
    vmw_cmd_commit(dev_priv, sizeof(*cmd));
    if (dev_priv->query_cid == res->id)
        dev_priv->query_cid_valid = false;
    vmw_resource_release_id(res);
    vmw_fifo_resource_dec(dev_priv);

    return 0;
}

/*
 * DX context.
 */

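/**
 * vmw_dx_context_create - Allocate a context id and define the DX context
 * on the device
 *
 * @res: Pointer to the context resource.
 */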
static int vmw_dx_context_create(struct vmw_resource *res)
{
    struct vmw_private *dev_priv = res->dev_priv;
    int ret;
    struct {
        SVGA3dCmdHeader header;
        SVGA3dCmdDXDefineContext body;
    } *cmd;

    if (likely(res->id != -1))
        return 0;

    ret = vmw_resource_alloc_id(res);
    if (unlikely(ret != 0)) {
        DRM_ERROR("Failed to allocate a context id.\n");
        goto out_no_id;
    }

    if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
        ret = -EBUSY;
        goto out_no_fifo;
    }

    cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
    if (unlikely(cmd == NULL)) {
        ret = -ENOMEM;
        goto out_no_fifo;
    }

    cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
    cmd->header.size = sizeof(cmd->body);
    cmd->body.cid = res->id;
    vmw_cmd_commit(dev_priv, sizeof(*cmd));
    vmw_fifo_resource_inc(dev_priv);

    return 0;

out_no_fifo:
    vmw_resource_release_id(res);
out_no_id:
    return ret;
}

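/**
 * vmw_dx_context_bind - Bind the DX context backup buffer to the device
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer holding the reserved backup MOB.
 */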
static int vmw_dx_context_bind(struct vmw_resource *res,
                   struct ttm_validate_buffer *val_buf)
{
    struct vmw_private *dev_priv = res->dev_priv;
    struct {
        SVGA3dCmdHeader header;
        SVGA3dCmdDXBindContext body;
    } *cmd;
    struct ttm_buffer_object *bo = val_buf->bo;

    BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

    cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
    if (unlikely(cmd == NULL))
        return -ENOMEM;

    cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
    cmd->header.size = sizeof(cmd->body);
    cmd->body.cid = res->id;
    cmd->body.mobid = bo->resource->start;
    cmd->body.validContents = res->backup_dirty;
    res->backup_dirty = false;
    vmw_cmd_commit(dev_priv, sizeof(*cmd));


    return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and
 * cotables from a context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the cotable contents on scrubbing.
 *
 * COtables must be unbound before their context, but unbinding requires
 * the backup buffer being reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so all bindings are scrubbed
 * first so that this doesn't have to be done later with an invalid context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
                   bool readback)
{
    struct vmw_user_context *uctx =
        container_of(ctx, struct vmw_user_context, res);
    u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
        SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
    int i;

    vmw_binding_state_scrub(uctx->cbs);
    for (i = 0; i < cotable_max; ++i) {
        struct vmw_resource *res;

        /* Avoid racing with ongoing cotable destruction. */
        spin_lock(&uctx->cotable_lock);
        res = uctx->cotables[vmw_cotable_scrub_order[i]];
        if (res)
            res = vmw_resource_reference_unless_doomed(res);
        spin_unlock(&uctx->cotable_lock);
        if (!res)
            continue;

        WARN_ON(vmw_cotable_scrub(res, readback));
        vmw_resource_unreference(&res);
    }
}

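/**
 * vmw_dx_context_unbind - Unbind the DX context backup buffer from the
 * device
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to read the context and query states back first.
 * @val_buf: Validation buffer holding the backup MOB.
 *
 * Scrubs cotables and bindings, optionally reads back pending query
 * states, and fences the backup buffer against the submitted commands.
 */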
static int vmw_dx_context_unbind(struct vmw_resource *res,
                 bool readback,
                 struct ttm_validate_buffer *val_buf)
{
    struct vmw_private *dev_priv = res->dev_priv;
    struct ttm_buffer_object *bo = val_buf->bo;
    struct vmw_fence_obj *fence;
    struct vmw_user_context *uctx =
        container_of(res, struct vmw_user_context, res);

    struct {
        SVGA3dCmdHeader header;
        SVGA3dCmdDXReadbackContext body;
    } *cmd1;
    struct {
        SVGA3dCmdHeader header;
        SVGA3dCmdDXBindContext body;
    } *cmd2;
    uint32_t submit_size;
    uint8_t *cmd;


    BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

    mutex_lock(&dev_priv->binding_mutex);
    vmw_dx_context_scrub_cotables(res, readback);

    if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
        readback) {
        WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
        if (vmw_query_readback_all(uctx->dx_query_mob))
            DRM_ERROR("Failed to read back query states\n");
    }

    submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

    cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
    if (unlikely(cmd == NULL)) {
        mutex_unlock(&dev_priv->binding_mutex);
        return -ENOMEM;
    }

    cmd2 = (void *) cmd;
    if (readback) {
        cmd1 = (void *) cmd;
        cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
        cmd1->header.size = sizeof(cmd1->body);
        cmd1->body.cid = res->id;
        cmd2 = (void *) (&cmd1[1]);
    }
    cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
    cmd2->header.size = sizeof(cmd2->body);
    cmd2->body.cid = res->id;
    cmd2->body.mobid = SVGA3D_INVALID_ID;

    vmw_cmd_commit(dev_priv, submit_size);
    mutex_unlock(&dev_priv->binding_mutex);

    /*
     * Create a fence object and fence the backup buffer.
     */

    (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                      &fence, NULL);

    vmw_bo_fence_single(bo, fence);

    if (likely(fence != NULL))
        vmw_fence_obj_unreference(&fence);

    return 0;
}

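/**
 * vmw_dx_context_destroy - Destroy the device DX context and release its id
 *
 * @res: Pointer to the context resource.
 */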
static int vmw_dx_context_destroy(struct vmw_resource *res)
{
    struct vmw_private *dev_priv = res->dev_priv;
    struct {
        SVGA3dCmdHeader header;
        SVGA3dCmdDXDestroyContext body;
    } *cmd;

    if (likely(res->id == -1))
        return 0;

    cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
    if (unlikely(cmd == NULL))
        return -ENOMEM;

    cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
    cmd->header.size = sizeof(cmd->body);
    cmd->body.cid = res->id;
    vmw_cmd_commit(dev_priv, sizeof(*cmd));
    if (dev_priv->query_cid == res->id)
        dev_priv->query_cid_valid = false;
    vmw_resource_release_id(res);
    vmw_fifo_resource_dec(dev_priv);

    return 0;
}

/*
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
    return &(container_of(base, struct vmw_user_context, base)->res);
}

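/**
 * vmw_user_context_free - Free a user-space visible context resource
 *
 * @res: Pointer to the embedded context resource.
 *
 * Called as the resource destructor. Frees the binding state, drops any
 * DX query MOB association and releases the backing base object.
 */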
static void vmw_user_context_free(struct vmw_resource *res)
{
    struct vmw_user_context *ctx =
        container_of(res, struct vmw_user_context, res);

    if (ctx->cbs)
        vmw_binding_state_free(ctx->cbs);

    (void) vmw_context_bind_dx_query(res, NULL);

    ttm_base_object_kfree(ctx, base);
}

/*
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
    struct ttm_base_object *base = *p_base;
    struct vmw_user_context *ctx =
        container_of(base, struct vmw_user_context, base);
    struct vmw_resource *res = &ctx->res;

    *p_base = NULL;
    vmw_resource_unreference(&res);
}

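/**
 * vmw_context_destroy_ioctl - Ioctl entry point for destroying a context
 *
 * @dev: Pointer to the drm device.
 * @data: The drm_vmw_context_arg identifying the context handle.
 * @file_priv: Identifies the calling file.
 *
 * Drops the calling file's reference on the base object identified by
 * the handle, which in turn releases the resource.
 */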
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
    struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
    struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

    return ttm_ref_object_base_unref(tfile, arg->cid);
}

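/**
 * vmw_context_define - Create a context resource and its user-space handle
 *
 * @dev: Pointer to the drm device.
 * @data: The drm_vmw_context_arg receiving the new handle.
 * @file_priv: Identifies the calling file.
 * @dx: Whether to create a DX context.
 *
 * Allocates a vmw_user_context, initializes the embedded resource and
 * registers a ttm base object for it; on success the new handle is
 * returned in @data.
 */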
static int vmw_context_define(struct drm_device *dev, void *data,
                  struct drm_file *file_priv, bool dx)
{
    struct vmw_private *dev_priv = vmw_priv(dev);
    struct vmw_user_context *ctx;
    struct vmw_resource *res;
    struct vmw_resource *tmp;
    struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
    struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
    int ret;

    if (!has_sm4_context(dev_priv) && dx) {
        VMW_DEBUG_USER("DX contexts not supported by device.\n");
        return -EINVAL;
    }

    ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
    if (unlikely(!ctx)) {
        ret = -ENOMEM;
        goto out_ret;
    }

    res = &ctx->res;
    ctx->base.shareable = false;
    ctx->base.tfile = NULL;

    /*
     * From here on, the destructor takes over resource freeing.
     */

    ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
    if (unlikely(ret != 0))
        goto out_ret;

    tmp = vmw_resource_reference(&ctx->res);
    ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
                   &vmw_user_context_base_release);

    if (unlikely(ret != 0)) {
        vmw_resource_unreference(&tmp);
        goto out_err;
    }

    arg->cid = ctx->base.handle;
out_err:
    vmw_resource_unreference(&res);
out_ret:
    return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                 struct drm_file *file_priv)
{
    return vmw_context_define(dev, data, file_priv, false);
}

int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
    union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
    struct drm_vmw_context_arg *rep = &arg->rep;

    switch (arg->req) {
    case drm_vmw_context_legacy:
        return vmw_context_define(dev, rep, file_priv, false);
    case drm_vmw_context_dx:
        return vmw_context_define(dev, rep, file_priv, true);
    default:
        break;
    }
    return -EINVAL;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
    struct vmw_user_context *uctx =
        container_of(ctx, struct vmw_user_context, res);

    return vmw_binding_state_list(uctx->cbs);
}

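/**
 * vmw_context_res_man - Return the command buffer resource manager of a
 * context
 *
 * @ctx: The context resource.
 */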
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
    return container_of(ctx, struct vmw_user_context, res)->man;
}

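/**
 * vmw_context_cotable - Return the cotable of a given type for a context
 *
 * @ctx: The context resource.
 * @cotable_type: The cotable type.
 *
 * Returns ERR_PTR(-EINVAL) if the type is out of range for the device.
 */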
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
                     SVGACOTableType cotable_type)
{
    u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
        SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

    if (cotable_type >= cotable_max)
        return ERR_PTR(-EINVAL);

    return container_of(ctx, struct vmw_user_context, res)->
        cotables[cotable_type];
}

/**
 * vmw_context_binding_state -
 * Return a pointer to a context binding state structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
    return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query -
 * Sets query MOB for the context.  If @mob is NULL, then this function will
 * remove the association between the MOB and the context.  This function
 * assumes the binding_mutex is held.
 *
 * @ctx_res: The context resource
 * @mob: a reference to the query MOB
 *
 * Returns -EINVAL if a MOB has already been set and does not match the one
 * specified in the parameter.  0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
                  struct vmw_buffer_object *mob)
{
    struct vmw_user_context *uctx =
        container_of(ctx_res, struct vmw_user_context, res);

    if (mob == NULL) {
        if (uctx->dx_query_mob) {
            uctx->dx_query_mob->dx_query_ctx = NULL;
            vmw_bo_unreference(&uctx->dx_query_mob);
            uctx->dx_query_mob = NULL;
        }

        return 0;
    }

    /* Can only have one MOB per context for queries */
    if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
        return -EINVAL;

    mob->dx_query_ctx = ctx_res;

    if (!uctx->dx_query_mob)
        uctx->dx_query_mob = vmw_bo_reference(mob);

    return 0;
}

/**
 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
 *
 * @ctx_res: The context resource
 */
struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
    struct vmw_user_context *uctx =
        container_of(ctx_res, struct vmw_user_context, res);

    return uctx->dx_query_mob;
}