// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Treat context OTables as resources to make use of the resource
 * backing MOB eviction mechanism, which is used to read back the COTable
 * whenever the backing MOB is evicted.
 */

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"

/**
 * struct vmw_cotable - Context Object Table resource
 *
 * @res: struct vmw_resource we are deriving from.
 * @ctx: non-refcounted pointer to the owning context.
 * @size_read_back: Size of data read back during eviction.
 * @seen_entries: Highest entry id seen in the command stream for this cotable.
 * @type: The cotable type.
 * @scrubbed: Whether the cotable has been scrubbed.
 * @resource_list: List of resources in the cotable.
 */
struct vmw_cotable {
    struct vmw_resource res;
    struct vmw_resource *ctx;
    size_t size_read_back;
    int seen_entries;
    u32 type;
    bool scrubbed;
    struct list_head resource_list;
};

/**
 * struct vmw_cotable_info - Static info about cotable types
 *
 * @min_initial_entries: Minimum number of initial entries at cotable
 * allocation for this cotable type.
 * @size: Size of each entry.
 * @unbind_func: Unbind call-back function.
 */
struct vmw_cotable_info {
    u32 min_initial_entries;
    u32 size;
    void (*unbind_func)(struct vmw_private *, struct list_head *,
                bool);
};

static const struct vmw_cotable_info co_info[] = {
    {1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
    {1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
    {1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
    {1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
    {1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
    {1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
    {1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
    {1, sizeof(SVGACOTableDXSamplerEntry), NULL},
    {1, sizeof(SVGACOTableDXStreamOutputEntry), &vmw_dx_streamoutput_cotable_list_scrub},
    {1, sizeof(SVGACOTableDXQueryEntry), NULL},
    {1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub},
    {1, sizeof(SVGACOTableDXUAViewEntry), &vmw_view_cotable_list_destroy}
};

/*
 * Cotables with bindings that we remove must be scrubbed first;
 * otherwise the device will swap in an invalid context when the
 * bindings are removed before the cotable is scrubbed.
 */
const SVGACOTableType vmw_cotable_scrub_order[] = {
    SVGA_COTABLE_RTVIEW,
    SVGA_COTABLE_DSVIEW,
    SVGA_COTABLE_SRVIEW,
    SVGA_COTABLE_DXSHADER,
    SVGA_COTABLE_ELEMENTLAYOUT,
    SVGA_COTABLE_BLENDSTATE,
    SVGA_COTABLE_DEPTHSTENCIL,
    SVGA_COTABLE_RASTERIZERSTATE,
    SVGA_COTABLE_SAMPLER,
    SVGA_COTABLE_STREAMOUTPUT,
    SVGA_COTABLE_DXQUERY,
    SVGA_COTABLE_UAVIEW,
};

static int vmw_cotable_bind(struct vmw_resource *res,
                struct ttm_validate_buffer *val_buf);
static int vmw_cotable_unbind(struct vmw_resource *res,
                  bool readback,
                  struct ttm_validate_buffer *val_buf);
static int vmw_cotable_create(struct vmw_resource *res);
static int vmw_cotable_destroy(struct vmw_resource *res);

static const struct vmw_res_func vmw_cotable_func = {
    .res_type = vmw_res_cotable,
    .needs_backup = true,
    .may_evict = true,
    .prio = 3,
    .dirty_prio = 3,
    .type_name = "context guest backed object tables",
    .backup_placement = &vmw_mob_placement,
    .create = vmw_cotable_create,
    .destroy = vmw_cotable_destroy,
    .bind = vmw_cotable_bind,
    .unbind = vmw_cotable_unbind,
};

/**
 * vmw_cotable - Convert a struct vmw_resource pointer to a struct
 * vmw_cotable pointer
 *
 * @res: Pointer to the resource.
 */
static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
{
    return container_of(res, struct vmw_cotable, res);
}

/**
 * vmw_cotable_destroy - Cotable resource destroy callback
 *
 * @res: Pointer to the cotable resource.
 *
 * There is no device cotable destroy command, so this function only
 * makes sure that the resource id is set to invalid.
 */
static int vmw_cotable_destroy(struct vmw_resource *res)
{
    res->id = -1;
    return 0;
}

/**
 * vmw_cotable_unscrub - Undo a cotable scrub operation
 *
 * @res: Pointer to the cotable resource
 *
 * This function issues commands to (re)bind the cotable to
 * its backing mob, which needs to be validated and reserved at this point.
 * This is identical to bind() except the function interface looks different.
 */
static int vmw_cotable_unscrub(struct vmw_resource *res)
{
    struct vmw_cotable *vcotbl = vmw_cotable(res);
    struct vmw_private *dev_priv = res->dev_priv;
    struct ttm_buffer_object *bo = &res->backup->base;
    struct {
        SVGA3dCmdHeader header;
        SVGA3dCmdDXSetCOTable body;
    } *cmd;

    WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
    dma_resv_assert_held(bo->base.resv);

    cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
    if (!cmd)
        return -ENOMEM;

    WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
    WARN_ON(bo->resource->mem_type != VMW_PL_MOB);
    cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
    cmd->header.size = sizeof(cmd->body);
    cmd->body.cid = vcotbl->ctx->id;
    cmd->body.type = vcotbl->type;
    cmd->body.mobid = bo->resource->start;
    cmd->body.validSizeInBytes = vcotbl->size_read_back;

    vmw_cmd_commit_flush(dev_priv, sizeof(*cmd));
    vcotbl->scrubbed = false;

    return 0;
}

/**
 * vmw_cotable_bind - Cotable resource bind callback
 *
 * @res: Pointer to the cotable resource
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * This function issues commands to (re)bind the cotable to
 * its backing mob, which needs to be validated and reserved at this point.
 */
static int vmw_cotable_bind(struct vmw_resource *res,
                struct ttm_validate_buffer *val_buf)
{
    /*
     * The create() callback may have changed @res->backup without
     * the caller noticing, and with val_buf->bo still pointing to
     * the old backup buffer. Although hackish, and not used currently,
     * take the opportunity to correct the value here so that it's not
     * misused in the future.
     */
    val_buf->bo = &res->backup->base;

    return vmw_cotable_unscrub(res);
}

/**
 * vmw_cotable_scrub - Scrub the cotable from the device.
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to initiate a readback of the cotable data to the
 * backup buffer.
 *
 * In some situations (context swapouts) it might be desirable to make the
 * device forget about the cotable without performing a full unbind. A full
 * unbind requires reserved backup buffers and it might not be possible to
 * reserve them due to locking order violation issues. The vmw_cotable_scrub
 * function implements a partial unbind() without that requirement but with the
 * following restrictions:
 * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
 *    be called.
 * 2) Before the cotable backing buffer is used by the CPU, or during the
 *    resource destruction, vmw_cotable_unbind() must be called.
 */
int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
{
    struct vmw_cotable *vcotbl = vmw_cotable(res);
    struct vmw_private *dev_priv = res->dev_priv;
    size_t submit_size;

    struct {
        SVGA3dCmdHeader header;
        SVGA3dCmdDXReadbackCOTable body;
    } *cmd0;
    struct {
        SVGA3dCmdHeader header;
        SVGA3dCmdDXSetCOTable body;
    } *cmd1;

    if (vcotbl->scrubbed)
        return 0;

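    /*
     * Let the type-specific callback scrub or destroy the resources that
     * are backed by this cotable before the cotable itself is scrubbed.
     */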
    if (co_info[vcotbl->type].unbind_func)
        co_info[vcotbl->type].unbind_func(dev_priv,
                          &vcotbl->resource_list,
                          readback);
    submit_size = sizeof(*cmd1);
    if (readback)
        submit_size += sizeof(*cmd0);

    cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
    if (!cmd1)
        return -ENOMEM;

    vcotbl->size_read_back = 0;
    if (readback) {
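        /*
         * Place the readback command first in the reserved space; the
         * set-cotable command is then written immediately after it.
         */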
        cmd0 = (void *) cmd1;
        cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
        cmd0->header.size = sizeof(cmd0->body);
        cmd0->body.cid = vcotbl->ctx->id;
        cmd0->body.type = vcotbl->type;
        cmd1 = (void *) &cmd0[1];
        vcotbl->size_read_back = res->backup_size;
    }
    cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
    cmd1->header.size = sizeof(cmd1->body);
    cmd1->body.cid = vcotbl->ctx->id;
    cmd1->body.type = vcotbl->type;
    cmd1->body.mobid = SVGA3D_INVALID_ID;
    cmd1->body.validSizeInBytes = 0;
    vmw_cmd_commit_flush(dev_priv, submit_size);
    vcotbl->scrubbed = true;

    /* Trigger a create() on next validate. */
    res->id = -1;

    return 0;
}

/**
 * vmw_cotable_unbind - Cotable resource unbind callback
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to read back cotable data to the backup buffer.
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * Unbinds the cotable from the device and fences the backup buffer.
 */
static int vmw_cotable_unbind(struct vmw_resource *res,
                  bool readback,
                  struct ttm_validate_buffer *val_buf)
{
    struct vmw_cotable *vcotbl = vmw_cotable(res);
    struct vmw_private *dev_priv = res->dev_priv;
    struct ttm_buffer_object *bo = val_buf->bo;
    struct vmw_fence_obj *fence;

    if (!vmw_resource_mob_attached(res))
        return 0;

    WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
    dma_resv_assert_held(bo->base.resv);

    mutex_lock(&dev_priv->binding_mutex);
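    /* If this cotable is still bound, scrub the owning context's cotables. */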
    if (!vcotbl->scrubbed)
        vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
    mutex_unlock(&dev_priv->binding_mutex);
    (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
    vmw_bo_fence_single(bo, fence);
    if (likely(fence != NULL))
        vmw_fence_obj_unreference(&fence);

    return 0;
}

/**
 * vmw_cotable_readback - Read back a cotable without unbinding.
 *
 * @res: The cotable resource.
 *
 * Reads back a cotable to its backing mob without scrubbing the MOB from
 * the cotable. The MOB is fenced for subsequent CPU access.
 */
static int vmw_cotable_readback(struct vmw_resource *res)
{
    struct vmw_cotable *vcotbl = vmw_cotable(res);
    struct vmw_private *dev_priv = res->dev_priv;

    struct {
        SVGA3dCmdHeader header;
        SVGA3dCmdDXReadbackCOTable body;
    } *cmd;
    struct vmw_fence_obj *fence;

    if (!vcotbl->scrubbed) {
        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (!cmd)
            return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = vcotbl->ctx->id;
        cmd->body.type = vcotbl->type;
        vcotbl->size_read_back = res->backup_size;
        vmw_cmd_commit(dev_priv, sizeof(*cmd));
    }

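    /* Fence the backing MOB so subsequent CPU access waits for the readback. */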
    (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
    vmw_bo_fence_single(&res->backup->base, fence);
    vmw_fence_obj_unreference(&fence);

    return 0;
}

/**
 * vmw_cotable_resize - Resize a cotable.
 *
 * @res: The cotable resource.
 * @new_size: The new size.
 *
 * Resizes a cotable and binds the new backup buffer.
 * On failure the cotable is left intact.
 * Important! This function may not fail once the MOB switch has been
 * committed to hardware. That would put the device context in an
 * invalid state which we can't currently recover from.
 */
static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
{
    struct ttm_operation_ctx ctx = { false, false };
    struct vmw_private *dev_priv = res->dev_priv;
    struct vmw_cotable *vcotbl = vmw_cotable(res);
    struct vmw_buffer_object *buf, *old_buf = res->backup;
    struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
    size_t old_size = res->backup_size;
    size_t old_size_read_back = vcotbl->size_read_back;
    size_t cur_size_read_back;
    struct ttm_bo_kmap_obj old_map, new_map;
    int ret;
    size_t i;

    ret = vmw_cotable_readback(res);
    if (ret)
        return ret;

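    /*
     * The readback above updated size_read_back for the new contents.
     * Remember that value for after the MOB switch, but restore the old
     * value so the cotable stays consistent if the resize fails early.
     */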
    cur_size_read_back = vcotbl->size_read_back;
    vcotbl->size_read_back = old_size_read_back;

    /*
     * While the device is processing, allocate and reserve a buffer object
     * for the new COTable. Initially pin the buffer object to make sure
     * we can use tryreserve without failure.
     */
    ret = vmw_bo_create(dev_priv, new_size, &vmw_mob_placement,
                true, true, vmw_bo_bo_free, &buf);
    if (ret) {
        DRM_ERROR("Failed initializing new cotable MOB.\n");
        return ret;
    }

    bo = &buf->base;
    WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));

    ret = ttm_bo_wait(old_bo, false, false);
    if (unlikely(ret != 0)) {
        DRM_ERROR("Failed waiting for cotable unbind.\n");
        goto out_wait;
    }

    /*
     * Do a page by page copy of COTables. This eliminates slow vmap()s.
     * This should really be a TTM utility.
     */
    for (i = 0; i < old_bo->resource->num_pages; ++i) {
        bool dummy;

        ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
        if (unlikely(ret != 0)) {
            DRM_ERROR("Failed mapping old COTable on resize.\n");
            goto out_wait;
        }
        ret = ttm_bo_kmap(bo, i, 1, &new_map);
        if (unlikely(ret != 0)) {
            DRM_ERROR("Failed mapping new COTable on resize.\n");
            goto out_map_new;
        }
        memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
               ttm_kmap_obj_virtual(&old_map, &dummy),
               PAGE_SIZE);
        ttm_bo_kunmap(&new_map);
        ttm_bo_kunmap(&old_map);
    }

    /* Unpin new buffer, and switch backup buffers. */
    ret = ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
    if (unlikely(ret != 0)) {
        DRM_ERROR("Failed validating new COTable backup buffer.\n");
        goto out_wait;
    }

    vmw_resource_mob_detach(res);
    res->backup = buf;
    res->backup_size = new_size;
    vcotbl->size_read_back = cur_size_read_back;

    /*
     * Now tell the device to switch. If this fails, then we need to
     * revert the full resize.
     */
    ret = vmw_cotable_unscrub(res);
    if (ret) {
        DRM_ERROR("Failed switching COTable backup buffer.\n");
        res->backup = old_buf;
        res->backup_size = old_size;
        vcotbl->size_read_back = old_size_read_back;
        vmw_resource_mob_attach(res);
        goto out_wait;
    }

    vmw_resource_mob_attach(res);
    /* Let go of the old mob. */
    vmw_bo_unreference(&old_buf);
    res->id = vcotbl->type;

    ret = dma_resv_reserve_fences(bo->base.resv, 1);
    if (unlikely(ret))
        goto out_wait;

    /* Release the pin acquired in vmw_bo_create() above. */
    ttm_bo_unpin(bo);

    return 0;

out_map_new:
    ttm_bo_kunmap(&old_map);
out_wait:
    ttm_bo_unpin(bo);
    ttm_bo_unreserve(bo);
    vmw_bo_unreference(&buf);

    return ret;
}

/**
 * vmw_cotable_create - Cotable resource create callback
 *
 * @res: Pointer to a cotable resource.
 *
 * There is no separate create command for cotables, so this callback, which
 * is called before bind() in the validation sequence, is instead used for
 * two things:
 * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
 *    buffer.
 * 2) Resize the cotable if needed.
 */
static int vmw_cotable_create(struct vmw_resource *res)
{
    struct vmw_cotable *vcotbl = vmw_cotable(res);
    size_t new_size = res->backup_size;
    size_t needed_size;
    int ret;

    /* Check whether we need to resize the cotable */
    needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
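    /* Grow the backing store in powers of two until the needed size fits. */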
    while (needed_size > new_size)
        new_size *= 2;

    if (likely(new_size <= res->backup_size)) {
        if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) {
            ret = vmw_cotable_unscrub(res);
            if (ret)
                return ret;
        }
        res->id = vcotbl->type;
        return 0;
    }

    return vmw_cotable_resize(res, new_size);
}


/**
 * vmw_hw_cotable_destroy - Cotable hw_destroy callback
 *
 * @res: Pointer to a cotable resource.
 *
 * The final destroy callback, called as part of resource destruction.
 */
static void vmw_hw_cotable_destroy(struct vmw_resource *res)
{
    (void) vmw_cotable_destroy(res);
}

/**
 * vmw_cotable_free - Cotable resource destructor
 *
 * @res: Pointer to a cotable resource.
 */
static void vmw_cotable_free(struct vmw_resource *res)
{
    kfree(res);
}

/**
 * vmw_cotable_alloc - Create a cotable resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @ctx: Pointer to the context resource. The cotable resource does not
 * take a reference on it.
 * @type: The cotable type.
 */
struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
                       struct vmw_resource *ctx,
                       u32 type)
{
    struct vmw_cotable *vcotbl;
    int ret;
    u32 num_entries;

    vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
    if (unlikely(!vcotbl)) {
        ret = -ENOMEM;
        goto out_no_alloc;
    }

    ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
                vmw_cotable_free, &vmw_cotable_func);
    if (unlikely(ret != 0))
        goto out_no_init;

    INIT_LIST_HEAD(&vcotbl->resource_list);
    vcotbl->res.id = type;
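    /*
     * Start with one page of backing store; grow it if the minimum number
     * of initial entries for this cotable type does not fit in one page.
     */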
    vcotbl->res.backup_size = PAGE_SIZE;
    num_entries = PAGE_SIZE / co_info[type].size;
    if (num_entries < co_info[type].min_initial_entries) {
        vcotbl->res.backup_size = co_info[type].min_initial_entries *
            co_info[type].size;
        vcotbl->res.backup_size = PFN_ALIGN(vcotbl->res.backup_size);
    }

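    /* The new cotable starts out scrubbed (no MOB bound) with no entries seen. */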
    vcotbl->scrubbed = true;
    vcotbl->seen_entries = -1;
    vcotbl->type = type;
    vcotbl->ctx = ctx;

    vcotbl->res.hw_destroy = vmw_hw_cotable_destroy;

    return &vcotbl->res;

out_no_init:
    kfree(vcotbl);
out_no_alloc:
    return ERR_PTR(ret);
}

/**
 * vmw_cotable_notify - Notify the cotable about an item creation
 *
 * @res: Pointer to a cotable resource.
 * @id: Item id.
 */
int vmw_cotable_notify(struct vmw_resource *res, int id)
{
    struct vmw_cotable *vcotbl = vmw_cotable(res);

    if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
        DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
              (unsigned) vcotbl->type, id);
        return -EINVAL;
    }

    if (vcotbl->seen_entries < id) {
        /* Trigger a call to create() on next validate */
        res->id = -1;
        vcotbl->seen_entries = id;
    }

    return 0;
}

/**
 * vmw_cotable_add_resource - add a resource to the cotable's list of
 * active resources.
 *
 * @res: pointer to a struct vmw_resource representing the cotable.
 * @head: pointer to the struct list_head member of the resource, dedicated
 * to the cotable active resource list.
 */
void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
{
    struct vmw_cotable *vcotbl =
        container_of(res, struct vmw_cotable, res);

    list_add_tail(head, &vcotbl->resource_list);
}