Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0 OR MIT
0002 /**************************************************************************
0003  *
0004  * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
0005  *
0006  * Permission is hereby granted, free of charge, to any person obtaining a
0007  * copy of this software and associated documentation files (the
0008  * "Software"), to deal in the Software without restriction, including
0009  * without limitation the rights to use, copy, modify, merge, publish,
0010  * distribute, sub license, and/or sell copies of the Software, and to
0011  * permit persons to whom the Software is furnished to do so, subject to
0012  * the following conditions:
0013  *
0014  * The above copyright notice and this permission notice (including the
0015  * next paragraph) shall be included in all copies or substantial portions
0016  * of the Software.
0017  *
0018  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0019  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0020  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
0021  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
0022  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
0023  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
0024  * USE OR OTHER DEALINGS IN THE SOFTWARE.
0025  *
0026  **************************************************************************/
0027 /*
0028  * This file implements the vmwgfx context binding manager,
0029  * The sole reason for having to use this code is that vmware guest
0030  * backed contexts can be swapped out to their backing mobs by the device
0031  * at any time, also swapped in at any time. At swapin time, the device
0032  * validates the context bindings to make sure they point to valid resources.
0033  * It's this outside-of-drawcall validation (that can happen at any time),
0034  * that makes this code necessary.
0035  *
0036  * We therefore need to kill any context bindings pointing to a resource
0037  * when the resource is swapped out. Furthermore, if the vmwgfx driver has
0038  * swapped out the context we can't swap it in again to kill bindings because
0039  * of backing mob reservation lockdep violations, so as part of
0040  * context swapout, also kill all bindings of a context, so that they are
0041  * already killed if a resource to which a binding points
0042  * needs to be swapped out.
0043  *
0044  * Note that a resource can be pointed to by bindings from multiple contexts,
0045  * Therefore we can't easily protect this data by a per context mutex
0046  * (unless we use deadlock-safe WW mutexes). So we use a global binding_mutex
0047  * to protect all binding manager data.
0048  *
0049  * Finally, any association between a context and a global resource
0050  * (surface, shader or even DX query) is conceptually a context binding that
0051  * needs to be tracked by this code.
0052  */
0053 
0054 #include "vmwgfx_drv.h"
0055 #include "vmwgfx_binding.h"
0056 #include "device_include/svga3d_reg.h"
0057 
0058 #define VMW_BINDING_RT_BIT     0
0059 #define VMW_BINDING_PS_BIT     1
0060 #define VMW_BINDING_SO_T_BIT   2
0061 #define VMW_BINDING_VB_BIT     3
0062 #define VMW_BINDING_UAV_BIT    4
0063 #define VMW_BINDING_CS_UAV_BIT 5
0064 #define VMW_BINDING_NUM_BITS   6
0065 
0066 #define VMW_BINDING_PS_SR_BIT  0
0067 
0068 /**
0069  * struct vmw_ctx_binding_state - per context binding state
0070  *
0071  * @dev_priv: Pointer to device private structure.
0072  * @list: linked list of individual active bindings.
0073  * @render_targets: Render target bindings.
0074  * @texture_units: Texture units bindings.
0075  * @ds_view: Depth-stencil view binding.
0076  * @so_targets: StreamOutput target bindings.
0077  * @vertex_buffers: Vertex buffer bindings.
0078  * @index_buffer: Index buffer binding.
0079  * @per_shader: Per shader-type bindings.
0080  * @ua_views: UAV bindings.
0081  * @so_state: StreamOutput bindings.
0082  * @dirty: Bitmap tracking per binding-type changes that have not yet
0083  * been emitted to the device.
0084  * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
0085  * have not yet been emitted to the device.
0086  * @bind_cmd_buffer: Scratch space used to construct binding commands.
0087  * @bind_cmd_count: Number of binding command data entries in @bind_cmd_buffer
0088  * @bind_first_slot: Used together with @bind_cmd_buffer to indicate the
0089  * device binding slot of the first command data entry in @bind_cmd_buffer.
0090  *
0091  * Note that this structure also provides storage space for the individual
0092  * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
0093  * for individual bindings.
0094  *
0095  */
struct vmw_ctx_binding_state {
    struct vmw_private *dev_priv;
    struct list_head list;
    /*
     * Fixed-size slot storage for every binding type, so tracking a
     * binding never needs a dynamic allocation (see kerneldoc above).
     * Slot addresses are computed from this struct's layout via the
     * vmw_binding_*_offsets tables below.
     */
    struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX];
    struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS];
    struct vmw_ctx_bindinfo_view ds_view;
    struct vmw_ctx_bindinfo_so_target so_targets[SVGA3D_DX_MAX_SOTARGETS];
    struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
    struct vmw_ctx_bindinfo_ib index_buffer;
    struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE];
    struct vmw_ctx_bindinfo_uav ua_views[VMW_MAX_UAV_BIND_TYPE];
    struct vmw_ctx_bindinfo_so so_state;

    /* Pending (not yet emitted) state, see @dirty / @dirty_vb kerneldoc. */
    unsigned long dirty;
    DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);

    /* Scratch area used when building view/target binding commands. */
    u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS];
    u32 bind_cmd_count;
    u32 bind_first_slot;
};
0116 
0117 static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
0118 static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
0119                        bool rebind);
0120 static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
0121 static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind);
0122 static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind);
0123 static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind);
0124 static int vmw_binding_scrub_so_target(struct vmw_ctx_bindinfo *bi, bool rebind);
0125 static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs);
0126 static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
0127                        bool rebind);
0128 static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
0129 static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
0130 static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind);
0131 static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind);
0132 static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);
0133 
0134 static void vmw_binding_build_asserts(void) __attribute__ ((unused));
0135 
0136 typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
0137 
0138 /**
0139  * struct vmw_binding_info - Per binding type information for the binding
0140  * manager
0141  *
0142  * @size: The size of the struct binding derived from a struct vmw_ctx_bindinfo.
0143  * @offsets: array[shader_slot] of offsets to the array[slot]
0144  * of struct bindings for the binding type.
0145  * @scrub_func: Pointer to the scrub function for this binding type.
0146  *
0147  * Holds static information to help optimize the binding manager and avoid
0148  * an excessive amount of switch statements.
0149  */
struct vmw_binding_info {
    size_t size;          /* sizeof() the concrete bindinfo struct. */
    const size_t *offsets;  /* Per shader-slot offsets into the state. */
    vmw_scrub_func scrub_func;  /* Emits the scrub/rebind command. */
};
0155 
0156 /*
0157  * A number of static variables that help determine the scrub func and the
0158  * location of the struct vmw_ctx_bindinfo slots for each binding type.
0159  */
/* Per shader-type (6 entries, matching per_shader[]) slot offsets. */
static const size_t vmw_binding_shader_offsets[] = {
    offsetof(struct vmw_ctx_binding_state, per_shader[0].shader),
    offsetof(struct vmw_ctx_binding_state, per_shader[1].shader),
    offsetof(struct vmw_ctx_binding_state, per_shader[2].shader),
    offsetof(struct vmw_ctx_binding_state, per_shader[3].shader),
    offsetof(struct vmw_ctx_binding_state, per_shader[4].shader),
    offsetof(struct vmw_ctx_binding_state, per_shader[5].shader),
};
/* Binding types below have no shader slot; a single offset entry. */
static const size_t vmw_binding_rt_offsets[] = {
    offsetof(struct vmw_ctx_binding_state, render_targets),
};
static const size_t vmw_binding_tex_offsets[] = {
    offsetof(struct vmw_ctx_binding_state, texture_units),
};
static const size_t vmw_binding_cb_offsets[] = {
    offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers),
    offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers),
    offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers),
    offsetof(struct vmw_ctx_binding_state, per_shader[3].const_buffers),
    offsetof(struct vmw_ctx_binding_state, per_shader[4].const_buffers),
    offsetof(struct vmw_ctx_binding_state, per_shader[5].const_buffers),
};
static const size_t vmw_binding_dx_ds_offsets[] = {
    offsetof(struct vmw_ctx_binding_state, ds_view),
};
static const size_t vmw_binding_sr_offsets[] = {
    offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res),
    offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res),
    offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res),
    offsetof(struct vmw_ctx_binding_state, per_shader[3].shader_res),
    offsetof(struct vmw_ctx_binding_state, per_shader[4].shader_res),
    offsetof(struct vmw_ctx_binding_state, per_shader[5].shader_res),
};
static const size_t vmw_binding_so_target_offsets[] = {
    offsetof(struct vmw_ctx_binding_state, so_targets),
};
static const size_t vmw_binding_vb_offsets[] = {
    offsetof(struct vmw_ctx_binding_state, vertex_buffers),
};
static const size_t vmw_binding_ib_offsets[] = {
    offsetof(struct vmw_ctx_binding_state, index_buffer),
};
/* ua_views[0] holds graphics UAVs, ua_views[1] compute-shader UAVs. */
static const size_t vmw_binding_uav_offsets[] = {
    offsetof(struct vmw_ctx_binding_state, ua_views[0].views),
};
static const size_t vmw_binding_cs_uav_offsets[] = {
    offsetof(struct vmw_ctx_binding_state, ua_views[1].views),
};
static const size_t vmw_binding_so_offsets[] = {
    offsetof(struct vmw_ctx_binding_state, so_state),
};
0211 
/*
 * Dispatch table indexed by enum vmw_ctx_binding_type. Note that some
 * types share slot storage (e.g. rt/dx_rt both use
 * vmw_binding_rt_offsets) but use different scrub functions.
 */
static const struct vmw_binding_info vmw_binding_infos[] = {
    [vmw_ctx_binding_shader] = {
        .size = sizeof(struct vmw_ctx_bindinfo_shader),
        .offsets = vmw_binding_shader_offsets,
        .scrub_func = vmw_binding_scrub_shader},
    [vmw_ctx_binding_rt] = {
        .size = sizeof(struct vmw_ctx_bindinfo_view),
        .offsets = vmw_binding_rt_offsets,
        .scrub_func = vmw_binding_scrub_render_target},
    [vmw_ctx_binding_tex] = {
        .size = sizeof(struct vmw_ctx_bindinfo_tex),
        .offsets = vmw_binding_tex_offsets,
        .scrub_func = vmw_binding_scrub_texture},
    [vmw_ctx_binding_cb] = {
        .size = sizeof(struct vmw_ctx_bindinfo_cb),
        .offsets = vmw_binding_cb_offsets,
        .scrub_func = vmw_binding_scrub_cb},
    [vmw_ctx_binding_dx_shader] = {
        .size = sizeof(struct vmw_ctx_bindinfo_shader),
        .offsets = vmw_binding_shader_offsets,
        .scrub_func = vmw_binding_scrub_dx_shader},
    [vmw_ctx_binding_dx_rt] = {
        .size = sizeof(struct vmw_ctx_bindinfo_view),
        .offsets = vmw_binding_rt_offsets,
        .scrub_func = vmw_binding_scrub_dx_rt},
    [vmw_ctx_binding_sr] = {
        .size = sizeof(struct vmw_ctx_bindinfo_view),
        .offsets = vmw_binding_sr_offsets,
        .scrub_func = vmw_binding_scrub_sr},
    [vmw_ctx_binding_ds] = {
        .size = sizeof(struct vmw_ctx_bindinfo_view),
        .offsets = vmw_binding_dx_ds_offsets,
        .scrub_func = vmw_binding_scrub_dx_rt},
    [vmw_ctx_binding_so_target] = {
        .size = sizeof(struct vmw_ctx_bindinfo_so_target),
        .offsets = vmw_binding_so_target_offsets,
        .scrub_func = vmw_binding_scrub_so_target},
    [vmw_ctx_binding_vb] = {
        .size = sizeof(struct vmw_ctx_bindinfo_vb),
        .offsets = vmw_binding_vb_offsets,
        .scrub_func = vmw_binding_scrub_vb},
    [vmw_ctx_binding_ib] = {
        .size = sizeof(struct vmw_ctx_bindinfo_ib),
        .offsets = vmw_binding_ib_offsets,
        .scrub_func = vmw_binding_scrub_ib},
    [vmw_ctx_binding_uav] = {
        .size = sizeof(struct vmw_ctx_bindinfo_view),
        .offsets = vmw_binding_uav_offsets,
        .scrub_func = vmw_binding_scrub_uav},
    [vmw_ctx_binding_cs_uav] = {
        .size = sizeof(struct vmw_ctx_bindinfo_view),
        .offsets = vmw_binding_cs_uav_offsets,
        .scrub_func = vmw_binding_scrub_cs_uav},
    [vmw_ctx_binding_so] = {
        .size = sizeof(struct vmw_ctx_bindinfo_so),
        .offsets = vmw_binding_so_offsets,
        .scrub_func = vmw_binding_scrub_so},
};
0270 
0271 /**
0272  * vmw_cbs_context - Return a pointer to the context resource of a
0273  * context binding state tracker.
0274  *
0275  * @cbs: The context binding state tracker.
0276  *
0277  * Provided there are any active bindings, this function will return an
0278  * unreferenced pointer to the context resource that owns the context
0279  * binding state tracker. If there are no active bindings, this function
0280  * will return NULL. Note that the caller must somehow ensure that a reference
0281  * is held on the context resource prior to calling this function.
0282  */
0283 static const struct vmw_resource *
0284 vmw_cbs_context(const struct vmw_ctx_binding_state *cbs)
0285 {
0286     if (list_empty(&cbs->list))
0287         return NULL;
0288 
0289     return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo,
0290                 ctx_list)->ctx;
0291 }
0292 
0293 /**
0294  * vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location.
0295  *
0296  * @cbs: Pointer to a struct vmw_ctx_binding state which holds the slot.
0297  * @bt: The binding type.
0298  * @shader_slot: The shader slot of the binding. If none, then set to 0.
0299  * @slot: The slot of the binding.
0300  */
0301 static struct vmw_ctx_bindinfo *
0302 vmw_binding_loc(struct vmw_ctx_binding_state *cbs,
0303         enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot)
0304 {
0305     const struct vmw_binding_info *b = &vmw_binding_infos[bt];
0306     size_t offset = b->offsets[shader_slot] + b->size*slot;
0307 
0308     return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset);
0309 }
0310 
0311 /**
0312  * vmw_binding_drop: Stop tracking a context binding
0313  *
0314  * @bi: Pointer to binding tracker storage.
0315  *
0316  * Stops tracking a context binding, and re-initializes its storage.
0317  * Typically used when the context binding is replaced with a binding to
0318  * another (or the same, for that matter) resource.
0319  */
0320 static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi)
0321 {
0322     list_del(&bi->ctx_list);
0323     if (!list_empty(&bi->res_list))
0324         list_del(&bi->res_list);
0325     bi->ctx = NULL;
0326 }
0327 
0328 /**
0329  * vmw_binding_add: Start tracking a context binding
0330  *
0331  * @cbs: Pointer to the context binding state tracker.
0332  * @bi: Information about the binding to track.
0333  * @shader_slot: The shader slot of the binding.
0334  * @slot: The slot of the binding.
0335  *
0336  * Starts tracking the binding in the context binding
0337  * state structure @cbs.
0338  */
0339 void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
0340             const struct vmw_ctx_bindinfo *bi,
0341             u32 shader_slot, u32 slot)
0342 {
0343     struct vmw_ctx_bindinfo *loc =
0344         vmw_binding_loc(cbs, bi->bt, shader_slot, slot);
0345     const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt];
0346 
0347     if (loc->ctx != NULL)
0348         vmw_binding_drop(loc);
0349 
0350     memcpy(loc, bi, b->size);
0351     loc->scrubbed = false;
0352     list_add(&loc->ctx_list, &cbs->list);
0353     INIT_LIST_HEAD(&loc->res_list);
0354 }
0355 
0356 /**
0357  * vmw_binding_cb_offset_update: Update the offset of a cb binding
0358  *
0359  * @cbs: Pointer to the context binding state tracker.
0360  * @shader_slot: The shader slot of the binding.
0361  * @slot: The slot of the binding.
0362  * @offsetInBytes: The new offset of the binding.
0363  *
0364  * Updates the offset of an existing cb binding in the context binding
0365  * state structure @cbs.
0366  */
0367 void vmw_binding_cb_offset_update(struct vmw_ctx_binding_state *cbs,
0368                   u32 shader_slot, u32 slot, u32 offsetInBytes)
0369 {
0370     struct vmw_ctx_bindinfo *loc =
0371         vmw_binding_loc(cbs, vmw_ctx_binding_cb, shader_slot, slot);
0372     struct vmw_ctx_bindinfo_cb *loc_cb =
0373         (struct vmw_ctx_bindinfo_cb *)((u8 *) loc);
0374     loc_cb->offset = offsetInBytes;
0375 }
0376 
0377 /**
0378  * vmw_binding_add_uav_index - Add UAV index for tracking.
0379  * @cbs: Pointer to the context binding state tracker.
0380  * @slot: UAV type to which bind this index.
0381  * @index: The splice index to track.
0382  */
0383 void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs, uint32 slot,
0384                    uint32 index)
0385 {
0386     cbs->ua_views[slot].index = index;
0387 }
0388 
0389 /**
0390  * vmw_binding_transfer: Transfer a context binding tracking entry.
0391  *
0392  * @cbs: Pointer to the persistent context binding state tracker.
0393  * @from: Staged binding info built during execbuf
0394  * @bi: Information about the binding to track.
0395  *
0396  */
static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs,
                 const struct vmw_ctx_bindinfo *from,
                 const struct vmw_ctx_bindinfo *bi)
{
    /*
     * @bi is stored inside @from; since @cbs and @from share the
     * struct vmw_ctx_binding_state layout, the byte offset of @bi
     * within @from locates the corresponding slot within @cbs.
     */
    size_t offset = (unsigned long)bi - (unsigned long)from;
    struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *)
        ((unsigned long) cbs + offset);

    if (loc->ctx != NULL) {
        /* Complain if the staged binding was already scrubbed. */
        WARN_ON(bi->scrubbed);

        vmw_binding_drop(loc);
    }

    /* Only transfer bindings that actually reference a resource. */
    if (bi->res != NULL) {
        memcpy(loc, bi, vmw_binding_infos[bi->bt].size);
        list_add_tail(&loc->ctx_list, &cbs->list);
        list_add_tail(&loc->res_list, &loc->res->binding_head);
    }
}
0417 
0418 /**
0419  * vmw_binding_state_kill - Kill all bindings associated with a
0420  * struct vmw_ctx_binding state structure, and re-initialize the structure.
0421  *
0422  * @cbs: Pointer to the context binding state tracker.
0423  *
0424  * Emits commands to scrub all bindings associated with the
0425  * context binding state tracker. Then re-initializes the whole structure.
0426  */
0427 void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs)
0428 {
0429     struct vmw_ctx_bindinfo *entry, *next;
0430 
0431     vmw_binding_state_scrub(cbs);
0432     list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
0433         vmw_binding_drop(entry);
0434 }
0435 
0436 /**
0437  * vmw_binding_state_scrub - Scrub all bindings associated with a
0438  * struct vmw_ctx_binding state structure.
0439  *
0440  * @cbs: Pointer to the context binding state tracker.
0441  *
0442  * Emits commands to scrub all bindings associated with the
0443  * context binding state tracker.
0444  */
0445 void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
0446 {
0447     struct vmw_ctx_bindinfo *entry;
0448 
0449     list_for_each_entry(entry, &cbs->list, ctx_list) {
0450         if (!entry->scrubbed) {
0451             (void) vmw_binding_infos[entry->bt].scrub_func
0452                 (entry, false);
0453             entry->scrubbed = true;
0454         }
0455     }
0456 
0457     (void) vmw_binding_emit_dirty(cbs);
0458 }
0459 
0460 /**
0461  * vmw_binding_res_list_kill - Kill all bindings on a
0462  * resource binding list
0463  *
0464  * @head: list head of resource binding list
0465  *
0466  * Kills all bindings associated with a specific resource. Typically
0467  * called before the resource is destroyed.
0468  */
0469 void vmw_binding_res_list_kill(struct list_head *head)
0470 {
0471     struct vmw_ctx_bindinfo *entry, *next;
0472 
0473     vmw_binding_res_list_scrub(head);
0474     list_for_each_entry_safe(entry, next, head, res_list)
0475         vmw_binding_drop(entry);
0476 }
0477 
0478 /**
0479  * vmw_binding_res_list_scrub - Scrub all bindings on a
0480  * resource binding list
0481  *
0482  * @head: list head of resource binding list
0483  *
0484  * Scrub all bindings associated with a specific resource. Typically
0485  * called before the resource is evicted.
0486  */
0487 void vmw_binding_res_list_scrub(struct list_head *head)
0488 {
0489     struct vmw_ctx_bindinfo *entry;
0490 
0491     list_for_each_entry(entry, head, res_list) {
0492         if (!entry->scrubbed) {
0493             (void) vmw_binding_infos[entry->bt].scrub_func
0494                 (entry, false);
0495             entry->scrubbed = true;
0496         }
0497     }
0498 
0499     list_for_each_entry(entry, head, res_list) {
0500         struct vmw_ctx_binding_state *cbs =
0501             vmw_context_binding_state(entry->ctx);
0502 
0503         (void) vmw_binding_emit_dirty(cbs);
0504     }
0505 }
0506 
0507 
0508 /**
0509  * vmw_binding_state_commit - Commit staged binding info
0510  *
0511  * @to:   Staged binding info area to copy into to.
0512  * @from: Staged binding info built during execbuf.
0513  *
0514  * Transfers binding info from a temporary structure
0515  * (typically used by execbuf) to the persistent
0516  * structure in the context. This can be done once commands have been
0517  * submitted to hardware
0518  */
0519 void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
0520                   struct vmw_ctx_binding_state *from)
0521 {
0522     struct vmw_ctx_bindinfo *entry, *next;
0523 
0524     list_for_each_entry_safe(entry, next, &from->list, ctx_list) {
0525         vmw_binding_transfer(to, from, entry);
0526         vmw_binding_drop(entry);
0527     }
0528 
0529     /* Also transfer uav splice indices */
0530     to->ua_views[0].index = from->ua_views[0].index;
0531     to->ua_views[1].index = from->ua_views[1].index;
0532 }
0533 
0534 /**
0535  * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context
0536  *
0537  * @cbs: Pointer to the context binding state tracker.
0538  *
0539  * Walks through the context binding list and rebinds all scrubbed
0540  * resources.
0541  */
0542 int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs)
0543 {
0544     struct vmw_ctx_bindinfo *entry;
0545     int ret;
0546 
0547     list_for_each_entry(entry, &cbs->list, ctx_list) {
0548         if (likely(!entry->scrubbed))
0549             continue;
0550 
0551         if ((entry->res == NULL || entry->res->id ==
0552                 SVGA3D_INVALID_ID))
0553             continue;
0554 
0555         ret = vmw_binding_infos[entry->bt].scrub_func(entry, true);
0556         if (unlikely(ret != 0))
0557             return ret;
0558 
0559         entry->scrubbed = false;
0560     }
0561 
0562     return vmw_binding_emit_dirty(cbs);
0563 }
0564 
0565 /**
0566  * vmw_binding_scrub_shader - scrub a shader binding from a context.
0567  *
0568  * @bi: single binding information.
0569  * @rebind: Whether to issue a bind instead of scrub command.
0570  */
0571 static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
0572 {
0573     struct vmw_ctx_bindinfo_shader *binding =
0574         container_of(bi, typeof(*binding), bi);
0575     struct vmw_private *dev_priv = bi->ctx->dev_priv;
0576     struct {
0577         SVGA3dCmdHeader header;
0578         SVGA3dCmdSetShader body;
0579     } *cmd;
0580 
0581     cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
0582     if (unlikely(cmd == NULL))
0583         return -ENOMEM;
0584 
0585     cmd->header.id = SVGA_3D_CMD_SET_SHADER;
0586     cmd->header.size = sizeof(cmd->body);
0587     cmd->body.cid = bi->ctx->id;
0588     cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
0589     cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
0590     vmw_cmd_commit(dev_priv, sizeof(*cmd));
0591 
0592     return 0;
0593 }
0594 
0595 /**
0596  * vmw_binding_scrub_render_target - scrub a render target binding
0597  * from a context.
0598  *
0599  * @bi: single binding information.
0600  * @rebind: Whether to issue a bind instead of scrub command.
0601  */
0602 static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
0603                        bool rebind)
0604 {
0605     struct vmw_ctx_bindinfo_view *binding =
0606         container_of(bi, typeof(*binding), bi);
0607     struct vmw_private *dev_priv = bi->ctx->dev_priv;
0608     struct {
0609         SVGA3dCmdHeader header;
0610         SVGA3dCmdSetRenderTarget body;
0611     } *cmd;
0612 
0613     cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
0614     if (unlikely(cmd == NULL))
0615         return -ENOMEM;
0616 
0617     cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
0618     cmd->header.size = sizeof(cmd->body);
0619     cmd->body.cid = bi->ctx->id;
0620     cmd->body.type = binding->slot;
0621     cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
0622     cmd->body.target.face = 0;
0623     cmd->body.target.mipmap = 0;
0624     vmw_cmd_commit(dev_priv, sizeof(*cmd));
0625 
0626     return 0;
0627 }
0628 
0629 /**
0630  * vmw_binding_scrub_texture - scrub a texture binding from a context.
0631  *
0632  * @bi: single binding information.
0633  * @rebind: Whether to issue a bind instead of scrub command.
0634  *
0635  * TODO: Possibly complement this function with a function that takes
0636  * a list of texture bindings and combines them to a single command.
0637  */
0638 static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
0639                      bool rebind)
0640 {
0641     struct vmw_ctx_bindinfo_tex *binding =
0642         container_of(bi, typeof(*binding), bi);
0643     struct vmw_private *dev_priv = bi->ctx->dev_priv;
0644     struct {
0645         SVGA3dCmdHeader header;
0646         struct {
0647             SVGA3dCmdSetTextureState c;
0648             SVGA3dTextureState s1;
0649         } body;
0650     } *cmd;
0651 
0652     cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
0653     if (unlikely(cmd == NULL))
0654         return -ENOMEM;
0655 
0656     cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
0657     cmd->header.size = sizeof(cmd->body);
0658     cmd->body.c.cid = bi->ctx->id;
0659     cmd->body.s1.stage = binding->texture_stage;
0660     cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
0661     cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
0662     vmw_cmd_commit(dev_priv, sizeof(*cmd));
0663 
0664     return 0;
0665 }
0666 
0667 /**
0668  * vmw_binding_scrub_dx_shader - scrub a dx shader binding from a context.
0669  *
0670  * @bi: single binding information.
0671  * @rebind: Whether to issue a bind instead of scrub command.
0672  */
0673 static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
0674 {
0675     struct vmw_ctx_bindinfo_shader *binding =
0676         container_of(bi, typeof(*binding), bi);
0677     struct vmw_private *dev_priv = bi->ctx->dev_priv;
0678     struct {
0679         SVGA3dCmdHeader header;
0680         SVGA3dCmdDXSetShader body;
0681     } *cmd;
0682 
0683     cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
0684     if (unlikely(cmd == NULL))
0685         return -ENOMEM;
0686 
0687     cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER;
0688     cmd->header.size = sizeof(cmd->body);
0689     cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
0690     cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
0691     vmw_cmd_commit(dev_priv, sizeof(*cmd));
0692 
0693     return 0;
0694 }
0695 
0696 /**
0697  * vmw_binding_scrub_cb - scrub a constant buffer binding from a context.
0698  *
0699  * @bi: single binding information.
0700  * @rebind: Whether to issue a bind instead of scrub command.
0701  */
0702 static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
0703 {
0704     struct vmw_ctx_bindinfo_cb *binding =
0705         container_of(bi, typeof(*binding), bi);
0706     struct vmw_private *dev_priv = bi->ctx->dev_priv;
0707     struct {
0708         SVGA3dCmdHeader header;
0709         SVGA3dCmdDXSetSingleConstantBuffer body;
0710     } *cmd;
0711 
0712     cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
0713     if (unlikely(cmd == NULL))
0714         return -ENOMEM;
0715 
0716     cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER;
0717     cmd->header.size = sizeof(cmd->body);
0718     cmd->body.slot = binding->slot;
0719     cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
0720     if (rebind) {
0721         cmd->body.offsetInBytes = binding->offset;
0722         cmd->body.sizeInBytes = binding->size;
0723         cmd->body.sid = bi->res->id;
0724     } else {
0725         cmd->body.offsetInBytes = 0;
0726         cmd->body.sizeInBytes = 0;
0727         cmd->body.sid = SVGA3D_INVALID_ID;
0728     }
0729     vmw_cmd_commit(dev_priv, sizeof(*cmd));
0730 
0731     return 0;
0732 }
0733 
0734 /**
0735  * vmw_collect_view_ids - Build view id data for a view binding command
0736  * without checking which bindings actually need to be emitted
0737  *
0738  * @cbs: Pointer to the context's struct vmw_ctx_binding_state
0739  * @biv: Pointer to where the binding info array is stored in @cbs
0740  * @max_num: Maximum number of entries in the @bi array.
0741  *
0742  * Scans the @bi array for bindings and builds a buffer of view id data.
0743  * Stops at the first non-existing binding in the @bi array.
0744  * On output, @cbs->bind_cmd_count contains the number of bindings to be
0745  * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
0746  * contains the command data.
0747  */
0748 static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs,
0749                  const struct vmw_ctx_bindinfo_view *biv,
0750                  u32 max_num)
0751 {
0752     unsigned long i;
0753 
0754     cbs->bind_cmd_count = 0;
0755     cbs->bind_first_slot = 0;
0756 
0757     for (i = 0; i < max_num; ++i, ++biv) {
0758         if (!biv->bi.ctx)
0759             break;
0760 
0761         cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
0762             ((biv->bi.scrubbed) ?
0763              SVGA3D_INVALID_ID : biv->bi.res->id);
0764     }
0765 }
0766 
0767 /**
0768  * vmw_collect_dirty_view_ids - Build view id data for a view binding command
0769  *
0770  * @cbs: Pointer to the context's struct vmw_ctx_binding_state
0771  * @bi: Pointer to where the binding info array is stored in @cbs
0772  * @dirty: Bitmap indicating which bindings need to be emitted.
0773  * @max_num: Maximum number of entries in the @bi array.
0774  *
0775  * Scans the @bi array for bindings that need to be emitted and
0776  * builds a buffer of view id data.
0777  * On output, @cbs->bind_cmd_count contains the number of bindings to be
0778  * emitted, @cbs->bind_first_slot indicates the index of the first emitted
0779  * binding, and @cbs->bind_cmd_buffer contains the command data.
0780  */
static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
                       const struct vmw_ctx_bindinfo *bi,
                       unsigned long *dirty,
                       u32 max_num)
{
    const struct vmw_ctx_bindinfo_view *biv =
        container_of(bi, struct vmw_ctx_bindinfo_view, bi);
    unsigned long i, next_bit;

    cbs->bind_cmd_count = 0;
    /* Start emission at the first dirty slot. */
    i = find_first_bit(dirty, max_num);
    next_bit = i;
    cbs->bind_first_slot = i;

    biv += i;
    for (; i < max_num; ++i, ++biv) {
        /*
         * Emit an id for every slot from the first dirty one through
         * the last dirty one, including clean slots in between, so a
         * single contiguous command range covers all changes. Empty
         * or scrubbed slots are emitted as SVGA3D_INVALID_ID.
         */
        cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
            ((!biv->bi.ctx || biv->bi.scrubbed) ?
             SVGA3D_INVALID_ID : biv->bi.res->id);

        /* After emitting a dirty slot, find the next dirty bit;
         * stop once there are no more. */
        if (next_bit == i) {
            next_bit = find_next_bit(dirty, max_num, i + 1);
            if (next_bit >= max_num)
                break;
        }
    }
}
0808 
0809 /**
0810  * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
0811  *
0812  * @cbs: Pointer to the context's struct vmw_ctx_binding_state
0813  * @shader_slot: The shader slot of the binding.
0814  */
0815 static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
0816                int shader_slot)
0817 {
0818     const struct vmw_ctx_bindinfo *loc =
0819         &cbs->per_shader[shader_slot].shader_res[0].bi;
0820     struct {
0821         SVGA3dCmdHeader header;
0822         SVGA3dCmdDXSetShaderResources body;
0823     } *cmd;
0824     size_t cmd_size, view_id_size;
0825     const struct vmw_resource *ctx = vmw_cbs_context(cbs);
0826 
0827     vmw_collect_dirty_view_ids(cbs, loc,
0828                    cbs->per_shader[shader_slot].dirty_sr,
0829                    SVGA3D_DX_MAX_SRVIEWS);
0830     if (cbs->bind_cmd_count == 0)
0831         return 0;
0832 
0833     view_id_size = cbs->bind_cmd_count*sizeof(uint32);
0834     cmd_size = sizeof(*cmd) + view_id_size;
0835     cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
0836     if (unlikely(cmd == NULL))
0837         return -ENOMEM;
0838 
0839     cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
0840     cmd->header.size = sizeof(cmd->body) + view_id_size;
0841     cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN;
0842     cmd->body.startView = cbs->bind_first_slot;
0843 
0844     memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
0845 
0846     vmw_cmd_commit(ctx->dev_priv, cmd_size);
0847     bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
0848              cbs->bind_first_slot, cbs->bind_cmd_count);
0849 
0850     return 0;
0851 }
0852 
0853 /**
0854  * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
0855  *
0856  * @cbs: Pointer to the context's struct vmw_ctx_binding_state
0857  */
0858 static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
0859 {
0860     const struct vmw_ctx_bindinfo_view *loc = &cbs->render_targets[0];
0861     struct {
0862         SVGA3dCmdHeader header;
0863         SVGA3dCmdDXSetRenderTargets body;
0864     } *cmd;
0865     size_t cmd_size, view_id_size;
0866     const struct vmw_resource *ctx = vmw_cbs_context(cbs);
0867 
0868     vmw_collect_view_ids(cbs, loc, SVGA3D_DX_MAX_RENDER_TARGETS);
0869     view_id_size = cbs->bind_cmd_count*sizeof(uint32);
0870     cmd_size = sizeof(*cmd) + view_id_size;
0871     cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
0872     if (unlikely(cmd == NULL))
0873         return -ENOMEM;
0874 
0875     cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
0876     cmd->header.size = sizeof(cmd->body) + view_id_size;
0877 
0878     if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed)
0879         cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id;
0880     else
0881         cmd->body.depthStencilViewId = SVGA3D_INVALID_ID;
0882 
0883     memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
0884 
0885     vmw_cmd_commit(ctx->dev_priv, cmd_size);
0886 
0887     return 0;
0888 
0889 }
0890 
0891 /**
0892  * vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command
0893  * without checking which bindings actually need to be emitted
0894  *
0895  * @cbs: Pointer to the context's struct vmw_ctx_binding_state
0896  * @biso: Pointer to where the binding info array is stored in @cbs
0897  * @max_num: Maximum number of entries in the @bi array.
0898  *
0899  * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data.
0900  * Stops at the first non-existing binding in the @bi array.
0901  * On output, @cbs->bind_cmd_count contains the number of bindings to be
0902  * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
0903  * contains the command data.
0904  */
0905 static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
0906                    const struct vmw_ctx_bindinfo_so_target *biso,
0907                    u32 max_num)
0908 {
0909     unsigned long i;
0910     SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;
0911 
0912     cbs->bind_cmd_count = 0;
0913     cbs->bind_first_slot = 0;
0914 
0915     for (i = 0; i < max_num; ++i, ++biso, ++so_buffer,
0916             ++cbs->bind_cmd_count) {
0917         if (!biso->bi.ctx)
0918             break;
0919 
0920         if (!biso->bi.scrubbed) {
0921             so_buffer->sid = biso->bi.res->id;
0922             so_buffer->offset = biso->offset;
0923             so_buffer->sizeInBytes = biso->size;
0924         } else {
0925             so_buffer->sid = SVGA3D_INVALID_ID;
0926             so_buffer->offset = 0;
0927             so_buffer->sizeInBytes = 0;
0928         }
0929     }
0930 }
0931 
0932 /**
0933  * vmw_emit_set_so_target - Issue delayed streamout binding commands
0934  *
0935  * @cbs: Pointer to the context's struct vmw_ctx_binding_state
0936  */
0937 static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs)
0938 {
0939     const struct vmw_ctx_bindinfo_so_target *loc = &cbs->so_targets[0];
0940     struct {
0941         SVGA3dCmdHeader header;
0942         SVGA3dCmdDXSetSOTargets body;
0943     } *cmd;
0944     size_t cmd_size, so_target_size;
0945     const struct vmw_resource *ctx = vmw_cbs_context(cbs);
0946 
0947     vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS);
0948     if (cbs->bind_cmd_count == 0)
0949         return 0;
0950 
0951     so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
0952     cmd_size = sizeof(*cmd) + so_target_size;
0953     cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
0954     if (unlikely(cmd == NULL))
0955         return -ENOMEM;
0956 
0957     cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
0958     cmd->header.size = sizeof(cmd->body) + so_target_size;
0959     memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);
0960 
0961     vmw_cmd_commit(ctx->dev_priv, cmd_size);
0962 
0963     return 0;
0964 
0965 }
0966 
0967 /**
0968  * vmw_binding_emit_dirty_ps - Issue delayed per shader binding commands
0969  *
0970  * @cbs: Pointer to the context's struct vmw_ctx_binding_state
0971  *
0972  */
0973 static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs)
0974 {
0975     struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0];
0976     u32 i;
0977     int ret;
0978 
0979     for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) {
0980         if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty))
0981             continue;
0982 
0983         ret = vmw_emit_set_sr(cbs, i);
0984         if (ret)
0985             break;
0986 
0987         __clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty);
0988     }
0989 
0990     return 0;
0991 }
0992 
0993 /**
0994  * vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a
0995  * SVGA3dCmdDXSetVertexBuffers command
0996  *
0997  * @cbs: Pointer to the context's struct vmw_ctx_binding_state
0998  * @bi: Pointer to where the binding info array is stored in @cbs
0999  * @dirty: Bitmap indicating which bindings need to be emitted.
1000  * @max_num: Maximum number of entries in the @bi array.
1001  *
1002  * Scans the @bi array for bindings that need to be emitted and
1003  * builds a buffer of SVGA3dVertexBuffer data.
1004  * On output, @cbs->bind_cmd_count contains the number of bindings to be
1005  * emitted, @cbs->bind_first_slot indicates the index of the first emitted
1006  * binding, and @cbs->bind_cmd_buffer contains the command data.
1007  */
1008 static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
1009                   const struct vmw_ctx_bindinfo *bi,
1010                   unsigned long *dirty,
1011                   u32 max_num)
1012 {
1013     const struct vmw_ctx_bindinfo_vb *biv =
1014         container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
1015     unsigned long i, next_bit;
1016     SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) &cbs->bind_cmd_buffer;
1017 
1018     cbs->bind_cmd_count = 0;
1019     i = find_first_bit(dirty, max_num);
1020     next_bit = i;
1021     cbs->bind_first_slot = i;
1022 
1023     biv += i;
1024     for (; i < max_num; ++i, ++biv, ++vbs) {
1025         if (!biv->bi.ctx || biv->bi.scrubbed) {
1026             vbs->sid = SVGA3D_INVALID_ID;
1027             vbs->stride = 0;
1028             vbs->offset = 0;
1029         } else {
1030             vbs->sid = biv->bi.res->id;
1031             vbs->stride = biv->stride;
1032             vbs->offset = biv->offset;
1033         }
1034         cbs->bind_cmd_count++;
1035         if (next_bit == i) {
1036             next_bit = find_next_bit(dirty, max_num, i + 1);
1037             if (next_bit >= max_num)
1038                 break;
1039         }
1040     }
1041 }
1042 
1043 /**
1044  * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
1045  *
1046  * @cbs: Pointer to the context's struct vmw_ctx_binding_state
1047  *
1048  */
1049 static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
1050 {
1051     const struct vmw_ctx_bindinfo *loc =
1052         &cbs->vertex_buffers[0].bi;
1053     struct {
1054         SVGA3dCmdHeader header;
1055         SVGA3dCmdDXSetVertexBuffers body;
1056     } *cmd;
1057     size_t cmd_size, set_vb_size;
1058     const struct vmw_resource *ctx = vmw_cbs_context(cbs);
1059 
1060     vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb,
1061                  SVGA3D_DX_MAX_VERTEXBUFFERS);
1062     if (cbs->bind_cmd_count == 0)
1063         return 0;
1064 
1065     set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
1066     cmd_size = sizeof(*cmd) + set_vb_size;
1067     cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
1068     if (unlikely(cmd == NULL))
1069         return -ENOMEM;
1070 
1071     cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
1072     cmd->header.size = sizeof(cmd->body) + set_vb_size;
1073     cmd->body.startBuffer = cbs->bind_first_slot;
1074 
1075     memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);
1076 
1077     vmw_cmd_commit(ctx->dev_priv, cmd_size);
1078     bitmap_clear(cbs->dirty_vb,
1079              cbs->bind_first_slot, cbs->bind_cmd_count);
1080 
1081     return 0;
1082 }
1083 
1084 static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
1085 {
1086     const struct vmw_ctx_bindinfo_view *loc = &cbs->ua_views[0].views[0];
1087     struct {
1088         SVGA3dCmdHeader header;
1089         SVGA3dCmdDXSetUAViews body;
1090     } *cmd;
1091     size_t cmd_size, view_id_size;
1092     const struct vmw_resource *ctx = vmw_cbs_context(cbs);
1093 
1094     vmw_collect_view_ids(cbs, loc, vmw_max_num_uavs(cbs->dev_priv));
1095     view_id_size = cbs->bind_cmd_count*sizeof(uint32);
1096     cmd_size = sizeof(*cmd) + view_id_size;
1097     cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
1098     if (!cmd)
1099         return -ENOMEM;
1100 
1101     cmd->header.id = SVGA_3D_CMD_DX_SET_UA_VIEWS;
1102     cmd->header.size = sizeof(cmd->body) + view_id_size;
1103 
1104     /* Splice index is specified user-space   */
1105     cmd->body.uavSpliceIndex = cbs->ua_views[0].index;
1106 
1107     memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
1108 
1109     vmw_cmd_commit(ctx->dev_priv, cmd_size);
1110 
1111     return 0;
1112 }
1113 
1114 static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
1115 {
1116     const struct vmw_ctx_bindinfo_view *loc = &cbs->ua_views[1].views[0];
1117     struct {
1118         SVGA3dCmdHeader header;
1119         SVGA3dCmdDXSetCSUAViews body;
1120     } *cmd;
1121     size_t cmd_size, view_id_size;
1122     const struct vmw_resource *ctx = vmw_cbs_context(cbs);
1123 
1124     vmw_collect_view_ids(cbs, loc, vmw_max_num_uavs(cbs->dev_priv));
1125     view_id_size = cbs->bind_cmd_count*sizeof(uint32);
1126     cmd_size = sizeof(*cmd) + view_id_size;
1127     cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
1128     if (!cmd)
1129         return -ENOMEM;
1130 
1131     cmd->header.id = SVGA_3D_CMD_DX_SET_CS_UA_VIEWS;
1132     cmd->header.size = sizeof(cmd->body) + view_id_size;
1133 
1134     /* Start index is specified user-space */
1135     cmd->body.startIndex = cbs->ua_views[1].index;
1136 
1137     memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
1138 
1139     vmw_cmd_commit(ctx->dev_priv, cmd_size);
1140 
1141     return 0;
1142 }
1143 
1144 /**
1145  * vmw_binding_emit_dirty - Issue delayed binding commands
1146  *
1147  * @cbs: Pointer to the context's struct vmw_ctx_binding_state
1148  *
1149  * This function issues the delayed binding commands that arise from
1150  * previous scrub / unscrub calls. These binding commands are typically
1151  * commands that batch a number of bindings and therefore it makes sense
1152  * to delay them.
1153  */
1154 static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
1155 {
1156     int ret = 0;
1157     unsigned long hit = 0;
1158 
1159     while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit))
1160           < VMW_BINDING_NUM_BITS) {
1161 
1162         switch (hit) {
1163         case VMW_BINDING_RT_BIT:
1164             ret = vmw_emit_set_rt(cbs);
1165             break;
1166         case VMW_BINDING_PS_BIT:
1167             ret = vmw_binding_emit_dirty_ps(cbs);
1168             break;
1169         case VMW_BINDING_SO_T_BIT:
1170             ret = vmw_emit_set_so_target(cbs);
1171             break;
1172         case VMW_BINDING_VB_BIT:
1173             ret = vmw_emit_set_vb(cbs);
1174             break;
1175         case VMW_BINDING_UAV_BIT:
1176             ret = vmw_emit_set_uav(cbs);
1177             break;
1178         case VMW_BINDING_CS_UAV_BIT:
1179             ret = vmw_emit_set_cs_uav(cbs);
1180             break;
1181         default:
1182             BUG();
1183         }
1184         if (ret)
1185             return ret;
1186 
1187         __clear_bit(hit, &cbs->dirty);
1188         hit++;
1189     }
1190 
1191     return 0;
1192 }
1193 
1194 /**
1195  * vmw_binding_scrub_sr - Schedule a dx shaderresource binding
1196  * scrub from a context
1197  *
1198  * @bi: single binding information.
1199  * @rebind: Whether to issue a bind instead of scrub command.
1200  */
1201 static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind)
1202 {
1203     struct vmw_ctx_bindinfo_view *biv =
1204         container_of(bi, struct vmw_ctx_bindinfo_view, bi);
1205     struct vmw_ctx_binding_state *cbs =
1206         vmw_context_binding_state(bi->ctx);
1207 
1208     __set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr);
1209     __set_bit(VMW_BINDING_PS_SR_BIT,
1210           &cbs->per_shader[biv->shader_slot].dirty);
1211     __set_bit(VMW_BINDING_PS_BIT, &cbs->dirty);
1212 
1213     return 0;
1214 }
1215 
1216 /**
1217  * vmw_binding_scrub_dx_rt - Schedule a dx rendertarget binding
1218  * scrub from a context
1219  *
1220  * @bi: single binding information.
1221  * @rebind: Whether to issue a bind instead of scrub command.
1222  */
1223 static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind)
1224 {
1225     struct vmw_ctx_binding_state *cbs =
1226         vmw_context_binding_state(bi->ctx);
1227 
1228     __set_bit(VMW_BINDING_RT_BIT, &cbs->dirty);
1229 
1230     return 0;
1231 }
1232 
1233 /**
1234  * vmw_binding_scrub_so_target - Schedule a dx streamoutput buffer binding
1235  * scrub from a context
1236  *
1237  * @bi: single binding information.
1238  * @rebind: Whether to issue a bind instead of scrub command.
1239  */
1240 static int vmw_binding_scrub_so_target(struct vmw_ctx_bindinfo *bi, bool rebind)
1241 {
1242     struct vmw_ctx_binding_state *cbs =
1243         vmw_context_binding_state(bi->ctx);
1244 
1245     __set_bit(VMW_BINDING_SO_T_BIT, &cbs->dirty);
1246 
1247     return 0;
1248 }
1249 
1250 /**
1251  * vmw_binding_scrub_vb - Schedule a dx vertex buffer binding
1252  * scrub from a context
1253  *
1254  * @bi: single binding information.
1255  * @rebind: Whether to issue a bind instead of scrub command.
1256  */
1257 static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind)
1258 {
1259     struct vmw_ctx_bindinfo_vb *bivb =
1260         container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
1261     struct vmw_ctx_binding_state *cbs =
1262         vmw_context_binding_state(bi->ctx);
1263 
1264     __set_bit(bivb->slot, cbs->dirty_vb);
1265     __set_bit(VMW_BINDING_VB_BIT, &cbs->dirty);
1266 
1267     return 0;
1268 }
1269 
1270 /**
1271  * vmw_binding_scrub_ib - scrub a dx index buffer binding from a context
1272  *
1273  * @bi: single binding information.
1274  * @rebind: Whether to issue a bind instead of scrub command.
1275  */
1276 static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
1277 {
1278     struct vmw_ctx_bindinfo_ib *binding =
1279         container_of(bi, typeof(*binding), bi);
1280     struct vmw_private *dev_priv = bi->ctx->dev_priv;
1281     struct {
1282         SVGA3dCmdHeader header;
1283         SVGA3dCmdDXSetIndexBuffer body;
1284     } *cmd;
1285 
1286     cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
1287     if (unlikely(cmd == NULL))
1288         return -ENOMEM;
1289 
1290     cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER;
1291     cmd->header.size = sizeof(cmd->body);
1292     if (rebind) {
1293         cmd->body.sid = bi->res->id;
1294         cmd->body.format = binding->format;
1295         cmd->body.offset = binding->offset;
1296     } else {
1297         cmd->body.sid = SVGA3D_INVALID_ID;
1298         cmd->body.format = 0;
1299         cmd->body.offset = 0;
1300     }
1301 
1302     vmw_cmd_commit(dev_priv, sizeof(*cmd));
1303 
1304     return 0;
1305 }
1306 
1307 static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind)
1308 {
1309     struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx);
1310 
1311     __set_bit(VMW_BINDING_UAV_BIT, &cbs->dirty);
1312     return 0;
1313 }
1314 
1315 static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind)
1316 {
1317     struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx);
1318 
1319     __set_bit(VMW_BINDING_CS_UAV_BIT, &cbs->dirty);
1320     return 0;
1321 }
1322 
1323 /**
1324  * vmw_binding_scrub_so - Scrub a streamoutput binding from context.
1325  * @bi: Single binding information.
1326  * @rebind: Whether to issue a bind instead of scrub command.
1327  */
1328 static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
1329 {
1330     struct vmw_ctx_bindinfo_so *binding =
1331         container_of(bi, typeof(*binding), bi);
1332     struct vmw_private *dev_priv = bi->ctx->dev_priv;
1333     struct {
1334         SVGA3dCmdHeader header;
1335         SVGA3dCmdDXSetStreamOutput body;
1336     } *cmd;
1337 
1338     cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
1339     if (!cmd)
1340         return -ENOMEM;
1341 
1342     cmd->header.id = SVGA_3D_CMD_DX_SET_STREAMOUTPUT;
1343     cmd->header.size = sizeof(cmd->body);
1344     cmd->body.soid = rebind ? bi->res->id : SVGA3D_INVALID_ID;
1345     vmw_cmd_commit(dev_priv, sizeof(*cmd));
1346 
1347     return 0;
1348 }
1349 
1350 /**
1351  * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state.
1352  *
1353  * @dev_priv: Pointer to a device private structure.
1354  *
1355  * Returns a pointer to a newly allocated struct or an error pointer on error.
1356  */
1357 struct vmw_ctx_binding_state *
1358 vmw_binding_state_alloc(struct vmw_private *dev_priv)
1359 {
1360     struct vmw_ctx_binding_state *cbs;
1361 
1362     cbs = vzalloc(sizeof(*cbs));
1363     if (!cbs) {
1364         return ERR_PTR(-ENOMEM);
1365     }
1366 
1367     cbs->dev_priv = dev_priv;
1368     INIT_LIST_HEAD(&cbs->list);
1369 
1370     return cbs;
1371 }
1372 
/**
 * vmw_binding_state_free - Free a struct vmw_ctx_binding_state.
 *
 * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
 *
 * Counterpart of vmw_binding_state_alloc().
 */
void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
{
    /* vfree() tolerates NULL. */
    vfree(cbs);
}
1382 
1383 /**
1384  * vmw_binding_state_list - Get the binding list of a
1385  * struct vmw_ctx_binding_state
1386  *
1387  * @cbs: Pointer to the struct vmw_ctx_binding_state
1388  *
1389  * Returns the binding list which can be used to traverse through the bindings
1390  * and access the resource information of all bindings.
1391  */
1392 struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs)
1393 {
1394     return &cbs->list;
1395 }
1396 
1397 /**
1398  * vmw_binding_state_reset - clear a struct vmw_ctx_binding_state
1399  *
1400  * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
1401  *
1402  * Drops all bindings registered in @cbs. No device binding actions are
1403  * performed.
1404  */
1405 void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
1406 {
1407     struct vmw_ctx_bindinfo *entry, *next;
1408 
1409     list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
1410         vmw_binding_drop(entry);
1411 }
1412 
1413 /**
1414  * vmw_binding_dirtying - Return whether a binding type is dirtying its resource
1415  * @binding_type: The binding type
1416  *
1417  * Each time a resource is put on the validation list as the result of a
1418  * context binding referencing it, we need to determine whether that resource
1419  * will be dirtied (written to by the GPU) as a result of the corresponding
1420  * GPU operation. Currently rendertarget-, depth-stencil-, stream-output-target
1421  * and unordered access view bindings are capable of dirtying its resource.
1422  *
1423  * Return: Whether the binding type dirties the resource its binding points to.
1424  */
1425 u32 vmw_binding_dirtying(enum vmw_ctx_binding_type binding_type)
1426 {
1427     static u32 is_binding_dirtying[vmw_ctx_binding_max] = {
1428         [vmw_ctx_binding_rt] = VMW_RES_DIRTY_SET,
1429         [vmw_ctx_binding_dx_rt] = VMW_RES_DIRTY_SET,
1430         [vmw_ctx_binding_ds] = VMW_RES_DIRTY_SET,
1431         [vmw_ctx_binding_so_target] = VMW_RES_DIRTY_SET,
1432         [vmw_ctx_binding_uav] = VMW_RES_DIRTY_SET,
1433         [vmw_ctx_binding_cs_uav] = VMW_RES_DIRTY_SET,
1434     };
1435 
1436     /* Review this function as new bindings are added. */
1437     BUILD_BUG_ON(vmw_ctx_binding_max != 14);
1438     return is_binding_dirtying[binding_type];
1439 }
1440 
/*
 * This function is unused at run-time, and only used to hold various build
 * asserts important for code optimization assumptions.
 */
static void vmw_binding_build_asserts(void)
{
    /* vmw_binding_emit_dirty_ps() iterates exactly the DX10 shader types. */
    BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3);
    BUILD_BUG_ON(SVGA3D_DX_MAX_RENDER_TARGETS > SVGA3D_RT_MAX);
    /* The emit helpers size view id data as sizeof(uint32). */
    BUILD_BUG_ON(sizeof(uint32) != sizeof(u32));

    /*
     * struct vmw_ctx_binding_state::bind_cmd_buffer is used for various
     * view id arrays.
     */
    BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX);
    BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS);
    BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS);

    /*
     * struct vmw_ctx_binding_state::bind_cmd_buffer is used for
     * u32 view ids, SVGA3dSoTargets and SVGA3dVertexBuffers
     * (vmw_collect_so_targets() and vmw_collect_dirty_vbs() cast into it),
     * so it must be large enough for the biggest of those layouts.
     */
    BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) >
             VMW_MAX_VIEW_BINDINGS*sizeof(u32));
    BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) >
             VMW_MAX_VIEW_BINDINGS*sizeof(u32));
}