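/*
 * vmwgfx_binding.c - context binding state tracking for vmwgfx.
 *
 * Keeps track of resources (shaders, render targets, textures, constant
 * buffers, views, stream-output targets and vertex/index buffers) bound to
 * a device context, so that those bindings can be scrubbed (unbound on the
 * device) when a resource is evicted or the context is destroyed, and
 * rebound when the context is referenced again.
 */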
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include "device_include/svga3d_reg.h"

#define VMW_BINDING_RT_BIT     0
#define VMW_BINDING_PS_BIT     1
#define VMW_BINDING_SO_T_BIT   2
#define VMW_BINDING_VB_BIT     3
#define VMW_BINDING_UAV_BIT    4
#define VMW_BINDING_CS_UAV_BIT 5
#define VMW_BINDING_NUM_BITS   6

#define VMW_BINDING_PS_SR_BIT  0

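/**
 * struct vmw_ctx_binding_state - per context binding state
 *
 * @dev_priv: Pointer to device private structure.
 * @list: List of context bindings for this binding state.
 * @render_targets: Render target bindings.
 * @texture_units: Texture unit bindings.
 * @ds_view: Depth-stencil view binding.
 * @so_targets: StreamOutput target bindings.
 * @vertex_buffers: Vertex buffer bindings.
 * @index_buffer: Index buffer binding.
 * @per_shader: Per shader-type bindings (shader, constant buffers,
 *              shader resource views).
 * @ua_views: UAV bindings (graphics and compute).
 * @so_state: StreamOutput binding.
 * @dirty: Bitmap tracking per binding-type dirty state for binding types
 *         that are emitted in bulk.
 * @dirty_vb: Bitmap tracking individual dirty vertex-buffer binding slots.
 * @bind_cmd_buffer: Scratch space used to construct binding commands.
 * @bind_cmd_count: Number of binding command data entries in
 *                  @bind_cmd_buffer.
 * @bind_first_slot: First slot of the data in @bind_cmd_buffer.
 */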
struct vmw_ctx_binding_state {
        struct vmw_private *dev_priv;
        struct list_head list;
        struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX];
        struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS];
        struct vmw_ctx_bindinfo_view ds_view;
        struct vmw_ctx_bindinfo_so_target so_targets[SVGA3D_DX_MAX_SOTARGETS];
        struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
        struct vmw_ctx_bindinfo_ib index_buffer;
        struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE];
        struct vmw_ctx_bindinfo_uav ua_views[VMW_MAX_UAV_BIND_TYPE];
        struct vmw_ctx_bindinfo_so so_state;

        unsigned long dirty;
        DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);

        u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS];
        u32 bind_cmd_count;
        u32 bind_first_slot;
};

static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
                                           bool rebind);
static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_so_target(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs);
static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
                                       bool rebind);
static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);

static void vmw_binding_build_asserts(void) __attribute__ ((unused));

typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);

0137
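/**
 * struct vmw_binding_info - Per binding type information for the binding
 * manager
 *
 * @size: The size of the struct binding derived from a
 *        struct vmw_ctx_bindinfo.
 * @offsets: array[shader_slot] of offsets to the array[slot] of struct
 *           bindings for the binding type.
 * @scrub_func: Pointer to the scrub function for this binding type.
 */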
struct vmw_binding_info {
        size_t size;
        const size_t *offsets;
        vmw_scrub_func scrub_func;
};

static const size_t vmw_binding_shader_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, per_shader[0].shader),
        offsetof(struct vmw_ctx_binding_state, per_shader[1].shader),
        offsetof(struct vmw_ctx_binding_state, per_shader[2].shader),
        offsetof(struct vmw_ctx_binding_state, per_shader[3].shader),
        offsetof(struct vmw_ctx_binding_state, per_shader[4].shader),
        offsetof(struct vmw_ctx_binding_state, per_shader[5].shader),
};
static const size_t vmw_binding_rt_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, render_targets),
};
static const size_t vmw_binding_tex_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, texture_units),
};
static const size_t vmw_binding_cb_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers),
        offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers),
        offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers),
        offsetof(struct vmw_ctx_binding_state, per_shader[3].const_buffers),
        offsetof(struct vmw_ctx_binding_state, per_shader[4].const_buffers),
        offsetof(struct vmw_ctx_binding_state, per_shader[5].const_buffers),
};
static const size_t vmw_binding_dx_ds_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, ds_view),
};
static const size_t vmw_binding_sr_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res),
        offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res),
        offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res),
        offsetof(struct vmw_ctx_binding_state, per_shader[3].shader_res),
        offsetof(struct vmw_ctx_binding_state, per_shader[4].shader_res),
        offsetof(struct vmw_ctx_binding_state, per_shader[5].shader_res),
};
static const size_t vmw_binding_so_target_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, so_targets),
};
static const size_t vmw_binding_vb_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, vertex_buffers),
};
static const size_t vmw_binding_ib_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, index_buffer),
};
static const size_t vmw_binding_uav_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, ua_views[0].views),
};
static const size_t vmw_binding_cs_uav_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, ua_views[1].views),
};
static const size_t vmw_binding_so_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, so_state),
};

static const struct vmw_binding_info vmw_binding_infos[] = {
        [vmw_ctx_binding_shader] = {
                .size = sizeof(struct vmw_ctx_bindinfo_shader),
                .offsets = vmw_binding_shader_offsets,
                .scrub_func = vmw_binding_scrub_shader},
        [vmw_ctx_binding_rt] = {
                .size = sizeof(struct vmw_ctx_bindinfo_view),
                .offsets = vmw_binding_rt_offsets,
                .scrub_func = vmw_binding_scrub_render_target},
        [vmw_ctx_binding_tex] = {
                .size = sizeof(struct vmw_ctx_bindinfo_tex),
                .offsets = vmw_binding_tex_offsets,
                .scrub_func = vmw_binding_scrub_texture},
        [vmw_ctx_binding_cb] = {
                .size = sizeof(struct vmw_ctx_bindinfo_cb),
                .offsets = vmw_binding_cb_offsets,
                .scrub_func = vmw_binding_scrub_cb},
        [vmw_ctx_binding_dx_shader] = {
                .size = sizeof(struct vmw_ctx_bindinfo_shader),
                .offsets = vmw_binding_shader_offsets,
                .scrub_func = vmw_binding_scrub_dx_shader},
        [vmw_ctx_binding_dx_rt] = {
                .size = sizeof(struct vmw_ctx_bindinfo_view),
                .offsets = vmw_binding_rt_offsets,
                .scrub_func = vmw_binding_scrub_dx_rt},
        [vmw_ctx_binding_sr] = {
                .size = sizeof(struct vmw_ctx_bindinfo_view),
                .offsets = vmw_binding_sr_offsets,
                .scrub_func = vmw_binding_scrub_sr},
        [vmw_ctx_binding_ds] = {
                .size = sizeof(struct vmw_ctx_bindinfo_view),
                .offsets = vmw_binding_dx_ds_offsets,
                .scrub_func = vmw_binding_scrub_dx_rt},
        [vmw_ctx_binding_so_target] = {
                .size = sizeof(struct vmw_ctx_bindinfo_so_target),
                .offsets = vmw_binding_so_target_offsets,
                .scrub_func = vmw_binding_scrub_so_target},
        [vmw_ctx_binding_vb] = {
                .size = sizeof(struct vmw_ctx_bindinfo_vb),
                .offsets = vmw_binding_vb_offsets,
                .scrub_func = vmw_binding_scrub_vb},
        [vmw_ctx_binding_ib] = {
                .size = sizeof(struct vmw_ctx_bindinfo_ib),
                .offsets = vmw_binding_ib_offsets,
                .scrub_func = vmw_binding_scrub_ib},
        [vmw_ctx_binding_uav] = {
                .size = sizeof(struct vmw_ctx_bindinfo_view),
                .offsets = vmw_binding_uav_offsets,
                .scrub_func = vmw_binding_scrub_uav},
        [vmw_ctx_binding_cs_uav] = {
                .size = sizeof(struct vmw_ctx_bindinfo_view),
                .offsets = vmw_binding_cs_uav_offsets,
                .scrub_func = vmw_binding_scrub_cs_uav},
        [vmw_ctx_binding_so] = {
                .size = sizeof(struct vmw_ctx_bindinfo_so),
                .offsets = vmw_binding_so_offsets,
                .scrub_func = vmw_binding_scrub_so},
};

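/**
 * vmw_cbs_context - Return a pointer to the context resource of a
 * context binding state tracker.
 *
 * @cbs: The context binding state tracker.
 *
 * Provided there are any active bindings, this function will return an
 * unreferenced pointer to the context, otherwise NULL.
 */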
static const struct vmw_resource *
vmw_cbs_context(const struct vmw_ctx_binding_state *cbs)
{
        if (list_empty(&cbs->list))
                return NULL;

        return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo,
                                ctx_list)->ctx;
}

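/**
 * vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location.
 *
 * @cbs: Pointer to the context binding state tracker.
 * @bt: The binding type.
 * @shader_slot: The shader slot of the binding. If none, then set to 0.
 * @slot: The slot of the binding.
 */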
static struct vmw_ctx_bindinfo *
vmw_binding_loc(struct vmw_ctx_binding_state *cbs,
                enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot)
{
        const struct vmw_binding_info *b = &vmw_binding_infos[bt];
        size_t offset = b->offsets[shader_slot] + b->size * slot;

        return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset);
}

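/**
 * vmw_binding_drop - Stop tracking a context binding.
 *
 * @bi: Pointer to binding tracker storage.
 *
 * Stops tracking a context binding, and re-initializes its storage.
 * Typically used when the context binding is replaced with a binding to
 * another (or the same, for that matter) resource.
 */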
static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi)
{
        list_del(&bi->ctx_list);
        if (!list_empty(&bi->res_list))
                list_del(&bi->res_list);
        bi->ctx = NULL;
}

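/**
 * vmw_binding_add - Start tracking a context binding.
 *
 * @cbs: Pointer to the context binding state tracker.
 * @bi: Information about the binding to track.
 * @shader_slot: The shader slot of the binding.
 * @slot: The slot of the binding.
 *
 * Starts tracking the binding in the context binding state structure @cbs.
 */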
void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
                     const struct vmw_ctx_bindinfo *bi,
                     u32 shader_slot, u32 slot)
{
        struct vmw_ctx_bindinfo *loc =
                vmw_binding_loc(cbs, bi->bt, shader_slot, slot);
        const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt];

        if (loc->ctx != NULL)
                vmw_binding_drop(loc);

        memcpy(loc, bi, b->size);
        loc->scrubbed = false;
        list_add(&loc->ctx_list, &cbs->list);
        INIT_LIST_HEAD(&loc->res_list);
}

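/**
 * vmw_binding_cb_offset_update - Update the offset of a constant-buffer
 * binding.
 *
 * @cbs: Pointer to the context binding state tracker.
 * @shader_slot: The shader slot of the binding.
 * @slot: The slot of the binding.
 * @offsetInBytes: The new offset of the binding.
 */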
void vmw_binding_cb_offset_update(struct vmw_ctx_binding_state *cbs,
                                  u32 shader_slot, u32 slot, u32 offsetInBytes)
{
        struct vmw_ctx_bindinfo *loc =
                vmw_binding_loc(cbs, vmw_ctx_binding_cb, shader_slot, slot);
        struct vmw_ctx_bindinfo_cb *loc_cb =
                (struct vmw_ctx_bindinfo_cb *)((u8 *) loc);

        loc_cb->offset = offsetInBytes;
}

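/**
 * vmw_binding_add_uav_index - Track a UAV start/splice index.
 *
 * @cbs: Pointer to the context binding state tracker.
 * @slot: Which of the UAV binding sets to update (graphics or compute
 *        shader, indexing @cbs->ua_views).
 * @index: The index to track.
 */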
void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs, uint32 slot,
                               uint32 index)
{
        cbs->ua_views[slot].index = index;
}

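/**
 * vmw_binding_transfer - Transfer a context binding tracking entry.
 *
 * @cbs: Pointer to the persistent context binding state tracker.
 * @from: Pointer to the staged context binding state tracker the entry
 *        currently lives in.
 * @bi: Information about the binding to track.
 */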
static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs,
                                 const struct vmw_ctx_binding_state *from,
                                 const struct vmw_ctx_bindinfo *bi)
{
        size_t offset = (unsigned long)bi - (unsigned long)from;
        struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *)
                ((unsigned long) cbs + offset);

        if (loc->ctx != NULL) {
                WARN_ON(bi->scrubbed);

                vmw_binding_drop(loc);
        }

        if (bi->res != NULL) {
                memcpy(loc, bi, vmw_binding_infos[bi->bt].size);
                list_add_tail(&loc->ctx_list, &cbs->list);
                list_add_tail(&loc->res_list, &loc->res->binding_head);
        }
}

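/**
 * vmw_binding_state_kill - Kill all bindings associated with a
 * struct vmw_ctx_binding_state structure, and re-initialize the structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the binding state
 * tracker, then re-initializes the whole structure.
 */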
void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs)
{
        struct vmw_ctx_bindinfo *entry, *next;

        vmw_binding_state_scrub(cbs);
        list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
                vmw_binding_drop(entry);
}

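/**
 * vmw_binding_state_scrub - Scrub all bindings associated with a
 * struct vmw_ctx_binding_state structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the binding state
 * tracker.
 */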
void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
{
        struct vmw_ctx_bindinfo *entry;

        list_for_each_entry(entry, &cbs->list, ctx_list) {
                if (!entry->scrubbed) {
                        (void) vmw_binding_infos[entry->bt].scrub_func
                                (entry, false);
                        entry->scrubbed = true;
                }
        }

        (void) vmw_binding_emit_dirty(cbs);
}

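/**
 * vmw_binding_res_list_kill - Kill all bindings on a resource binding list.
 *
 * @head: List head of resource binding list.
 *
 * Kills all bindings associated with a specific resource. Typically
 * called before the resource is destroyed.
 */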
void vmw_binding_res_list_kill(struct list_head *head)
{
        struct vmw_ctx_bindinfo *entry, *next;

        vmw_binding_res_list_scrub(head);
        list_for_each_entry_safe(entry, next, head, res_list)
                vmw_binding_drop(entry);
}

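/**
 * vmw_binding_res_list_scrub - Scrub all bindings on a resource binding list.
 *
 * @head: List head of resource binding list.
 *
 * Scrubs all bindings associated with a specific resource. Typically
 * called before the resource is evicted.
 */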
void vmw_binding_res_list_scrub(struct list_head *head)
{
        struct vmw_ctx_bindinfo *entry;

        list_for_each_entry(entry, head, res_list) {
                if (!entry->scrubbed) {
                        (void) vmw_binding_infos[entry->bt].scrub_func
                                (entry, false);
                        entry->scrubbed = true;
                }
        }

        list_for_each_entry(entry, head, res_list) {
                struct vmw_ctx_binding_state *cbs =
                        vmw_context_binding_state(entry->ctx);

                (void) vmw_binding_emit_dirty(cbs);
        }
}

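/**
 * vmw_binding_state_commit - Commit staged binding info.
 *
 * @to: The persistent (context) binding state tracker to copy into.
 * @from: Staged binding info built during execbuf.
 *
 * Transfers binding info from a temporary structure (typically used by
 * execbuf) to the persistent structure in the context. This can be done
 * once commands have been submitted to hardware.
 */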
void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
                              struct vmw_ctx_binding_state *from)
{
        struct vmw_ctx_bindinfo *entry, *next;

        list_for_each_entry_safe(entry, next, &from->list, ctx_list) {
                vmw_binding_transfer(to, from, entry);
                vmw_binding_drop(entry);
        }

        to->ua_views[0].index = from->ua_views[0].index;
        to->ua_views[1].index = from->ua_views[1].index;
}

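/**
 * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Walks through the context binding list and rebinds all scrubbed
 * resources.
 */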
int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs)
{
        struct vmw_ctx_bindinfo *entry;
        int ret;

        list_for_each_entry(entry, &cbs->list, ctx_list) {
                if (likely(!entry->scrubbed))
                        continue;

                if ((entry->res == NULL || entry->res->id ==
                            SVGA3D_INVALID_ID))
                        continue;

                ret = vmw_binding_infos[entry->bt].scrub_func(entry, true);
                if (unlikely(ret != 0))
                        return ret;

                entry->scrubbed = false;
        }

        return vmw_binding_emit_dirty(cbs);
}

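/**
 * vmw_binding_scrub_shader - scrub a shader binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */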
static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_bindinfo_shader *binding =
                container_of(bi, typeof(*binding), bi);
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetShader body;
        } *cmd;

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_SET_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = bi->ctx->id;
        cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
        cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
        vmw_cmd_commit(dev_priv, sizeof(*cmd));

        return 0;
}

static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
                                           bool rebind)
{
        struct vmw_ctx_bindinfo_view *binding =
                container_of(bi, typeof(*binding), bi);
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = bi->ctx->id;
        cmd->body.type = binding->slot;
        cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
        cmd->body.target.face = 0;
        cmd->body.target.mipmap = 0;
        vmw_cmd_commit(dev_priv, sizeof(*cmd));

        return 0;
}

static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
                                     bool rebind)
{
        struct vmw_ctx_bindinfo_tex *binding =
                container_of(bi, typeof(*binding), bi);
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                struct {
                        SVGA3dCmdSetTextureState c;
                        SVGA3dTextureState s1;
                } body;
        } *cmd;

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.c.cid = bi->ctx->id;
        cmd->body.s1.stage = binding->texture_stage;
        cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
        cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
        vmw_cmd_commit(dev_priv, sizeof(*cmd));

        return 0;
}

static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_bindinfo_shader *binding =
                container_of(bi, typeof(*binding), bi);
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetShader body;
        } *cmd;

        cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
        cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
        vmw_cmd_commit(dev_priv, sizeof(*cmd));

        return 0;
}

static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_bindinfo_cb *binding =
                container_of(bi, typeof(*binding), bi);
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetSingleConstantBuffer body;
        } *cmd;

        cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.slot = binding->slot;
        cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
        if (rebind) {
                cmd->body.offsetInBytes = binding->offset;
                cmd->body.sizeInBytes = binding->size;
                cmd->body.sid = bi->res->id;
        } else {
                cmd->body.offsetInBytes = 0;
                cmd->body.sizeInBytes = 0;
                cmd->body.sid = SVGA3D_INVALID_ID;
        }
        vmw_cmd_commit(dev_priv, sizeof(*cmd));

        return 0;
}

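/**
 * vmw_collect_view_ids - Build view id data for a view binding command
 * without checking which bindings actually need to be emitted.
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state.
 * @biv: Pointer to where the binding info array is stored in @cbs.
 * @max_num: Maximum number of entries in the @biv array.
 *
 * Scans the @biv array and, for each active binding, stores the view id in
 * the binding command scratch area. Stops at the first non-existing
 * binding in the @biv array.
 */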
static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs,
                                 const struct vmw_ctx_bindinfo_view *biv,
                                 u32 max_num)
{
        unsigned long i;

        cbs->bind_cmd_count = 0;
        cbs->bind_first_slot = 0;

        for (i = 0; i < max_num; ++i, ++biv) {
                if (!biv->bi.ctx)
                        break;

                cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
                        ((biv->bi.scrubbed) ?
                         SVGA3D_INVALID_ID : biv->bi.res->id);
        }
}

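/**
 * vmw_collect_dirty_view_ids - Build view id data for a view binding command
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state.
 * @bi: Pointer to where the binding info array is stored in @cbs.
 * @dirty: Bitmap indicating which bindings need to be emitted.
 * @max_num: Maximum number of entries in the @bi array.
 *
 * Scans the @bi array for bindings that need to be emitted and builds a
 * buffer of view id data. Stops at the last dirty binding in the @dirty
 * bitmap, and records the slot of the first emitted binding in
 * @cbs->bind_first_slot.
 */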
static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
                                       const struct vmw_ctx_bindinfo *bi,
                                       unsigned long *dirty,
                                       u32 max_num)
{
        const struct vmw_ctx_bindinfo_view *biv =
                container_of(bi, struct vmw_ctx_bindinfo_view, bi);
        unsigned long i, next_bit;

        cbs->bind_cmd_count = 0;
        i = find_first_bit(dirty, max_num);
        next_bit = i;
        cbs->bind_first_slot = i;

        biv += i;
        for (; i < max_num; ++i, ++biv) {
                cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
                        ((!biv->bi.ctx || biv->bi.scrubbed) ?
                         SVGA3D_INVALID_ID : biv->bi.res->id);

                if (next_bit == i) {
                        next_bit = find_next_bit(dirty, max_num, i + 1);
                        if (next_bit >= max_num)
                                break;
                }
        }
}

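/**
 * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state.
 * @shader_slot: The shader slot of the bindings.
 */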
static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
                           int shader_slot)
{
        const struct vmw_ctx_bindinfo *loc =
                &cbs->per_shader[shader_slot].shader_res[0].bi;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetShaderResources body;
        } *cmd;
        size_t cmd_size, view_id_size;
        const struct vmw_resource *ctx = vmw_cbs_context(cbs);

        vmw_collect_dirty_view_ids(cbs, loc,
                                   cbs->per_shader[shader_slot].dirty_sr,
                                   SVGA3D_DX_MAX_SRVIEWS);
        if (cbs->bind_cmd_count == 0)
                return 0;

        view_id_size = cbs->bind_cmd_count * sizeof(uint32);
        cmd_size = sizeof(*cmd) + view_id_size;
        cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
        cmd->header.size = sizeof(cmd->body) + view_id_size;
        cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN;
        cmd->body.startView = cbs->bind_first_slot;

        memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);

        vmw_cmd_commit(ctx->dev_priv, cmd_size);
        bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
                     cbs->bind_first_slot, cbs->bind_cmd_count);

        return 0;
}

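/**
 * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state.
 */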
static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
{
        const struct vmw_ctx_bindinfo_view *loc = &cbs->render_targets[0];
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetRenderTargets body;
        } *cmd;
        size_t cmd_size, view_id_size;
        const struct vmw_resource *ctx = vmw_cbs_context(cbs);

        vmw_collect_view_ids(cbs, loc, SVGA3D_DX_MAX_RENDER_TARGETS);
        view_id_size = cbs->bind_cmd_count * sizeof(uint32);
        cmd_size = sizeof(*cmd) + view_id_size;
        cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
        cmd->header.size = sizeof(cmd->body) + view_id_size;

        if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed)
                cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id;
        else
                cmd->body.depthStencilViewId = SVGA3D_INVALID_ID;

        memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);

        vmw_cmd_commit(ctx->dev_priv, cmd_size);

        return 0;
}

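/**
 * vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command
 * without checking which bindings actually need to be emitted.
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state.
 * @biso: Pointer to where the binding info array is stored in @cbs.
 * @max_num: Maximum number of entries in the @biso array.
 *
 * Stops at the first non-existing binding in the @biso array.
 */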
static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
                                   const struct vmw_ctx_bindinfo_so_target *biso,
                                   u32 max_num)
{
        unsigned long i;
        SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;

        cbs->bind_cmd_count = 0;
        cbs->bind_first_slot = 0;

        for (i = 0; i < max_num; ++i, ++biso, ++so_buffer,
                    ++cbs->bind_cmd_count) {
                if (!biso->bi.ctx)
                        break;

                if (!biso->bi.scrubbed) {
                        so_buffer->sid = biso->bi.res->id;
                        so_buffer->offset = biso->offset;
                        so_buffer->sizeInBytes = biso->size;
                } else {
                        so_buffer->sid = SVGA3D_INVALID_ID;
                        so_buffer->offset = 0;
                        so_buffer->sizeInBytes = 0;
                }
        }
}

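/**
 * vmw_emit_set_so_target - Issue delayed streamout binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state.
 */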
static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs)
{
        const struct vmw_ctx_bindinfo_so_target *loc = &cbs->so_targets[0];
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetSOTargets body;
        } *cmd;
        size_t cmd_size, so_target_size;
        const struct vmw_resource *ctx = vmw_cbs_context(cbs);

        vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS);
        if (cbs->bind_cmd_count == 0)
                return 0;

        so_target_size = cbs->bind_cmd_count * sizeof(SVGA3dSoTarget);
        cmd_size = sizeof(*cmd) + so_target_size;
        cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
        cmd->header.size = sizeof(cmd->body) + so_target_size;
        memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);

        vmw_cmd_commit(ctx->dev_priv, cmd_size);

        return 0;
}

static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs)
{
        struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0];
        u32 i;
        int ret;

        for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) {
                if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty))
                        continue;

                ret = vmw_emit_set_sr(cbs, i);
                if (ret)
                        break;

                __clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty);
        }

        return 0;
}

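/**
 * vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a
 * SVGA3dCmdDXSetVertexBuffers command
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state.
 * @bi: Pointer to where the binding info array is stored in @cbs.
 * @dirty: Bitmap indicating which bindings need to be emitted.
 * @max_num: Maximum number of entries in the @bi array.
 *
 * Scans the @bi array for bindings that need to be emitted and builds a
 * buffer of SVGA3dVertexBuffer data. Stops at the last dirty binding in
 * the @dirty bitmap, and records the slot of the first emitted binding in
 * @cbs->bind_first_slot.
 */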
static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
                                  const struct vmw_ctx_bindinfo *bi,
                                  unsigned long *dirty,
                                  u32 max_num)
{
        const struct vmw_ctx_bindinfo_vb *biv =
                container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
        unsigned long i, next_bit;
        SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) &cbs->bind_cmd_buffer;

        cbs->bind_cmd_count = 0;
        i = find_first_bit(dirty, max_num);
        next_bit = i;
        cbs->bind_first_slot = i;

        biv += i;
        for (; i < max_num; ++i, ++biv, ++vbs) {
                if (!biv->bi.ctx || biv->bi.scrubbed) {
                        vbs->sid = SVGA3D_INVALID_ID;
                        vbs->stride = 0;
                        vbs->offset = 0;
                } else {
                        vbs->sid = biv->bi.res->id;
                        vbs->stride = biv->stride;
                        vbs->offset = biv->offset;
                }
                cbs->bind_cmd_count++;
                if (next_bit == i) {
                        next_bit = find_next_bit(dirty, max_num, i + 1);
                        if (next_bit >= max_num)
                                break;
                }
        }
}

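/**
 * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state.
 */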
static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
{
        const struct vmw_ctx_bindinfo *loc =
                &cbs->vertex_buffers[0].bi;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetVertexBuffers body;
        } *cmd;
        size_t cmd_size, set_vb_size;
        const struct vmw_resource *ctx = vmw_cbs_context(cbs);

        vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb,
                              SVGA3D_DX_MAX_VERTEXBUFFERS);
        if (cbs->bind_cmd_count == 0)
                return 0;

        set_vb_size = cbs->bind_cmd_count * sizeof(SVGA3dVertexBuffer);
        cmd_size = sizeof(*cmd) + set_vb_size;
        cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
        cmd->header.size = sizeof(cmd->body) + set_vb_size;
        cmd->body.startBuffer = cbs->bind_first_slot;

        memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);

        vmw_cmd_commit(ctx->dev_priv, cmd_size);
        bitmap_clear(cbs->dirty_vb,
                     cbs->bind_first_slot, cbs->bind_cmd_count);

        return 0;
}

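/**
 * vmw_emit_set_uav - Issue delayed UAV binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state.
 */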
static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
{
        const struct vmw_ctx_bindinfo_view *loc = &cbs->ua_views[0].views[0];
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetUAViews body;
        } *cmd;
        size_t cmd_size, view_id_size;
        const struct vmw_resource *ctx = vmw_cbs_context(cbs);

        vmw_collect_view_ids(cbs, loc, vmw_max_num_uavs(cbs->dev_priv));
        view_id_size = cbs->bind_cmd_count * sizeof(uint32);
        cmd_size = sizeof(*cmd) + view_id_size;
        cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
        if (!cmd)
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_SET_UA_VIEWS;
        cmd->header.size = sizeof(cmd->body) + view_id_size;

        cmd->body.uavSpliceIndex = cbs->ua_views[0].index;

        memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);

        vmw_cmd_commit(ctx->dev_priv, cmd_size);

        return 0;
}

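/**
 * vmw_emit_set_cs_uav - Issue delayed compute shader UAV binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state.
 */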
static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
{
        const struct vmw_ctx_bindinfo_view *loc = &cbs->ua_views[1].views[0];
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetCSUAViews body;
        } *cmd;
        size_t cmd_size, view_id_size;
        const struct vmw_resource *ctx = vmw_cbs_context(cbs);

        vmw_collect_view_ids(cbs, loc, vmw_max_num_uavs(cbs->dev_priv));
        view_id_size = cbs->bind_cmd_count * sizeof(uint32);
        cmd_size = sizeof(*cmd) + view_id_size;
        cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
        if (!cmd)
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_SET_CS_UA_VIEWS;
        cmd->header.size = sizeof(cmd->body) + view_id_size;

        cmd->body.startIndex = cbs->ua_views[1].index;

        memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);

        vmw_cmd_commit(ctx->dev_priv, cmd_size);

        return 0;
}

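/**
 * vmw_binding_emit_dirty - Issue delayed binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state.
 *
 * This function issues the delayed binding commands that arise from the
 * binding types that are scrubbed and rebound in bulk rather than one
 * binding at a time.
 */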
static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
{
        int ret = 0;
        unsigned long hit = 0;

        while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit))
               < VMW_BINDING_NUM_BITS) {

                switch (hit) {
                case VMW_BINDING_RT_BIT:
                        ret = vmw_emit_set_rt(cbs);
                        break;
                case VMW_BINDING_PS_BIT:
                        ret = vmw_binding_emit_dirty_ps(cbs);
                        break;
                case VMW_BINDING_SO_T_BIT:
                        ret = vmw_emit_set_so_target(cbs);
                        break;
                case VMW_BINDING_VB_BIT:
                        ret = vmw_emit_set_vb(cbs);
                        break;
                case VMW_BINDING_UAV_BIT:
                        ret = vmw_emit_set_uav(cbs);
                        break;
                case VMW_BINDING_CS_UAV_BIT:
                        ret = vmw_emit_set_cs_uav(cbs);
                        break;
                default:
                        BUG();
                }
                if (ret)
                        return ret;

                __clear_bit(hit, &cbs->dirty);
                hit++;
        }

        return 0;
}

static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_bindinfo_view *biv =
                container_of(bi, struct vmw_ctx_bindinfo_view, bi);
        struct vmw_ctx_binding_state *cbs =
                vmw_context_binding_state(bi->ctx);

        __set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr);
        __set_bit(VMW_BINDING_PS_SR_BIT,
                  &cbs->per_shader[biv->shader_slot].dirty);
        __set_bit(VMW_BINDING_PS_BIT, &cbs->dirty);

        return 0;
}

static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_binding_state *cbs =
                vmw_context_binding_state(bi->ctx);

        __set_bit(VMW_BINDING_RT_BIT, &cbs->dirty);

        return 0;
}

static int vmw_binding_scrub_so_target(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_binding_state *cbs =
                vmw_context_binding_state(bi->ctx);

        __set_bit(VMW_BINDING_SO_T_BIT, &cbs->dirty);

        return 0;
}

static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_bindinfo_vb *bivb =
                container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
        struct vmw_ctx_binding_state *cbs =
                vmw_context_binding_state(bi->ctx);

        __set_bit(bivb->slot, cbs->dirty_vb);
        __set_bit(VMW_BINDING_VB_BIT, &cbs->dirty);

        return 0;
}

static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_bindinfo_ib *binding =
                container_of(bi, typeof(*binding), bi);
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetIndexBuffer body;
        } *cmd;

        cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER;
        cmd->header.size = sizeof(cmd->body);
        if (rebind) {
                cmd->body.sid = bi->res->id;
                cmd->body.format = binding->format;
                cmd->body.offset = binding->offset;
        } else {
                cmd->body.sid = SVGA3D_INVALID_ID;
                cmd->body.format = 0;
                cmd->body.offset = 0;
        }

        vmw_cmd_commit(dev_priv, sizeof(*cmd));

        return 0;
}

static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx);

        __set_bit(VMW_BINDING_UAV_BIT, &cbs->dirty);
        return 0;
}

static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx);

        __set_bit(VMW_BINDING_CS_UAV_BIT, &cbs->dirty);
        return 0;
}

static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_bindinfo_so *binding =
                container_of(bi, typeof(*binding), bi);
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetStreamOutput body;
        } *cmd;

        cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
        if (!cmd)
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_SET_STREAMOUTPUT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.soid = rebind ? bi->res->id : SVGA3D_INVALID_ID;
        vmw_cmd_commit(dev_priv, sizeof(*cmd));

        return 0;
}

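/**
 * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state.
 *
 * @dev_priv: Pointer to a device private structure.
 *
 * Returns a pointer to a newly allocated struct or an error pointer on
 * error.
 */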
struct vmw_ctx_binding_state *
vmw_binding_state_alloc(struct vmw_private *dev_priv)
{
        struct vmw_ctx_binding_state *cbs;

        cbs = vzalloc(sizeof(*cbs));
        if (!cbs)
                return ERR_PTR(-ENOMEM);

        cbs->dev_priv = dev_priv;
        INIT_LIST_HEAD(&cbs->list);

        return cbs;
}

void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
{
        vfree(cbs);
}

struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs)
{
        return &cbs->list;
}

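/**
 * vmw_binding_state_reset - clear a struct vmw_ctx_binding_state
 *
 * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
 *
 * Drops all bindings registered in @cbs. No device binding actions are
 * performed.
 */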
void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
{
        struct vmw_ctx_bindinfo *entry, *next;

        list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
                vmw_binding_drop(entry);
}

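/**
 * vmw_binding_dirtying - Return whether a binding type is dirtying its
 * resource
 *
 * @binding_type: The binding type.
 *
 * Each time a resource is put on the validation list as the result of a
 * context binding referencing it, we need to determine whether that
 * resource will be dirtied (written to by the GPU) as a result of the
 * corresponding GPU operation. Per the table below, render target,
 * depth-stencil, stream-output target and UAV bindings are considered
 * dirtying.
 *
 * Return: Whether the binding type dirties the resource its binding
 * points to.
 */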
u32 vmw_binding_dirtying(enum vmw_ctx_binding_type binding_type)
{
        static u32 is_binding_dirtying[vmw_ctx_binding_max] = {
                [vmw_ctx_binding_rt] = VMW_RES_DIRTY_SET,
                [vmw_ctx_binding_dx_rt] = VMW_RES_DIRTY_SET,
                [vmw_ctx_binding_ds] = VMW_RES_DIRTY_SET,
                [vmw_ctx_binding_so_target] = VMW_RES_DIRTY_SET,
                [vmw_ctx_binding_uav] = VMW_RES_DIRTY_SET,
                [vmw_ctx_binding_cs_uav] = VMW_RES_DIRTY_SET,
        };

        BUILD_BUG_ON(vmw_ctx_binding_max != 14);
        return is_binding_dirtying[binding_type];
}

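/*
 * This function is unused at run-time, and only used to hold various build
 * asserts important for code optimization assumptions.
 */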
static void vmw_binding_build_asserts(void)
{
        BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3);
        BUILD_BUG_ON(SVGA3D_DX_MAX_RENDER_TARGETS > SVGA3D_RT_MAX);
        BUILD_BUG_ON(sizeof(uint32) != sizeof(u32));

        BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX);
        BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS);
        BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS);

        BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) >
                     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
        BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) >
                     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
}