// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * vmwgfx_cotable.c -- Guest-backed context object table (COTable)
 * management for the vmwgfx driver: creation, MOB binding/unbinding,
 * scrubbing and on-demand resizing of the per-context tables that back
 * DX views, shaders, queries and related objects.
 */
#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"

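/**
 * struct vmw_cotable - Context Object Table resource
 *
 * @res: struct vmw_resource we are deriving from.
 * @ctx: The context this cotable belongs to. Not refcounted.
 * @size_read_back: Size of data read back from the device on unbind,
 * in bytes.
 * @seen_entries: Highest entry number seen in the command stream for this
 * cotable, or -1 if no entry has been seen yet.
 * @type: The cotable type (SVGA_COTABLE_*), also used to index co_info[].
 * @scrubbed: Whether the cotable has been scrubbed, i.e. whether the device
 * has been told to stop using its backing MOB.
 * @resource_list: List of resources whose device state lives in this cotable.
 */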
struct vmw_cotable {
	struct vmw_resource res;
	struct vmw_resource *ctx;
	size_t size_read_back;
	int seen_entries;
	u32 type;
	bool scrubbed;
	struct list_head resource_list;
};

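/**
 * struct vmw_cotable_info - Static information about a cotable type
 *
 * @min_initial_entries: Minimum number of entries a cotable of this type
 * is initially allocated with.
 * @size: Size in bytes of a single entry.
 * @unbind_func: Optional callback used to scrub or destroy the resources
 * held by a cotable of this type before the cotable is taken away from
 * the device.
 */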
struct vmw_cotable_info {
	u32 min_initial_entries;
	u32 size;
	void (*unbind_func)(struct vmw_private *, struct list_head *,
			    bool);
};

/* Per-type cotable information, indexed by SVGA_COTABLE_* type. */
static const struct vmw_cotable_info co_info[] = {
	{1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
	{1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
	{1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
	{1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
	{1, sizeof(SVGACOTableDXSamplerEntry), NULL},
	{1, sizeof(SVGACOTableDXStreamOutputEntry), &vmw_dx_streamoutput_cotable_list_scrub},
	{1, sizeof(SVGACOTableDXQueryEntry), NULL},
	{1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub},
	{1, sizeof(SVGACOTableDXUAViewEntry), &vmw_view_cotable_list_destroy}
};

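/*
 * The fixed order in which a context's cotables are scrubbed when the
 * whole context is scrubbed or evicted.
 */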
const SVGACOTableType vmw_cotable_scrub_order[] = {
	SVGA_COTABLE_RTVIEW,
	SVGA_COTABLE_DSVIEW,
	SVGA_COTABLE_SRVIEW,
	SVGA_COTABLE_DXSHADER,
	SVGA_COTABLE_ELEMENTLAYOUT,
	SVGA_COTABLE_BLENDSTATE,
	SVGA_COTABLE_DEPTHSTENCIL,
	SVGA_COTABLE_RASTERIZERSTATE,
	SVGA_COTABLE_SAMPLER,
	SVGA_COTABLE_STREAMOUTPUT,
	SVGA_COTABLE_DXQUERY,
	SVGA_COTABLE_UAVIEW,
};

static int vmw_cotable_bind(struct vmw_resource *res,
			    struct ttm_validate_buffer *val_buf);
static int vmw_cotable_unbind(struct vmw_resource *res,
			      bool readback,
			      struct ttm_validate_buffer *val_buf);
static int vmw_cotable_create(struct vmw_resource *res);
static int vmw_cotable_destroy(struct vmw_resource *res);

static const struct vmw_res_func vmw_cotable_func = {
	.res_type = vmw_res_cotable,
	.needs_backup = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "context guest backed object tables",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_cotable_create,
	.destroy = vmw_cotable_destroy,
	.bind = vmw_cotable_bind,
	.unbind = vmw_cotable_unbind,
};

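/**
 * vmw_cotable - Convert a struct vmw_resource pointer to a struct
 * vmw_cotable pointer
 *
 * @res: Pointer to the resource to convert.
 *
 * Return: Pointer to the struct vmw_cotable embedding @res.
 */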
static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
{
	return container_of(res, struct vmw_cotable, res);
}

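/**
 * vmw_cotable_destroy - Cotable resource destroy callback
 *
 * @res: Pointer to the cotable resource.
 *
 * There is no device command to destroy a cotable, so this callback only
 * invalidates the resource id.
 */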
static int vmw_cotable_destroy(struct vmw_resource *res)
{
	res->id = -1;
	return 0;
}

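/**
 * vmw_cotable_unscrub - Undo a cotable scrub operation
 *
 * @res: Pointer to the cotable resource.
 *
 * Issues a SET_COTABLE command that (re)binds the cotable to its backing
 * MOB. The backup buffer must be validated into MOB memory and reserved
 * when this function is called.
 */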
static int vmw_cotable_unscrub(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = &res->backup->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCOTable body;
	} *cmd;

	WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
	dma_resv_assert_held(bo->base.resv);

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;

	WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
	WARN_ON(bo->resource->mem_type != VMW_PL_MOB);
	cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = vcotbl->ctx->id;
	cmd->body.type = vcotbl->type;
	cmd->body.mobid = bo->resource->start;
	cmd->body.validSizeInBytes = vcotbl->size_read_back;

	vmw_cmd_commit_flush(dev_priv, sizeof(*cmd));
	vcotbl->scrubbed = false;

	return 0;
}

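/**
 * vmw_cotable_bind - Cotable resource bind callback
 *
 * @res: Pointer to the cotable resource.
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * Binds the cotable to its backing MOB, which must be validated and
 * reserved at this point.
 */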
static int vmw_cotable_bind(struct vmw_resource *res,
			    struct ttm_validate_buffer *val_buf)
{
	/*
	 * The create() callback may have changed @res->backup without the
	 * caller noticing, leaving @val_buf->bo pointing at the old backup
	 * buffer. Update it to point at the current one.
	 */
	val_buf->bo = &res->backup->base;

	return vmw_cotable_unscrub(res);
}

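/**
 * vmw_cotable_scrub - Scrub the cotable from the device
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to issue a readback of the cotable contents to the
 * backup buffer before scrubbing.
 *
 * Makes the device forget about the cotable without performing a full
 * unbind: the resources held by the cotable are scrubbed first (if the
 * cotable type has an unbind_func), then the device is told that the
 * cotable is no longer backed by a MOB. The resource id is invalidated
 * so that the next validation triggers a create().
 */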
int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	size_t submit_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackCOTable body;
	} *cmd0;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCOTable body;
	} *cmd1;

	if (vcotbl->scrubbed)
		return 0;

	if (co_info[vcotbl->type].unbind_func)
		co_info[vcotbl->type].unbind_func(dev_priv,
						  &vcotbl->resource_list,
						  readback);
	submit_size = sizeof(*cmd1);
	if (readback)
		submit_size += sizeof(*cmd0);

	cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (!cmd1)
		return -ENOMEM;

	vcotbl->size_read_back = 0;
	if (readback) {
		cmd0 = (void *) cmd1;
		cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
		cmd0->header.size = sizeof(cmd0->body);
		cmd0->body.cid = vcotbl->ctx->id;
		cmd0->body.type = vcotbl->type;
		cmd1 = (void *) &cmd0[1];
		vcotbl->size_read_back = res->backup_size;
	}
	cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.cid = vcotbl->ctx->id;
	cmd1->body.type = vcotbl->type;
	cmd1->body.mobid = SVGA3D_INVALID_ID;
	cmd1->body.validSizeInBytes = 0;
	vmw_cmd_commit_flush(dev_priv, submit_size);
	vcotbl->scrubbed = true;

	/* Trigger a create() on next validate. */
	res->id = -1;

	return 0;
}

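/**
 * vmw_cotable_unbind - Cotable resource unbind callback
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to read back cotable data to the backup buffer.
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * Scrubs all of the context's cotables if that has not happened yet, and
 * fences the backup buffer so it is not reused while the device may still
 * be accessing it.
 */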
static int vmw_cotable_unbind(struct vmw_resource *res,
			      bool readback,
			      struct ttm_validate_buffer *val_buf)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	if (!vmw_resource_mob_attached(res))
		return 0;

	WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
	dma_resv_assert_held(bo->base.resv);

	mutex_lock(&dev_priv->binding_mutex);
	if (!vcotbl->scrubbed)
		vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
	mutex_unlock(&dev_priv->binding_mutex);
	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_bo_fence_single(bo, fence);
	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

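/**
 * vmw_cotable_readback - Read back a cotable without unbinding
 *
 * @res: Pointer to the cotable resource.
 *
 * Reads back the cotable contents to its backing MOB without scrubbing
 * the MOB from the cotable, and fences the backup buffer for subsequent
 * CPU access.
 */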
static int vmw_cotable_readback(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackCOTable body;
	} *cmd;
	struct vmw_fence_obj *fence;

	if (!vcotbl->scrubbed) {
		cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
		if (!cmd)
			return -ENOMEM;

		cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.cid = vcotbl->ctx->id;
		cmd->body.type = vcotbl->type;
		vcotbl->size_read_back = res->backup_size;
		vmw_cmd_commit(dev_priv, sizeof(*cmd));
	}

	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_bo_fence_single(&res->backup->base, fence);
	vmw_fence_obj_unreference(&fence);

	return 0;
}

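/**
 * vmw_cotable_resize - Resize a cotable
 *
 * @res: Pointer to the cotable resource.
 * @new_size: The new (larger) size of the backup buffer.
 *
 * Reads back the current cotable contents, allocates a new backup buffer,
 * copies the contents over page by page and rebinds the cotable to the
 * new buffer. If anything fails before the device has switched to the new
 * buffer, the old backup buffer and metadata are restored.
 */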
static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_buffer_object *buf, *old_buf = res->backup;
	struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
	size_t old_size = res->backup_size;
	size_t old_size_read_back = vcotbl->size_read_back;
	size_t cur_size_read_back;
	struct ttm_bo_kmap_obj old_map, new_map;
	int ret;
	size_t i;

	ret = vmw_cotable_readback(res);
	if (ret)
		return ret;

	cur_size_read_back = vcotbl->size_read_back;
	vcotbl->size_read_back = old_size_read_back;

	/*
	 * While the device processes the readback, allocate and reserve a
	 * buffer object for the new COTable. Initially pin the buffer so
	 * the tryreserve below cannot fail.
	 */
	ret = vmw_bo_create(dev_priv, new_size, &vmw_mob_placement,
			    true, true, vmw_bo_bo_free, &buf);
	if (ret) {
		DRM_ERROR("Failed initializing new cotable MOB.\n");
		return ret;
	}

	bo = &buf->base;
	WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));

	ret = ttm_bo_wait(old_bo, false, false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed waiting for cotable unbind.\n");
		goto out_wait;
	}

	/*
	 * Do a page-by-page copy of the COTable contents. This avoids a
	 * slow vmap() of the whole buffer.
	 */
	for (i = 0; i < old_bo->resource->num_pages; ++i) {
		bool dummy;

		ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed mapping old COTable on resize.\n");
			goto out_wait;
		}
		ret = ttm_bo_kmap(bo, i, 1, &new_map);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed mapping new COTable on resize.\n");
			goto out_map_new;
		}
		memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
		       ttm_kmap_obj_virtual(&old_map, &dummy),
		       PAGE_SIZE);
		ttm_bo_kunmap(&new_map);
		ttm_bo_kunmap(&old_map);
	}

	/* Validate the new buffer into MOB memory and switch backup buffers. */
	ret = ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed validating new COTable backup buffer.\n");
		goto out_wait;
	}

	vmw_resource_mob_detach(res);
	res->backup = buf;
	res->backup_size = new_size;
	vcotbl->size_read_back = cur_size_read_back;

	/*
	 * Tell the device to switch to the new COTable. If this fails,
	 * restore the old backup buffer and metadata.
	 */
	ret = vmw_cotable_unscrub(res);
	if (ret) {
		DRM_ERROR("Failed switching COTable backup buffer.\n");
		res->backup = old_buf;
		res->backup_size = old_size;
		vcotbl->size_read_back = old_size_read_back;
		vmw_resource_mob_attach(res);
		goto out_wait;
	}

	vmw_resource_mob_attach(res);
	/* Let go of the old MOB. */
	vmw_bo_unreference(&old_buf);
	res->id = vcotbl->type;

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (unlikely(ret))
		goto out_wait;

	/* Release the pin acquired when creating the new buffer. */
	ttm_bo_unpin(bo);

	return 0;

out_map_new:
	ttm_bo_kunmap(&old_map);
out_wait:
	ttm_bo_unpin(bo);
	ttm_bo_unreserve(bo);
	vmw_bo_unreference(&buf);

	return ret;
}

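/**
 * vmw_cotable_create - Cotable resource create callback
 *
 * @res: Pointer to the cotable resource.
 *
 * There is no separate device command to create a cotable, so this
 * callback, which runs before bind() in the validation sequence, is used
 * to unscrub the cotable if it is still attached to a backup buffer, and
 * to grow the backup buffer when the entries seen so far no longer fit.
 */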
static int vmw_cotable_create(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	size_t new_size = res->backup_size;
	size_t needed_size;
	int ret;

	/* Check whether we need to resize the cotable to fit seen_entries. */
	needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
	while (needed_size > new_size)
		new_size *= 2;

	if (likely(new_size <= res->backup_size)) {
		if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) {
			ret = vmw_cotable_unscrub(res);
			if (ret)
				return ret;
		}
		res->id = vcotbl->type;
		return 0;
	}

	return vmw_cotable_resize(res, new_size);
}

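/**
 * vmw_hw_cotable_destroy - Cotable hw_destroy callback
 *
 * @res: Pointer to the cotable resource.
 *
 * There is no device command to destroy a cotable; this simply
 * invalidates the resource id.
 */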
static void vmw_hw_cotable_destroy(struct vmw_resource *res)
{
	(void) vmw_cotable_destroy(res);
}

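/**
 * vmw_cotable_free - Cotable resource destructor
 *
 * @res: Pointer to the cotable resource to free.
 */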
static void vmw_cotable_free(struct vmw_resource *res)
{
	kfree(res);
}

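/**
 * vmw_cotable_alloc - Create a cotable resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @ctx: Pointer to the context resource. The cotable resource does not
 * add a refcount to the context.
 * @type: The cotable type.
 *
 * Return: Pointer to the new cotable resource, or an ERR_PTR on failure.
 * The initial backup size is at least one page and large enough to hold
 * the minimum number of initial entries for the given type.
 */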
struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
				       struct vmw_resource *ctx,
				       u32 type)
{
	struct vmw_cotable *vcotbl;
	int ret;
	u32 num_entries;

	vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
	if (unlikely(!vcotbl)) {
		ret = -ENOMEM;
		goto out_no_alloc;
	}

	ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
				vmw_cotable_free, &vmw_cotable_func);
	if (unlikely(ret != 0))
		goto out_no_init;

	INIT_LIST_HEAD(&vcotbl->resource_list);
	vcotbl->res.id = type;
	vcotbl->res.backup_size = PAGE_SIZE;
	num_entries = PAGE_SIZE / co_info[type].size;
	if (num_entries < co_info[type].min_initial_entries) {
		vcotbl->res.backup_size = co_info[type].min_initial_entries *
			co_info[type].size;
		vcotbl->res.backup_size = PFN_ALIGN(vcotbl->res.backup_size);
	}

	vcotbl->scrubbed = true;
	vcotbl->seen_entries = -1;
	vcotbl->type = type;
	vcotbl->ctx = ctx;

	vcotbl->res.hw_destroy = vmw_hw_cotable_destroy;

	return &vcotbl->res;

out_no_init:
	kfree(vcotbl);
out_no_alloc:
	return ERR_PTR(ret);
}

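/**
 * vmw_cotable_notify - Notify the cotable about an item creation
 *
 * @res: Pointer to the cotable resource.
 * @id: Id of the item referenced in the command stream.
 *
 * If @id is larger than any entry seen so far, the resource id is
 * invalidated so that the next validation grows the cotable as needed.
 *
 * Return: 0 on success, -EINVAL if @id is out of range for the cotable type.
 */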
int vmw_cotable_notify(struct vmw_resource *res, int id)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);

	if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
		DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
			  (unsigned) vcotbl->type, id);
		return -EINVAL;
	}

	if (vcotbl->seen_entries < id) {
		/* Trigger a call to create() on next validate. */
		res->id = -1;
		vcotbl->seen_entries = id;
	}

	return 0;
}

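/**
 * vmw_cotable_add_resource - Add a resource to the cotable's list of
 * active resources
 *
 * @res: Pointer to the struct vmw_resource of the cotable.
 * @head: Pointer to the struct list_head member of the resource, dedicated
 * to the cotable active resource list.
 */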
void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
{
	struct vmw_cotable *vcotbl =
		container_of(res, struct vmw_cotable, res);

	list_add_tail(head, &vcotbl->resource_list);
}