// SPDX-License-Identifier: GPL-2.0 OR MIT

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

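/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backup MOB.
 *
 * @res: The resource to attach.
 *
 * Inserts the resource into the backup buffer's resource tree, keyed on
 * backup offset, and adds the resource's priority to the buffer.
 */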
void vmw_resource_mob_attach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;
	struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;

	dma_resv_assert_held(res->backup->base.base.resv);
	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
		res->func->prio;

	while (*new) {
		struct vmw_resource *this =
			container_of(*new, struct vmw_resource, mob_node);

		parent = *new;
		new = (res->backup_offset < this->backup_offset) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&res->mob_node, parent, new);
	rb_insert_color(&res->mob_node, &backup->res_tree);

	vmw_bo_prio_add(backup, res->used_prio);
}

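/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backup MOB.
 *
 * @res: The resource to detach.
 *
 * Removes the resource from the backup buffer's resource tree and drops the
 * priority reference added at attach time.
 */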
void vmw_resource_mob_detach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;

	dma_resv_assert_held(backup->base.base.resv);
	if (vmw_resource_mob_attached(res)) {
		rb_erase(&res->mob_node, &backup->res_tree);
		RB_CLEAR_NODE(&res->mob_node);
		vmw_bo_prio_del(backup, res->used_prio);
	}
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

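/**
 * vmw_resource_release_id - Release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Removes the resource from the per-type idr and marks it as having no id.
 */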
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	spin_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
		container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ret = ttm_bo_reserve(bo, false, false, NULL);
		BUG_ON(ret);
		if (vmw_resource_mob_attached(res) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.num_shared = 0;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		vmw_resource_mob_detach(res);
		if (res->dirty)
			res->func->dirty_free(res);
		if (res->coherent)
			vmw_bo_dirty_release(res->backup);
		ttm_bo_unreserve(bo);
		vmw_bo_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	spin_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	spin_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}

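/**
 * vmw_resource_alloc_id - Allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocates the lowest free id for the resource type and stores it in
 * @res->id. Returns 0 on success and a negative error code on failure.
 */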
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	spin_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	spin_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

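/**
 * vmw_resource_init - Initialize a struct vmw_resource.
 *
 * @dev_priv: Pointer to a device private struct.
 * @res:      The resource to initialize.
 * @delay_id: Defer id allocation until the resource is made visible to
 *            the device.
 * @res_free: Destructor to call when the last reference is dropped, or
 *            NULL to use kfree().
 * @func:     Resource type specific function table.
 *
 * Returns 0 on success, or the result of vmw_resource_alloc_id() if an id
 * is allocated immediately.
 */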
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->dev_priv = dev_priv;
	res->func = func;
	RB_CLEAR_NODE(&res->mob_node);
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	res->coherent = false;
	res->used_prio = 3;
	res->dirty = NULL;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}

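/**
 * vmw_user_resource_lookup_handle - Look up a struct vmw_resource from a
 * TTM user-space handle and perform basic type checks.
 *
 * @dev_priv:  Pointer to a device private struct.
 * @tfile:     Pointer to a struct ttm_object_file identifying the caller.
 * @handle:    The TTM user-space handle.
 * @converter: Pointer to an object describing the resource type.
 * @p_res:     On successful return, points to a refcounted
 *             struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL is returned.
 */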
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);
	kref_get(&res->kref);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

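/**
 * vmw_user_resource_noref_lookup_handle - Look up a struct vmw_resource from
 * a TTM user-space handle without taking a reference.
 *
 * @dev_priv:  Pointer to a device private struct.
 * @tfile:     Pointer to a struct ttm_object_file identifying the caller.
 * @handle:    The TTM user-space handle.
 * @converter: Pointer to an object describing the resource type.
 *
 * Returns ERR_PTR(-ESRCH) if the handle can't be found and ERR_PTR(-EINVAL)
 * if it is associated with an incorrect resource type.
 */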
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
				       struct ttm_object_file *tfile,
				       uint32_t handle,
				       const struct vmw_user_resource_conv
				       *converter)
{
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base)
		return ERR_PTR(-ESRCH);

	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
		ttm_base_object_noref_release();
		return ERR_PTR(-EINVAL);
	}

	return converter->base_obj_to_res(base);
}

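/*
 * Helper that looks up either a surface or a buffer object from a
 * user-space handle. The pointers pointed to by @out_surf and @out_buf
 * must be NULL on entry; on success exactly one of them is set.
 */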
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct drm_file *filp,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_buffer_object **out_buf)
{
	struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_bo_lookup(filp, handle, out_buf);
	return ret;
}

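/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:           The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 *                 interruptible.
 */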
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size = PFN_ALIGN(res->backup_size);
	struct vmw_buffer_object *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.base.size < size);
		return 0;
	}

	ret = vmw_bo_create(res->dev_priv, res->backup_size,
			    res->func->backup_placement,
			    interruptible, false,
			    &vmw_bo_bo_free, &backup);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->backup = backup;

out_no_bo:
	return ret;
}

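/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:      The resource to make visible to the device.
 * @val_buf:  Information about a buffer possibly containing backup data
 *            if a bind operation is needed.
 * @dirtying: Transfer dirty regions.
 *
 * On hardware resource shortage, this function returns -EBUSY and should
 * be retried once resources have been freed up.
 */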
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf,
				    bool dirtying)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			vmw_resource_mob_attach(res);
	}

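	/*
	 * Handle the case where the backup mob is marked coherent but
	 * the resource isn't.
	 */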
	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
	    !res->coherent) {
		if (res->backup->dirty && !res->dirty) {
			ret = func->dirty_alloc(res);
			if (ret)
				return ret;
		} else if (!res->backup->dirty && res->dirty) {
			func->dirty_free(res);
		}
	}

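	/*
	 * Transfer the dirty regions to the resource and update
	 * the resource.
	 */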
	if (res->dirty) {
		if (dirtying && !res->res_dirty) {
			pgoff_t start = res->backup_offset >> PAGE_SHIFT;
			pgoff_t end = __KERNEL_DIV_ROUND_UP
				(res->backup_offset + res->backup_size,
				 PAGE_SIZE);

			vmw_bo_dirty_unmap(res->backup, start, end);
		}

		vmw_bo_dirty_transfer_to_res(res);
		return func->dirty_sync(res);
	}

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

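/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @dirty_set:         Change the dirty status of the resource.
 * @dirty:             When changing, the new dirty status.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to the new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */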
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool dirty_set,
			    bool dirty,
			    bool switch_backup,
			    struct vmw_buffer_object *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			vmw_resource_mob_detach(res);
			if (res->coherent)
				vmw_bo_dirty_release(res->backup);
			vmw_bo_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_bo_reference(new_backup);

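			/*
			 * For coherent resources the new backup is expected
			 * to already have a dirty tracker attached.
			 */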
			WARN_ON(res->coherent && !new_backup->dirty);

			vmw_resource_mob_attach(res);
		} else {
			res->backup = NULL;
		}
	} else if (switch_backup && res->coherent) {
		vmw_bo_dirty_release(res->backup);
	}

	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (dirty_set)
		res->res_dirty = dirty;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	spin_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	spin_unlock(&dev_priv->resource_lock);
}

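/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and if so, allocate and
 *                             reserve it.
 *
 * @ticket:        The ww acquire context to use, or NULL if trylocking.
 * @res:           The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 *                 interruptible.
 * @val_buf:       On successful return contains data about the
 *                 reserved and validated backup buffer.
 */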
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	ttm_bo_get(&res->backup->base);
	val_buf->bo = &res->backup->base;
	val_buf->num_shared = 0;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
	if (backup_dirty)
		vmw_bo_unreference(&res->backup);

	return ret;
}

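/**
 * vmw_resource_reserve - Reserve a resource for command submission.
 *
 * @res:           The resource to reserve.
 * @interruptible: Whether any sleeps should be interruptible.
 * @no_backup:     Do not allocate a backup buffer even if one is needed.
 *
 * This function takes the resource off the LRU list and makes sure a
 * backup buffer is present for guest-backed resources. However, the
 * buffer may not be bound to the resource at this point.
 */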
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes.\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}

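/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer.
 *
 * @ticket:  The ww acquire ctx used for reservation.
 * @val_buf: Backup buffer information.
 */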
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
}

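/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:        The ww acquire ticket to use, or NULL if trylocking.
 * @res:           The resource to evict.
 * @interruptible: Whether to wait interruptible.
 */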
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		vmw_resource_mob_detach(res);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}

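/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 *
 * @res:      The resource to make visible to the device.
 * @intr:     Perform waits interruptible if possible.
 * @dirtying: Pending GPU operation will dirty the resource.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup
 * will be reserved and validated. On hardware resource shortage, this
 * function will repeatedly evict resources of the same type until the
 * validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */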
int vmw_resource_validate(struct vmw_resource *res, bool intr,
			  bool dirtying)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
		if (likely(ret != -EBUSY))
			break;

		spin_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			spin_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		spin_unlock(&dev_priv->resource_lock);

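		/* Trylock backup buffers with a NULL ticket. */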
		ret = vmw_resource_do_evict(NULL, evict_res, intr);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		WARN_ON_ONCE(vmw_resource_mob_attached(res));
		vmw_bo_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}

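/**
 * vmw_resource_unbind_list - Unbind all resources attached to a backup MOB.
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the guest-backed hardware resources if the backup buffer is being
 * moved out of MOB memory. Note that this function will not race with the
 * resource validation code, since resource validation and eviction both
 * require the backup buffer to be reserved.
 */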
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->base,
		.num_shared = 0
	};

	dma_resv_assert_held(vbo->base.base.resv);
	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
		struct rb_node *node = vbo->res_tree.rb_node;
		struct vmw_resource *res =
			container_of(node, struct vmw_resource, mob_node);

		if (!WARN_ON_ONCE(!res->func->unbind))
			(void) res->func->unbind(res, res->res_dirty, &val_buf);

		res->backup_dirty = true;
		res->res_dirty = false;
		vmw_resource_mob_detach(res);
	}

	(void) ttm_bo_wait(&vbo->base, false, false);
}

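/**
 * vmw_query_readback_all - Read back cached query states.
 *
 * @dx_query_mob: Buffer containing the DX query MOB.
 *
 * Read back cached states from the device if they exist. This function
 * assumes binding_mutex is held.
 */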
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;

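	/* No query bound, so do nothing */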
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv = dx_query_ctx->dev_priv;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = dx_query_ctx->id;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));

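	/* Triggers a rebind the next time the affected context is bound */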
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}

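/**
 * vmw_query_move_notify - Read back cached query states before a move.
 *
 * @bo:      The TTM buffer object about to move.
 * @old_mem: The memory region @bo is moving from.
 * @new_mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */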
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_resource *old_mem,
			   struct ttm_resource *new_mem)
{
	struct vmw_buffer_object *dx_query_mob;
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;

	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

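	/* If the BO is being moved from MOB to system memory */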
	if (new_mem->mem_type == TTM_PL_SYSTEM &&
	    old_mem->mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
		if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
			mutex_unlock(&dev_priv->binding_mutex);
			return;
		}

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

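		/* Create a fence and attach the BO to it */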
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);
}

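/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */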
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

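/**
 * vmw_resource_evict_type - Evict all resources of a specific type.
 *
 * @dev_priv: Pointer to a device private struct.
 * @type:     The resource type to evict.
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */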
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		spin_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		spin_unlock(&dev_priv->resource_lock);

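		/* Wait-lock backup buffers with a ticket. */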
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	spin_unlock(&dev_priv->resource_lock);
}

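/**
 * vmw_resource_evict_all - Evict all evictable resources.
 *
 * @dev_priv: Pointer to a device private struct.
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular, all guest-backed resources
 * registered with the device are evicted.
 */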
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

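/**
 * vmw_resource_pin - Add a pin reference on a resource.
 *
 * @res:           The resource to add a pin reference on.
 * @interruptible: Whether any sleeps should be interruptible.
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */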
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_buffer_object *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ret = ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
			if (ret)
				goto out_no_validate;
			if (!vbo->base.pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

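			/* Keep the backing MOB pinned while the resource is pinned. */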
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res, interruptible, true);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	return ret;
}

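/**
 * vmw_resource_unpin - Remove a pin reference from a resource.
 *
 * @res: The resource to remove a pin reference from.
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */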
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_buffer_object *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

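/**
 * vmw_res_type - Return the resource type.
 *
 * @res: Pointer to the resource.
 */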
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}

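/**
 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
 * sequential range of touched backing store memory.
 *
 * @res:   The resource.
 * @start: The first page touched.
 * @end:   The last page touched + 1.
 */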
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end)
{
	if (res->dirty)
		res->func->dirty_range_add(res, start << PAGE_SHIFT,
					   end << PAGE_SHIFT);
}

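/**
 * vmw_resources_clean - Clean resources intersecting a mob range.
 *
 * @vbo:          The mob buffer object.
 * @start:        The mob page offset starting the range.
 * @end:          The mob page offset ending the range.
 * @num_prefault: Returns how many pages including the first have been
 *                cleaned and are ok to prefault.
 */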
int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault)
{
	struct rb_node *cur = vbo->res_tree.rb_node;
	struct vmw_resource *found = NULL;
	unsigned long res_start = start << PAGE_SHIFT;
	unsigned long res_end = end << PAGE_SHIFT;
	unsigned long last_cleaned = 0;

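	/*
	 * Find the resource with the lowest backup_offset that intersects
	 * the range.
	 */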
	while (cur) {
		struct vmw_resource *cur_res =
			container_of(cur, struct vmw_resource, mob_node);

		if (cur_res->backup_offset >= res_end) {
			cur = cur->rb_left;
		} else if (cur_res->backup_offset + cur_res->backup_size <=
			   res_start) {
			cur = cur->rb_right;
		} else {
			found = cur_res;
			cur = cur->rb_left;
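			/* Continue to look for resources with lower offsets */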
		}
	}

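	/*
	 * In order of increasing backup_offset, clean dirty resources
	 * intersecting the range.
	 */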
	while (found) {
		if (found->res_dirty) {
			int ret;

			if (!found->func->clean)
				return -EINVAL;

			ret = found->func->clean(found);
			if (ret)
				return ret;

			found->res_dirty = false;
		}
		last_cleaned = found->backup_offset + found->backup_size;
		cur = rb_next(&found->mob_node);
		if (!cur)
			break;

		found = container_of(cur, struct vmw_resource, mob_node);
		if (found->backup_offset >= res_end)
			break;
	}

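	/*
	 * Set the number of pages allowed prefaulting and fence the
	 * buffer object.
	 */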
	*num_prefault = 1;
	if (last_cleaned > res_start) {
		struct ttm_buffer_object *bo = &vbo->base;

		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
						      PAGE_SIZE);
		vmw_bo_fence_single(bo, NULL);
	}

	return 0;
}