#include <linux/slab.h>
#include "vmwgfx_validation.h"
#include "vmwgfx_drv.h"

#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)

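/**
 * struct vmw_validation_bo_node - Buffer object validation metadata.
 * @base: Metadata used for TTM reservation- and validation.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @coherent_count: If switching backup buffers, number of new coherent
 * resources that will have this buffer as a backup buffer.
 * @as_mob: Validate as mob.
 * @cpu_blit: Validate for cpu blit access.
 */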
struct vmw_validation_bo_node {
	struct ttm_validate_buffer base;
	struct vmwgfx_hash_item hash;
	unsigned int coherent_count;
	u32 as_mob : 1;
	u32 cpu_blit : 1;
};

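/**
 * struct vmw_validation_res_node - Resource validation metadata.
 * @head: List head for the resource validation list.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @res: Reference counted resource pointer.
 * @new_backup: Non ref-counted pointer to a new backup buffer to be assigned
 * to the resource.
 * @new_backup_offset: Offset into the new backup buffer.
 * @no_buffer_needed: No backup buffer needs to be allocated for this resource
 * during validation.
 * @switching_backup: The validation process is switching backup buffer.
 * @first_usage: True iff the resource has been seen only once in the current
 * validation batch.
 * @reserved: Whether the resource is currently reserved by this process.
 * @dirty: Whether the resource should be marked dirty (VMW_RES_DIRTY_SET).
 * @dirty_set: Whether @dirty carries valid information.
 * @private: Optional storage for caller-private data, sized by the
 * priv_size argument to vmw_validation_add_resource().
 */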
struct vmw_validation_res_node {
	struct list_head head;
	struct vmwgfx_hash_item hash;
	struct vmw_resource *res;
	struct vmw_buffer_object *new_backup;
	unsigned long new_backup_offset;
	u32 no_buffer_needed : 1;
	u32 switching_backup : 1;
	u32 first_usage : 1;
	u32 reserved : 1;
	u32 dirty : 1;
	u32 dirty_set : 1;
	unsigned long private[];
};

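/**
 * vmw_validation_mem_alloc - Allocate kernel memory from the validation
 * context based allocator.
 * @ctx: The validation context.
 * @size: The number of bytes to allocate.
 *
 * The allocation may not exceed PAGE_SIZE after alignment. All memory
 * allocated this way is reclaimed by vmw_validation_mem_free() when the
 * validation context is torn down.
 *
 * Return: Pointer to the allocated memory on success. NULL on failure.
 */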
void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	void *addr;

	size = vmw_validation_align(size);
	if (size > PAGE_SIZE)
		return NULL;

	if (ctx->mem_size_left < size) {
		struct page *page;

		if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
			ctx->vm_size_left += VMWGFX_VALIDATION_MEM_GRAN;
			ctx->total_mem += VMWGFX_VALIDATION_MEM_GRAN;
		}

		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			return NULL;

		if (ctx->vm)
			ctx->vm_size_left -= PAGE_SIZE;

		list_add_tail(&page->lru, &ctx->page_list);
		ctx->page_address = page_address(page);
		ctx->mem_size_left = PAGE_SIZE;
	}

	addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
	ctx->mem_size_left -= size;

	return addr;
}

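/**
 * vmw_validation_mem_free - Free all memory allocated for this validation
 * context.
 * @ctx: The validation context.
 *
 * This function is normally called on validation context teardown.
 */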
static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
{
	struct page *entry, *next;

	list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
		list_del_init(&entry->lru);
		__free_page(entry);
	}

	ctx->mem_size_left = 0;
	if (ctx->vm && ctx->total_mem) {
		ctx->total_mem = 0;
		ctx->vm_size_left = 0;
	}
}

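/**
 * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @vbo: The buffer object to search for.
 *
 * Return: Pointer to the struct vmw_validation_bo_node referencing the
 * duplicate, or NULL if none found.
 */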
static struct vmw_validation_bo_node *
vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
			   struct vmw_buffer_object *vbo)
{
	struct vmw_validation_bo_node *bo_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->ht) {
		struct vmwgfx_hash_item *hash;

		if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
			bo_node = container_of(hash, typeof(*bo_node), hash);
	} else {
		struct vmw_validation_bo_node *entry;

		list_for_each_entry(entry, &ctx->bo_list, base.head) {
			if (entry->base.bo == &vbo->base) {
				bo_node = entry;
				break;
			}
		}
	}

	return bo_node;
}

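/**
 * vmw_validation_find_res_dup - Find a duplicate resource entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @res: The resource to search for.
 *
 * Return: Pointer to the struct vmw_validation_res_node referencing the
 * duplicate, or NULL if none found.
 */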
static struct vmw_validation_res_node *
vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
			    struct vmw_resource *res)
{
	struct vmw_validation_res_node *res_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->ht) {
		struct vmwgfx_hash_item *hash;

		if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) res, &hash))
			res_node = container_of(hash, typeof(*res_node), hash);
	} else {
		struct vmw_validation_res_node *entry;

		list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
			if (entry->res == res) {
				res_node = entry;
				goto out;
			}
		}

		list_for_each_entry(entry, &ctx->resource_list, head) {
			if (entry->res == res) {
				res_node = entry;
				break;
			}
		}
	}
out:
	return res_node;
}

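/**
 * vmw_validation_add_bo - Add a buffer object to the validation context.
 * @ctx: The validation context.
 * @vbo: The buffer object.
 * @as_mob: Validate as mob.
 * @cpu_blit: Validate for cpu blit access.
 *
 * Return: Zero on success, negative error code otherwise.
 */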
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
			  struct vmw_buffer_object *vbo,
			  bool as_mob,
			  bool cpu_blit)
{
	struct vmw_validation_bo_node *bo_node;

	bo_node = vmw_validation_find_bo_dup(ctx, vbo);
	if (bo_node) {
		if (bo_node->as_mob != as_mob ||
		    bo_node->cpu_blit != cpu_blit) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
	} else {
		struct ttm_validate_buffer *val_buf;
		int ret;

		bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
		if (!bo_node)
			return -ENOMEM;

		if (ctx->ht) {
			bo_node->hash.key = (unsigned long) vbo;
			ret = vmwgfx_ht_insert_item(ctx->ht, &bo_node->hash);
			if (ret) {
				DRM_ERROR("Failed to initialize a buffer validation entry.\n");
				return ret;
			}
		}
		val_buf = &bo_node->base;
		val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
		if (!val_buf->bo)
			return -ESRCH;
		val_buf->num_shared = 0;
		list_add_tail(&val_buf->head, &ctx->bo_list);
		bo_node->as_mob = as_mob;
		bo_node->cpu_blit = cpu_blit;
	}

	return 0;
}

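/**
 * vmw_validation_add_resource - Add a resource to the validation context.
 * @ctx: The validation context.
 * @res: The resource.
 * @priv_size: Size of private, additional metadata.
 * @dirty: Whether to change dirty status.
 * @p_node: Output pointer of additional metadata address.
 * @first_usage: Output pointer of whether this was the first time this
 * resource was seen.
 *
 * Return: Zero on success, negative error code otherwise.
 */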
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
				struct vmw_resource *res,
				size_t priv_size,
				u32 dirty,
				void **p_node,
				bool *first_usage)
{
	struct vmw_validation_res_node *node;
	int ret;

	node = vmw_validation_find_res_dup(ctx, res);
	if (node) {
		node->first_usage = 0;
		goto out_fill;
	}

	node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
	if (!node) {
		VMW_DEBUG_USER("Failed to allocate a resource validation entry.\n");
		return -ENOMEM;
	}

	if (ctx->ht) {
		node->hash.key = (unsigned long) res;
		ret = vmwgfx_ht_insert_item(ctx->ht, &node->hash);
		if (ret) {
			DRM_ERROR("Failed to initialize a resource validation entry.\n");
			return ret;
		}
	}
	node->res = vmw_resource_reference_unless_doomed(res);
	if (!node->res)
		return -ESRCH;

	node->first_usage = 1;
	if (!res->dev_priv->has_mob) {
		list_add_tail(&node->head, &ctx->resource_list);
	} else {
		switch (vmw_res_type(res)) {
		case vmw_res_context:
		case vmw_res_dx_context:
			list_add(&node->head, &ctx->resource_ctx_list);
			break;
		case vmw_res_cotable:
			list_add_tail(&node->head, &ctx->resource_ctx_list);
			break;
		default:
			list_add_tail(&node->head, &ctx->resource_list);
			break;
		}
	}

out_fill:
	if (dirty) {
		node->dirty_set = 1;
		/* Overwriting previous information here is intentional! */
		node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
	}
	if (first_usage)
		*first_usage = node->first_usage;
	if (p_node)
		*p_node = &node->private;

	return 0;
}

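/**
 * vmw_validation_res_set_dirty - Register a resource dirty set or clear.
 * @ctx: The validation context.
 * @val_private: The additional meta-data pointer returned when the resource
 * was registered with the validation context. Used to identify the resource.
 * @dirty: Dirty information VMW_RES_DIRTY_XX.
 */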
void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
				  void *val_private, u32 dirty)
{
	struct vmw_validation_res_node *val;

	if (!dirty)
		return;

	val = container_of(val_private, typeof(*val), private);
	val->dirty_set = 1;
	/* Overwriting previous information here is intentional! */
	val->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
}

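/**
 * vmw_validation_res_switch_backup - Register a backup buffer switch during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional meta-data pointer returned when the resource
 * was registered with the validation context. Used to identify the resource.
 * @vbo: The new backup buffer object. This buffer object needs to have
 * already been registered with the validation context.
 * @backup_offset: Offset into the new backup buffer.
 */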
void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
				      void *val_private,
				      struct vmw_buffer_object *vbo,
				      unsigned long backup_offset)
{
	struct vmw_validation_res_node *val;

	val = container_of(val_private, typeof(*val), private);

	val->switching_backup = 1;
	if (val->first_usage)
		val->no_buffer_needed = 1;

	val->new_backup = vbo;
	val->new_backup_offset = backup_offset;
}

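/**
 * vmw_validation_res_reserve - Reserve all resources registered with this
 * validation context.
 * @ctx: The validation context.
 * @intr: Use interruptible waits when possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */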
int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
			       bool intr)
{
	struct vmw_validation_res_node *val;
	int ret = 0;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
		if (ret)
			goto out_unreserve;

		val->reserved = 1;
		if (res->backup) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo
				(ctx, vbo, vmw_resource_needs_backup(res),
				 false);
			if (ret)
				goto out_unreserve;
		}

		if (val->switching_backup && val->new_backup &&
		    res->coherent) {
			struct vmw_validation_bo_node *bo_node =
				vmw_validation_find_bo_dup(ctx,
							   val->new_backup);

			if (WARN_ON(!bo_node)) {
				ret = -EINVAL;
				goto out_unreserve;
			}
			bo_node->coherent_count++;
		}
	}

	return 0;

out_unreserve:
	vmw_validation_res_unreserve(ctx, true);
	return ret;
}

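/**
 * vmw_validation_res_unreserve - Unreserve all reserved resources registered
 * with this validation context.
 * @ctx: The validation context.
 * @backoff: Whether this is a backoff- or a commit-type operation. A backoff
 * operation doesn't commit any dirty- or backup buffer switch information.
 */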
void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
				  bool backoff)
{
	struct vmw_validation_res_node *val;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	if (backoff)
		list_for_each_entry(val, &ctx->resource_list, head) {
			if (val->reserved)
				vmw_resource_unreserve(val->res,
						       false, false, false,
						       NULL, 0);
		}
	else
		list_for_each_entry(val, &ctx->resource_list, head) {
			if (val->reserved)
				vmw_resource_unreserve(val->res,
						       val->dirty_set,
						       val->dirty,
						       val->switching_backup,
						       val->new_backup,
						       val->new_backup_offset);
		}
}

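/**
 * vmw_validation_bo_validate_single - Validate a single buffer object.
 * @bo: The TTM buffer object base.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @validate_as_mob: Whether to validate in MOB memory.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */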
int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
				      bool interruptible,
				      bool validate_as_mob)
{
	struct vmw_buffer_object *vbo =
		container_of(bo, struct vmw_buffer_object, base);
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false
	};
	int ret;

	if (atomic_read(&vbo->cpu_writers))
		return -EBUSY;

	if (vbo->base.pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (ret == 0 || ret == -ERESTARTSYS)
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
	return ret;
}

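/**
 * vmw_validation_bo_validate - Validate all buffer objects registered with
 * the validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */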
int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_bo_node *entry;
	int ret;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		struct vmw_buffer_object *vbo =
			container_of(entry->base.bo, typeof(*vbo), base);

		if (entry->cpu_blit) {
			struct ttm_operation_ctx ttm_ctx = {
				.interruptible = intr,
				.no_wait_gpu = false
			};

			ret = ttm_bo_validate(entry->base.bo,
					      &vmw_nonfixed_placement, &ttm_ctx);
		} else {
			ret = vmw_validation_bo_validate_single
				(entry->base.bo, intr, entry->as_mob);
		}
		if (ret)
			return ret;

		/*
		 * Rather than having the resource code allocate the bo
		 * dirty tracker in resource_unreserve() where we can't fail,
		 * do it here when validating the buffer object.
		 */
		if (entry->coherent_count) {
			unsigned int coherent_count = entry->coherent_count;

			while (coherent_count) {
				ret = vmw_bo_dirty_add(vbo);
				if (ret)
					return ret;

				coherent_count--;
			}
			entry->coherent_count -= coherent_count;
		}

		if (vbo->dirty)
			vmw_bo_dirty_scan(vbo);
	}
	return 0;
}

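/**
 * vmw_validation_res_validate - Validate all resources registered with the
 * validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */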
int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_res_node *val;
	int ret;

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_buffer_object *backup = res->backup;

		ret = vmw_resource_validate(res, intr, val->dirty_set &&
					    val->dirty);
		if (ret) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo
				(ctx, vbo, vmw_resource_needs_backup(res),
				 false);
			if (ret)
				return ret;
		}
	}
	return 0;
}

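/**
 * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
 * and unregister it from this validation context.
 * @ctx: The validation context.
 *
 * After resource- and buffer object registering, there is no longer any use
 * for the duplicate-finding hash table, so it can be dropped before the
 * registered objects are unreferenced.
 */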
void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	if (!ctx->ht)
		return;

	list_for_each_entry(entry, &ctx->bo_list, base.head)
		(void) vmwgfx_ht_remove_item(ctx->ht, &entry->hash);

	list_for_each_entry(val, &ctx->resource_list, head)
		(void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);

	list_for_each_entry(val, &ctx->resource_ctx_list, head)
		(void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);

	ctx->ht = NULL;
}

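/**
 * vmw_validation_unref_lists - Unregister previously registered buffer
 * objects and resources.
 * @ctx: The validation context.
 *
 * Note that this function may cause buffer object- and resource destructors
 * to be invoked.
 */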
void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		ttm_bo_put(entry->base.bo);
		entry->base.bo = NULL;
	}

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	list_for_each_entry(val, &ctx->resource_list, head)
		vmw_resource_unreference(&val->res);

	/*
	 * No need to detach each list entry since they are all freed with
	 * vmw_validation_mem_free(). Just make them inaccessible.
	 */
	INIT_LIST_HEAD(&ctx->bo_list);
	INIT_LIST_HEAD(&ctx->resource_list);

	vmw_validation_mem_free(ctx);
}

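/**
 * vmw_validation_prepare - Prepare a validation context for command
 * submission.
 * @ctx: The validation context.
 * @mutex: The mutex used to protect resource reservation.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * If this function doesn't return Zero to indicate success, all resources
 * are left unreserved but still referenced.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on error.
 */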
int vmw_validation_prepare(struct vmw_validation_context *ctx,
			   struct mutex *mutex,
			   bool intr)
{
	int ret = 0;

	if (mutex) {
		if (intr)
			ret = mutex_lock_interruptible(mutex);
		else
			mutex_lock(mutex);
		if (ret)
			return -ERESTARTSYS;
	}

	ctx->res_mutex = mutex;
	ret = vmw_validation_res_reserve(ctx, intr);
	if (ret)
		goto out_no_res_reserve;

	ret = vmw_validation_bo_reserve(ctx, intr);
	if (ret)
		goto out_no_bo_reserve;

	ret = vmw_validation_bo_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	ret = vmw_validation_res_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	return 0;

out_no_validate:
	vmw_validation_bo_backoff(ctx);
out_no_bo_reserve:
	vmw_validation_res_unreserve(ctx, true);
out_no_res_reserve:
	if (mutex)
		mutex_unlock(mutex);

	return ret;
}

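/**
 * vmw_validation_revert - Revert validation actions if command submission
 * failed.
 * @ctx: The validation context.
 *
 * The caller still needs to unref resources after a call to this function.
 */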
void vmw_validation_revert(struct vmw_validation_context *ctx)
{
	vmw_validation_bo_backoff(ctx);
	vmw_validation_res_unreserve(ctx, true);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

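/**
 * vmw_validation_done - Commit validation actions after command submission
 * success.
 * @ctx: The validation context.
 * @fence: Fence with which to fence all buffer objects taking part in the
 * command submission.
 *
 * The caller does NOT need to unref resources after a call to this function.
 */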
void vmw_validation_done(struct vmw_validation_context *ctx,
			 struct vmw_fence_obj *fence)
{
	vmw_validation_bo_fence(ctx, fence);
	vmw_validation_res_unreserve(ctx, false);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

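/**
 * vmw_validation_preload_bo - Preload the validation memory allocator for a
 * call to vmw_validation_add_bo().
 * @ctx: Pointer to the validation context.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
 * but voids the guarantee.
 *
 * Return: Zero if successful, %-ENOMEM otherwise.
 */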
int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
{
	unsigned int size = sizeof(struct vmw_validation_bo_node);

	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}

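/**
 * vmw_validation_preload_res - Preload the validation memory allocator for a
 * call to vmw_validation_add_resource().
 * @ctx: Pointer to the validation context.
 * @size: Size of the validation node extra data.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_resource() with the same or smaller @size is guaranteed
 * not to sleep. An error is not fatal but voids the guarantee.
 *
 * Return: Zero if successful, %-ENOMEM otherwise.
 */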
int vmw_validation_preload_res(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
				    size) +
		vmw_validation_align(sizeof(struct vmw_validation_bo_node));
	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}

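/**
 * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
 * validation context.
 * @ctx: The validation context.
 *
 * This function unreserves the buffer objects previously reserved using
 * vmw_validation_bo_reserve. It's typically used as part of an error path.
 */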
void vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;

	/*
	 * Switching coherent resource backup buffers failed.
	 * Release corresponding buffer object dirty trackers.
	 */
	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		if (entry->coherent_count) {
			unsigned int coherent_count = entry->coherent_count;
			struct vmw_buffer_object *vbo =
				container_of(entry->base.bo, typeof(*vbo),
					     base);

			while (coherent_count--)
				vmw_bo_dirty_release(vbo);
		}
	}

	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
}