// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/slab.h>
#include "vmwgfx_validation.h"
#include "vmwgfx_drv.h"

#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)

/**
 * struct vmw_validation_bo_node - Buffer object validation metadata.
 * @base: Metadata used for TTM reservation and validation.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @coherent_count: If switching backup buffers, number of new coherent
 * resources that will have this buffer as a backup buffer.
 * @as_mob: Validate as MOB.
 * @cpu_blit: Validate for CPU blit access.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_bo_node {
	struct ttm_validate_buffer base;
	struct vmwgfx_hash_item hash;
	unsigned int coherent_count;
	u32 as_mob : 1;
	u32 cpu_blit : 1;
};

/**
 * struct vmw_validation_res_node - Resource validation metadata.
 * @head: List head for the resource validation list.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @res: Reference counted resource pointer.
 * @new_backup: Non-ref-counted pointer to a new backup buffer to be assigned
 * to a resource.
 * @new_backup_offset: Offset into the new backup MOB for resources that can
 * share MOBs.
 * @no_buffer_needed: Kernel does not need to allocate a MOB during validation,
 * the command stream provides a MOB bind operation.
 * @switching_backup: The validation process is switching backup MOB.
 * @first_usage: True iff the resource has been seen only once in the current
 * validation batch.
 * @reserved: Whether the resource is currently reserved by this process.
 * @dirty_set: Change dirty status of the resource.
 * @dirty: Dirty information VMW_RES_DIRTY_XX.
 * @private: Optional additional memory for caller-private data.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_res_node {
	struct list_head head;
	struct vmwgfx_hash_item hash;
	struct vmw_resource *res;
	struct vmw_buffer_object *new_backup;
	unsigned long new_backup_offset;
	u32 no_buffer_needed : 1;
	u32 switching_backup : 1;
	u32 first_usage : 1;
	u32 reserved : 1;
	u32 dirty : 1;
	u32 dirty_set : 1;
	unsigned long private[];
};

/**
 * vmw_validation_mem_alloc - Allocate kernel memory from the validation
 * context-based allocator.
 * @ctx: The validation context.
 * @size: The number of bytes to allocate.
 *
 * The memory allocated may not exceed PAGE_SIZE, and the returned
 * address is aligned to sizeof(long). All memory allocated this way is
 * reclaimed after validation when calling any of the exported functions:
 * vmw_validation_unref_lists()
 * vmw_validation_revert()
 * vmw_validation_done()
 *
 * Return: Pointer to the allocated memory on success. NULL on failure.
 */
void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	void *addr;

	size = vmw_validation_align(size);
	if (size > PAGE_SIZE)
		return NULL;

	if (ctx->mem_size_left < size) {
		struct page *page;

		if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
			ctx->vm_size_left += VMWGFX_VALIDATION_MEM_GRAN;
			ctx->total_mem += VMWGFX_VALIDATION_MEM_GRAN;
		}

		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			return NULL;

		if (ctx->vm)
			ctx->vm_size_left -= PAGE_SIZE;

		list_add_tail(&page->lru, &ctx->page_list);
		ctx->page_address = page_address(page);
		ctx->mem_size_left = PAGE_SIZE;
	}

	addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
	ctx->mem_size_left -= size;

	return addr;
}

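/*
 * Usage sketch (illustrative, not part of the driver): node metadata is
 * carved out of this context pool and is never freed individually, e.g.:
 *
 *	struct vmw_validation_bo_node *node =
 *		vmw_validation_mem_alloc(ctx, sizeof(*node));
 *	if (!node)
 *		return -ENOMEM;
 *	// No kfree(): the pool pages are released on context teardown by
 *	// vmw_validation_done(), vmw_validation_revert() or
 *	// vmw_validation_unref_lists().
 */
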
/**
 * vmw_validation_mem_free - Free all memory allocated using
 * vmw_validation_mem_alloc()
 * @ctx: The validation context
 *
 * All memory previously allocated for this context using
 * vmw_validation_mem_alloc() is freed.
 */
static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
{
	struct page *entry, *next;

	list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
		list_del_init(&entry->lru);
		__free_page(entry);
	}

	ctx->mem_size_left = 0;
	if (ctx->vm && ctx->total_mem) {
		ctx->total_mem = 0;
		ctx->vm_size_left = 0;
	}
}

/**
 * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @vbo: The buffer object to search for.
 *
 * Return: Pointer to the struct vmw_validation_bo_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_bo_node *
vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
			   struct vmw_buffer_object *vbo)
{
	struct vmw_validation_bo_node *bo_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->ht) {
		struct vmwgfx_hash_item *hash;

		if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
			bo_node = container_of(hash, typeof(*bo_node), hash);
	} else {
		struct vmw_validation_bo_node *entry;

		list_for_each_entry(entry, &ctx->bo_list, base.head) {
			if (entry->base.bo == &vbo->base) {
				bo_node = entry;
				break;
			}
		}
	}

	return bo_node;
}

/**
 * vmw_validation_find_res_dup - Find a duplicate resource entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @res: Reference counted resource pointer.
 *
 * Return: Pointer to the struct vmw_validation_res_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_res_node *
vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
			    struct vmw_resource *res)
{
	struct vmw_validation_res_node *res_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->ht) {
		struct vmwgfx_hash_item *hash;

		if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) res, &hash))
			res_node = container_of(hash, typeof(*res_node), hash);
	} else {
		struct vmw_validation_res_node *entry;

		list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
			if (entry->res == res) {
				res_node = entry;
				goto out;
			}
		}

		list_for_each_entry(entry, &ctx->resource_list, head) {
			if (entry->res == res) {
				res_node = entry;
				break;
			}
		}
	}
out:
	return res_node;
}

/**
 * vmw_validation_add_bo - Add a buffer object to the validation context.
 * @ctx: The validation context.
 * @vbo: The buffer object.
 * @as_mob: Validate as MOB, otherwise suitable for GMR operations.
 * @cpu_blit: Validate in a page-mappable location.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
			  struct vmw_buffer_object *vbo,
			  bool as_mob,
			  bool cpu_blit)
{
	struct vmw_validation_bo_node *bo_node;

	bo_node = vmw_validation_find_bo_dup(ctx, vbo);
	if (bo_node) {
		if (bo_node->as_mob != as_mob ||
		    bo_node->cpu_blit != cpu_blit) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
	} else {
		struct ttm_validate_buffer *val_buf;
		int ret;

		bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
		if (!bo_node)
			return -ENOMEM;

		if (ctx->ht) {
			bo_node->hash.key = (unsigned long) vbo;
			ret = vmwgfx_ht_insert_item(ctx->ht, &bo_node->hash);
			if (ret) {
				DRM_ERROR("Failed to initialize a buffer validation entry.\n");
				return ret;
			}
		}
		val_buf = &bo_node->base;
		val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
		if (!val_buf->bo)
			return -ESRCH;
		val_buf->num_shared = 0;
		list_add_tail(&val_buf->head, &ctx->bo_list);
		bo_node->as_mob = as_mob;
		bo_node->cpu_blit = cpu_blit;
	}

	return 0;
}

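/*
 * Usage sketch (illustrative): a buffer object is registered once, before
 * the context is reserved and validated:
 *
 *	ret = vmw_validation_add_bo(ctx, vbo, false, false);
 *	if (ret)
 *		return ret;
 *	// Later: vmw_validation_bo_reserve() and
 *	// vmw_validation_bo_validate() act on all registered buffers.
 */
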
/**
 * vmw_validation_add_resource - Add a resource to the validation context.
 * @ctx: The validation context.
 * @res: The resource.
 * @priv_size: Size of private, additional metadata.
 * @dirty: Whether to change dirty status.
 * @p_node: Output pointer of additional metadata address.
 * @first_usage: Whether this was the first time this resource was seen.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
				struct vmw_resource *res,
				size_t priv_size,
				u32 dirty,
				void **p_node,
				bool *first_usage)
{
	struct vmw_validation_res_node *node;
	int ret;

	node = vmw_validation_find_res_dup(ctx, res);
	if (node) {
		node->first_usage = 0;
		goto out_fill;
	}

	node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
	if (!node) {
		VMW_DEBUG_USER("Failed to allocate a resource validation entry.\n");
		return -ENOMEM;
	}

	if (ctx->ht) {
		node->hash.key = (unsigned long) res;
		ret = vmwgfx_ht_insert_item(ctx->ht, &node->hash);
		if (ret) {
			DRM_ERROR("Failed to initialize a resource validation entry.\n");
			return ret;
		}
	}
	node->res = vmw_resource_reference_unless_doomed(res);
	if (!node->res)
		return -ESRCH;

	node->first_usage = 1;
	if (!res->dev_priv->has_mob) {
		list_add_tail(&node->head, &ctx->resource_list);
	} else {
		switch (vmw_res_type(res)) {
		case vmw_res_context:
		case vmw_res_dx_context:
			list_add(&node->head, &ctx->resource_ctx_list);
			break;
		case vmw_res_cotable:
			list_add_tail(&node->head, &ctx->resource_ctx_list);
			break;
		default:
			list_add_tail(&node->head, &ctx->resource_list);
			break;
		}
	}

out_fill:
	if (dirty) {
		node->dirty_set = 1;
		/* Overwriting previous information here is intentional! */
		node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
	}
	if (first_usage)
		*first_usage = node->first_usage;
	if (p_node)
		*p_node = &node->private;

	return 0;
}

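/*
 * Usage sketch (illustrative): callers may co-allocate caller-private data
 * with the validation node and retrieve it through @p_node. "my_priv" is a
 * hypothetical caller-side struct:
 *
 *	struct my_priv *priv;
 *	void *node;
 *	bool first;
 *	int ret;
 *
 *	ret = vmw_validation_add_resource(ctx, res, sizeof(*priv),
 *					  VMW_RES_DIRTY_NONE, &node, &first);
 *	if (ret)
 *		return ret;
 *	priv = node;
 */
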
/**
 * vmw_validation_res_set_dirty - Register a resource dirty set or clear during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional metadata pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @dirty: Dirty information VMW_RES_DIRTY_XX.
 */
void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
				  void *val_private, u32 dirty)
{
	struct vmw_validation_res_node *val;

	if (!dirty)
		return;

	val = container_of(val_private, typeof(*val), private);
	val->dirty_set = 1;
	/* Overwriting previous information here is intentional! */
	val->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
}

/**
 * vmw_validation_res_switch_backup - Register a backup MOB switch during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional metadata pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @vbo: The new backup buffer object (MOB). This buffer object needs to have
 * already been registered with the validation context.
 * @backup_offset: Offset into the new backup MOB.
 */
void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
				      void *val_private,
				      struct vmw_buffer_object *vbo,
				      unsigned long backup_offset)
{
	struct vmw_validation_res_node *val;

	val = container_of(val_private, typeof(*val), private);

	val->switching_backup = 1;
	if (val->first_usage)
		val->no_buffer_needed = 1;

	val->new_backup = vbo;
	val->new_backup_offset = backup_offset;
}

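/*
 * Ordering note (illustrative): since @vbo must already be registered,
 * a typical caller pairs the two calls:
 *
 *	ret = vmw_validation_add_bo(ctx, vbo, true, false);
 *	if (ret)
 *		return ret;
 *	vmw_validation_res_switch_backup(ctx, val_private, vbo, 0);
 */
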
/**
 * vmw_validation_res_reserve - Reserve all resources registered with this
 * validation context.
 * @ctx: The validation context.
 * @intr: Use interruptible waits when possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
			       bool intr)
{
	struct vmw_validation_res_node *val;
	int ret = 0;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
		if (ret)
			goto out_unreserve;

		val->reserved = 1;
		if (res->backup) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo
				(ctx, vbo, vmw_resource_needs_backup(res),
				 false);
			if (ret)
				goto out_unreserve;
		}

		if (val->switching_backup && val->new_backup &&
		    res->coherent) {
			struct vmw_validation_bo_node *bo_node =
				vmw_validation_find_bo_dup(ctx,
							   val->new_backup);

			if (WARN_ON(!bo_node)) {
				ret = -EINVAL;
				goto out_unreserve;
			}
			bo_node->coherent_count++;
		}
	}

	return 0;

out_unreserve:
	vmw_validation_res_unreserve(ctx, true);
	return ret;
}

/**
 * vmw_validation_res_unreserve - Unreserve all reserved resources
 * registered with this validation context.
 * @ctx: The validation context.
 * @backoff: Whether this is a backoff- or a commit-type operation. This
 * is used to determine whether to switch backup MOBs or not.
 */
void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
				  bool backoff)
{
	struct vmw_validation_res_node *val;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	if (backoff)
		list_for_each_entry(val, &ctx->resource_list, head) {
			if (val->reserved)
				vmw_resource_unreserve(val->res,
						       false, false, false,
						       NULL, 0);
		}
	else
		list_for_each_entry(val, &ctx->resource_list, head) {
			if (val->reserved)
				vmw_resource_unreserve(val->res,
						       val->dirty_set,
						       val->dirty,
						       val->switching_backup,
						       val->new_backup,
						       val->new_backup_offset);
		}
}

/**
 * vmw_validation_bo_validate_single - Validate a single buffer object.
 * @bo: The TTM buffer object base.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @validate_as_mob: Whether to validate in MOB memory.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
				      bool interruptible,
				      bool validate_as_mob)
{
	struct vmw_buffer_object *vbo =
		container_of(bo, struct vmw_buffer_object, base);
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false
	};
	int ret;

	if (atomic_read(&vbo->cpu_writers))
		return -EBUSY;

	if (vbo->base.pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (ret == 0 || ret == -ERESTARTSYS)
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
	return ret;
}

/**
 * vmw_validation_bo_validate - Validate all buffer objects registered with
 * the validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_bo_node *entry;
	int ret;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		struct vmw_buffer_object *vbo =
			container_of(entry->base.bo, typeof(*vbo), base);

		if (entry->cpu_blit) {
			struct ttm_operation_ctx ttm_ctx = {
				.interruptible = intr,
				.no_wait_gpu = false
			};

			ret = ttm_bo_validate(entry->base.bo,
					      &vmw_nonfixed_placement, &ttm_ctx);
		} else {
			ret = vmw_validation_bo_validate_single
				(entry->base.bo, intr, entry->as_mob);
		}
		if (ret)
			return ret;

		/*
		 * Rather than have the resource code allocate the bo dirty
		 * tracker in resource_unreserve(), where we can't fail, do
		 * it here when validating the buffer object.
		 */
		if (entry->coherent_count) {
			unsigned int coherent_count = entry->coherent_count;

			while (coherent_count) {
				ret = vmw_bo_dirty_add(vbo);
				if (ret)
					return ret;

				coherent_count--;
			}
			entry->coherent_count -= coherent_count;
		}

		if (vbo->dirty)
			vmw_bo_dirty_scan(vbo);
	}
	return 0;
}

/**
 * vmw_validation_res_validate - Validate all resources registered with the
 * validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_res_node *val;
	int ret;

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_buffer_object *backup = res->backup;

		ret = vmw_resource_validate(res, intr, val->dirty_set &&
					    val->dirty);
		if (ret) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo
				(ctx, vbo, vmw_resource_needs_backup(res),
				 false);
			if (ret)
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
 * and unregister it from this validation context.
 * @ctx: The validation context.
 *
 * The hash table used for duplicate finding is an expensive resource and
 * may be protected by mutexes that may cause deadlocks during resource
 * unreferencing if held. After resource and buffer object registering,
 * there is no longer any use for this hash table, so allow freeing it
 * either to shorten any mutex locking time, or before resources and
 * buffer objects are freed during validation context cleanup.
 */
void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	if (!ctx->ht)
		return;

	list_for_each_entry(entry, &ctx->bo_list, base.head)
		(void) vmwgfx_ht_remove_item(ctx->ht, &entry->hash);

	list_for_each_entry(val, &ctx->resource_list, head)
		(void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);

	list_for_each_entry(val, &ctx->resource_ctx_list, head)
		(void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);

	ctx->ht = NULL;
}

/**
 * vmw_validation_unref_lists - Unregister previously registered buffer
 * objects and resources.
 * @ctx: The validation context.
 *
 * Note that this function may cause buffer object- and resource destructors
 * to be invoked.
 */
void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		ttm_bo_put(entry->base.bo);
		entry->base.bo = NULL;
	}

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	list_for_each_entry(val, &ctx->resource_list, head)
		vmw_resource_unreference(&val->res);

	/*
	 * No need to detach each list entry since they are all freed with
	 * vmw_validation_mem_free(). Just make them inaccessible.
	 */
	INIT_LIST_HEAD(&ctx->bo_list);
	INIT_LIST_HEAD(&ctx->resource_list);

	vmw_validation_mem_free(ctx);
}

/**
 * vmw_validation_prepare - Prepare a validation context for command
 * submission.
 * @ctx: The validation context.
 * @mutex: The mutex used to protect resource reservation.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Note that the single reservation mutex @mutex is an unfortunate
 * construct. Ideally resource reservation should be moved to per-resource
 * ww_mutexes.
 * If this function doesn't return zero to indicate success, all resources
 * are left unreserved but still referenced.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on error.
 */
int vmw_validation_prepare(struct vmw_validation_context *ctx,
			   struct mutex *mutex,
			   bool intr)
{
	int ret = 0;

	if (mutex) {
		if (intr)
			ret = mutex_lock_interruptible(mutex);
		else
			mutex_lock(mutex);
		if (ret)
			return -ERESTARTSYS;
	}

	ctx->res_mutex = mutex;
	ret = vmw_validation_res_reserve(ctx, intr);
	if (ret)
		goto out_no_res_reserve;

	ret = vmw_validation_bo_reserve(ctx, intr);
	if (ret)
		goto out_no_bo_reserve;

	ret = vmw_validation_bo_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	ret = vmw_validation_res_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	return 0;

out_no_validate:
	vmw_validation_bo_backoff(ctx);
out_no_bo_reserve:
	vmw_validation_res_unreserve(ctx, true);
out_no_res_reserve:
	if (mutex)
		mutex_unlock(mutex);

	return ret;
}

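/*
 * Typical call sequence (sketch assembled from the functions in this file;
 * submit_commands() stands in for the caller's command submission):
 *
 *	ret = vmw_validation_prepare(ctx, mutex, true);
 *	if (ret)
 *		return ret;	// resources remain referenced
 *	ret = submit_commands();
 *	if (ret)
 *		vmw_validation_revert(ctx);
 *	else
 *		vmw_validation_done(ctx, fence);
 */
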
/**
 * vmw_validation_revert - Revert validation actions if command submission
 * failed.
 * @ctx: The validation context.
 *
 * The caller still needs to unref resources after a call to this function.
 */
void vmw_validation_revert(struct vmw_validation_context *ctx)
{
	vmw_validation_bo_backoff(ctx);
	vmw_validation_res_unreserve(ctx, true);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_done - Commit validation actions after command submission
 * success.
 * @ctx: The validation context.
 * @fence: Fence with which to fence all buffer objects taking part in the
 * command submission.
 *
 * The caller does NOT need to unref resources after a call to this function.
 */
void vmw_validation_done(struct vmw_validation_context *ctx,
			 struct vmw_fence_obj *fence)
{
	vmw_validation_bo_fence(ctx, fence);
	vmw_validation_res_unreserve(ctx, false);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_preload_bo - Preload the validation memory allocator for a
 * call to vmw_validation_add_bo().
 * @ctx: Pointer to the validation context.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
 * but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
{
	unsigned int size = sizeof(struct vmw_validation_bo_node);

	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

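	/*
	 * Hand the just-allocated bytes back to the pool: the next
	 * vmw_validation_mem_alloc() of at most this size is then
	 * guaranteed to be satisfied from the already-mapped page.
	 */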
	ctx->mem_size_left += size;
	return 0;
}

/**
 * vmw_validation_preload_res - Preload the validation memory allocator for a
 * call to vmw_validation_add_resource().
 * @ctx: Pointer to the validation context.
 * @size: Size of the validation node extra data. See below.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_resource() with the same or smaller @size is guaranteed
 * not to sleep. An error is not fatal but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_res(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
				    size) +
	       vmw_validation_align(sizeof(struct vmw_validation_bo_node));
	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}

/**
 * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
 * validation context.
 * @ctx: The validation context.
 *
 * This function unreserves the buffer objects previously reserved using
 * vmw_validation_bo_reserve(). It's typically used as part of an error path.
 */
void vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;

	/*
	 * Switching coherent resource backup buffers failed.
	 * Release corresponding buffer object dirty trackers.
	 */
	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		if (entry->coherent_count) {
			unsigned int coherent_count = entry->coherent_count;
			struct vmw_buffer_object *vbo =
				container_of(entry->base.bo, typeof(*vbo),
					     base);

			while (coherent_count--)
				vmw_bo_dirty_release(vbo);
		}
	}

	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
}