0001 // SPDX-License-Identifier: GPL-2.0 OR MIT
0002 /**************************************************************************
0003  *
0004  * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
0005  *
0006  * Permission is hereby granted, free of charge, to any person obtaining a
0007  * copy of this software and associated documentation files (the
0008  * "Software"), to deal in the Software without restriction, including
0009  * without limitation the rights to use, copy, modify, merge, publish,
0010  * distribute, sub license, and/or sell copies of the Software, and to
0011  * permit persons to whom the Software is furnished to do so, subject to
0012  * the following conditions:
0013  *
0014  * The above copyright notice and this permission notice (including the
0015  * next paragraph) shall be included in all copies or substantial portions
0016  * of the Software.
0017  *
0018  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0019  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0020  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
0021  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
0022  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
0023  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
0024  * USE OR OTHER DEALINGS IN THE SOFTWARE.
0025  *
0026  **************************************************************************/
0027 
0028 #include <drm/ttm/ttm_placement.h>
0029 
0030 #include "vmwgfx_resource_priv.h"
0031 #include "vmwgfx_binding.h"
0032 #include "vmwgfx_drv.h"
0033 
0034 #define VMW_RES_EVICT_ERR_COUNT 10
0035 
0036 /**
0037  * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
0038  * @res: The resource
0039  */
0040 void vmw_resource_mob_attach(struct vmw_resource *res)
0041 {
0042     struct vmw_buffer_object *backup = res->backup;
0043     struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;
0044 
0045     dma_resv_assert_held(res->backup->base.base.resv);
0046     res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
0047         res->func->prio;
0048 
0049     while (*new) {
0050         struct vmw_resource *this =
0051             container_of(*new, struct vmw_resource, mob_node);
0052 
0053         parent = *new;
0054         new = (res->backup_offset < this->backup_offset) ?
0055             &((*new)->rb_left) : &((*new)->rb_right);
0056     }
0057 
0058     rb_link_node(&res->mob_node, parent, new);
0059     rb_insert_color(&res->mob_node, &backup->res_tree);
0060 
0061     vmw_bo_prio_add(backup, res->used_prio);
0062 }
0063 
0064 /**
0065  * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
0066  * @res: The resource
0067  */
0068 void vmw_resource_mob_detach(struct vmw_resource *res)
0069 {
0070     struct vmw_buffer_object *backup = res->backup;
0071 
0072     dma_resv_assert_held(backup->base.base.resv);
0073     if (vmw_resource_mob_attached(res)) {
0074         rb_erase(&res->mob_node, &backup->res_tree);
0075         RB_CLEAR_NODE(&res->mob_node);
0076         vmw_bo_prio_del(backup, res->used_prio);
0077     }
0078 }
0079 
0080 struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
0081 {
0082     kref_get(&res->kref);
0083     return res;
0084 }
0085 
0086 struct vmw_resource *
0087 vmw_resource_reference_unless_doomed(struct vmw_resource *res)
0088 {
0089     return kref_get_unless_zero(&res->kref) ? res : NULL;
0090 }
0091 
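/*
 * A minimal usage sketch for the refcounting helpers above; the calling
 * context is an assumption and error handling is elided:
 *
 *	struct vmw_resource *tmp = vmw_resource_reference(res);
 *
 *	... use tmp while holding the extra reference ...
 *
 *	vmw_resource_unreference(&tmp);		(tmp is NULL afterwards)
 *
 * vmw_resource_reference_unless_doomed() is the variant to use when the
 * resource may already be on its way to destruction: it returns NULL
 * rather than resurrecting an object whose refcount has reached zero.
 */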
0092 /**
0093  * vmw_resource_release_id - release a resource id to the id manager.
0094  *
0095  * @res: Pointer to the resource.
0096  *
0097  * Release the resource id to the resource id manager and set it to -1
0098  */
0099 void vmw_resource_release_id(struct vmw_resource *res)
0100 {
0101     struct vmw_private *dev_priv = res->dev_priv;
0102     struct idr *idr = &dev_priv->res_idr[res->func->res_type];
0103 
0104     spin_lock(&dev_priv->resource_lock);
0105     if (res->id != -1)
0106         idr_remove(idr, res->id);
0107     res->id = -1;
0108     spin_unlock(&dev_priv->resource_lock);
0109 }
0110 
0111 static void vmw_resource_release(struct kref *kref)
0112 {
0113     struct vmw_resource *res =
0114         container_of(kref, struct vmw_resource, kref);
0115     struct vmw_private *dev_priv = res->dev_priv;
0116     int id;
0117     int ret;
0118     struct idr *idr = &dev_priv->res_idr[res->func->res_type];
0119 
0120     spin_lock(&dev_priv->resource_lock);
0121     list_del_init(&res->lru_head);
0122     spin_unlock(&dev_priv->resource_lock);
0123     if (res->backup) {
0124         struct ttm_buffer_object *bo = &res->backup->base;
0125 
0126         ret = ttm_bo_reserve(bo, false, false, NULL);
0127         BUG_ON(ret);
0128         if (vmw_resource_mob_attached(res) &&
0129             res->func->unbind != NULL) {
0130             struct ttm_validate_buffer val_buf;
0131 
0132             val_buf.bo = bo;
0133             val_buf.num_shared = 0;
0134             res->func->unbind(res, false, &val_buf);
0135         }
0136         res->backup_dirty = false;
0137         vmw_resource_mob_detach(res);
0138         if (res->dirty)
0139             res->func->dirty_free(res);
0140         if (res->coherent)
0141             vmw_bo_dirty_release(res->backup);
0142         ttm_bo_unreserve(bo);
0143         vmw_bo_unreference(&res->backup);
0144     }
0145 
0146     if (likely(res->hw_destroy != NULL)) {
0147         mutex_lock(&dev_priv->binding_mutex);
0148         vmw_binding_res_list_kill(&res->binding_head);
0149         mutex_unlock(&dev_priv->binding_mutex);
0150         res->hw_destroy(res);
0151     }
0152 
0153     id = res->id;
0154     if (res->res_free != NULL)
0155         res->res_free(res);
0156     else
0157         kfree(res);
0158 
0159     spin_lock(&dev_priv->resource_lock);
0160     if (id != -1)
0161         idr_remove(idr, id);
0162     spin_unlock(&dev_priv->resource_lock);
0163 }
0164 
0165 void vmw_resource_unreference(struct vmw_resource **p_res)
0166 {
0167     struct vmw_resource *res = *p_res;
0168 
0169     *p_res = NULL;
0170     kref_put(&res->kref, vmw_resource_release);
0171 }
0172 
0173 
0174 /**
0175  * vmw_resource_alloc_id - allocate a resource id from the id manager.
0176  *
0177  * @res: Pointer to the resource.
0178  *
0179  * Allocate the lowest free resource id from the resource id manager, and set
0180  * @res->id to that id. Returns 0 on success and a negative error code on failure.
0181  */
0182 int vmw_resource_alloc_id(struct vmw_resource *res)
0183 {
0184     struct vmw_private *dev_priv = res->dev_priv;
0185     int ret;
0186     struct idr *idr = &dev_priv->res_idr[res->func->res_type];
0187 
0188     BUG_ON(res->id != -1);
0189 
0190     idr_preload(GFP_KERNEL);
0191     spin_lock(&dev_priv->resource_lock);
0192 
0193     ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
0194     if (ret >= 0)
0195         res->id = ret;
0196 
0197     spin_unlock(&dev_priv->resource_lock);
0198     idr_preload_end();
0199     return ret < 0 ? ret : 0;
0200 }
0201 
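/*
 * A minimal sketch of the id lifecycle, assuming a resource that was
 * initialized with delay_id == true and is now being made visible to the
 * device for the first time (typically from a res_func->create callback):
 *
 *	ret = vmw_resource_alloc_id(res);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 *	... issue the device command that creates the resource ...
 *
 * The id is later returned with vmw_resource_release_id(), typically when
 * the device copy of the resource is destroyed.
 */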
0202 /**
0203  * vmw_resource_init - initialize a struct vmw_resource
0204  *
0205  * @dev_priv:       Pointer to a device private struct.
0206  * @res:            The struct vmw_resource to initialize.
0207  * @delay_id:       Boolean whether to defer device id allocation until
0208  *                  the first validation.
0209  * @res_free:       Resource destructor.
0210  * @func:           Resource function table.
0211  */
0212 int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
0213               bool delay_id,
0214               void (*res_free) (struct vmw_resource *res),
0215               const struct vmw_res_func *func)
0216 {
0217     kref_init(&res->kref);
0218     res->hw_destroy = NULL;
0219     res->res_free = res_free;
0220     res->dev_priv = dev_priv;
0221     res->func = func;
0222     RB_CLEAR_NODE(&res->mob_node);
0223     INIT_LIST_HEAD(&res->lru_head);
0224     INIT_LIST_HEAD(&res->binding_head);
0225     res->id = -1;
0226     res->backup = NULL;
0227     res->backup_offset = 0;
0228     res->backup_dirty = false;
0229     res->res_dirty = false;
0230     res->coherent = false;
0231     res->used_prio = 3;
0232     res->dirty = NULL;
0233     if (delay_id)
0234         return 0;
0235     else
0236         return vmw_resource_alloc_id(res);
0237 }
0238 
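/*
 * A minimal sketch of how a resource constructor might use
 * vmw_resource_init(); the example_* names are hypothetical placeholders,
 * not functions from this driver:
 *
 *	static int example_resource_init(struct vmw_private *dev_priv,
 *					 struct vmw_resource *res)
 *	{
 *		int ret = vmw_resource_init(dev_priv, res, true,
 *					    example_res_free,
 *					    &example_res_func);
 *		if (unlikely(ret != 0))
 *			return ret;
 *
 *		res->hw_destroy = example_hw_destroy;
 *		return 0;
 *	}
 *
 * Passing delay_id == true defers device id allocation until the first
 * validation, as described in the kernel-doc above.
 */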
0239 
0240 /**
0241  * vmw_user_resource_lookup_handle - lookup a struct vmw_resource from a
0242  * TTM user-space handle and perform basic type checks
0243  *
0244  * @dev_priv:     Pointer to a device private struct
0245  * @tfile:        Pointer to a struct ttm_object_file identifying the caller
0246  * @handle:       The TTM user-space handle
0247  * @converter:    Pointer to an object describing the resource type
0248  * @p_res:        On successful return the location pointed to will contain
0249  *                a pointer to a refcounted struct vmw_resource.
0250  *
0251  * If the handle can't be found or is associated with an incorrect resource
0252  * type, -EINVAL will be returned.
0253  */
0254 int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
0255                     struct ttm_object_file *tfile,
0256                     uint32_t handle,
0257                     const struct vmw_user_resource_conv
0258                     *converter,
0259                     struct vmw_resource **p_res)
0260 {
0261     struct ttm_base_object *base;
0262     struct vmw_resource *res;
0263     int ret = -EINVAL;
0264 
0265     base = ttm_base_object_lookup(tfile, handle);
0266     if (unlikely(base == NULL))
0267         return -EINVAL;
0268 
0269     if (unlikely(ttm_base_object_type(base) != converter->object_type))
0270         goto out_bad_resource;
0271 
0272     res = converter->base_obj_to_res(base);
0273     kref_get(&res->kref);
0274 
0275     *p_res = res;
0276     ret = 0;
0277 
0278 out_bad_resource:
0279     ttm_base_object_unref(&base);
0280 
0281     return ret;
0282 }
0283 
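/*
 * A minimal usage sketch for the lookup above; the surrounding ioctl
 * context, @tfile and @handle are assumptions standing in for a real
 * caller:
 *
 *	struct vmw_resource *res;
 *	int ret;
 *
 *	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *					      user_surface_converter, &res);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 *	... use res ...
 *
 *	vmw_resource_unreference(&res);
 *
 * The resource returned in @p_res is refcounted, so every successful
 * lookup must be balanced by a vmw_resource_unreference().
 */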
0284 /**
0285  * vmw_user_resource_noref_lookup_handle - lookup a struct vmw_resource from a
0286  * TTM user-space handle and perform basic type checks
0287  *
0288  * @dev_priv:     Pointer to a device private struct
0289  * @tfile:        Pointer to a struct ttm_object_file identifying the caller
0290  * @handle:       The TTM user-space handle
0291  * @converter:    Pointer to an object describing the resource type
0292  *
0293  * If the handle can't be found, ERR_PTR(-ESRCH) is returned; if it is
0294  * associated with an incorrect resource type, ERR_PTR(-EINVAL) is returned.
0295  */
0296 struct vmw_resource *
0297 vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
0298                       struct ttm_object_file *tfile,
0299                       uint32_t handle,
0300                       const struct vmw_user_resource_conv
0301                       *converter)
0302 {
0303     struct ttm_base_object *base;
0304 
0305     base = ttm_base_object_noref_lookup(tfile, handle);
0306     if (!base)
0307         return ERR_PTR(-ESRCH);
0308 
0309     if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
0310         ttm_base_object_noref_release();
0311         return ERR_PTR(-EINVAL);
0312     }
0313 
0314     return converter->base_obj_to_res(base);
0315 }
0316 
0317 /*
0318  * Helper function that looks up either a surface or a buffer object.
0319  *
0320  * The pointers pointed at by @out_surf and @out_buf need to be NULL.
0321  */
0322 int vmw_user_lookup_handle(struct vmw_private *dev_priv,
0323                struct drm_file *filp,
0324                uint32_t handle,
0325                struct vmw_surface **out_surf,
0326                struct vmw_buffer_object **out_buf)
0327 {
0328     struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
0329     struct vmw_resource *res;
0330     int ret;
0331 
0332     BUG_ON(*out_surf || *out_buf);
0333 
0334     ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
0335                           user_surface_converter,
0336                           &res);
0337     if (!ret) {
0338         *out_surf = vmw_res_to_srf(res);
0339         return 0;
0340     }
0341 
0342     *out_surf = NULL;
0343     ret = vmw_user_bo_lookup(filp, handle, out_buf);
0344     return ret;
0345 }
0346 
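/*
 * A minimal sketch of resolving a user-space handle that may name either a
 * surface or a buffer object using the helper above; the caller shown is
 * hypothetical:
 *
 *	struct vmw_surface *surf = NULL;
 *	struct vmw_buffer_object *buf = NULL;
 *	int ret;
 *
 *	ret = vmw_user_lookup_handle(dev_priv, file_priv, handle, &surf, &buf);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 * On success exactly one of @surf and @buf is non-NULL and holds a
 * reference that the caller must eventually drop.
 */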
0347 /**
0348  * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
0349  *
0350  * @res:            The resource for which to allocate a backup buffer.
0351  * @interruptible:  Whether any sleeps during allocation should be
0352  *                  performed while interruptible.
0353  */
0354 static int vmw_resource_buf_alloc(struct vmw_resource *res,
0355                   bool interruptible)
0356 {
0357     unsigned long size = PFN_ALIGN(res->backup_size);
0358     struct vmw_buffer_object *backup;
0359     int ret;
0360 
0361     if (likely(res->backup)) {
0362         BUG_ON(res->backup->base.base.size < size);
0363         return 0;
0364     }
0365 
0366     ret = vmw_bo_create(res->dev_priv, res->backup_size,
0367                 res->func->backup_placement,
0368                 interruptible, false,
0369                 &vmw_bo_bo_free, &backup);
0370     if (unlikely(ret != 0))
0371         goto out_no_bo;
0372 
0373     res->backup = backup;
0374 
0375 out_no_bo:
0376     return ret;
0377 }
0378 
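/*
 * Worked example for the check above, assuming 4 KiB pages: with a
 * res->backup_size of 5000 bytes, PFN_ALIGN() yields size == 8192, so an
 * already attached backup buffer must be at least two pages large to pass
 * the BUG_ON().
 */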
0379 /**
0380  * vmw_resource_do_validate - Make a resource up-to-date and visible
0381  *                            to the device.
0382  *
0383  * @res:            The resource to make visible to the device.
0384  * @val_buf:        Information about a buffer possibly
0385  *                  containing backup data if a bind operation is needed.
0386  * @dirtying:       Transfer dirty regions.
0387  *
0388  * On hardware resource shortage, this function returns -EBUSY and
0389  * should be retried once resources have been freed up.
0390  */
0391 static int vmw_resource_do_validate(struct vmw_resource *res,
0392                     struct ttm_validate_buffer *val_buf,
0393                     bool dirtying)
0394 {
0395     int ret = 0;
0396     const struct vmw_res_func *func = res->func;
0397 
0398     if (unlikely(res->id == -1)) {
0399         ret = func->create(res);
0400         if (unlikely(ret != 0))
0401             return ret;
0402     }
0403 
0404     if (func->bind &&
0405         ((func->needs_backup && !vmw_resource_mob_attached(res) &&
0406           val_buf->bo != NULL) ||
0407          (!func->needs_backup && val_buf->bo != NULL))) {
0408         ret = func->bind(res, val_buf);
0409         if (unlikely(ret != 0))
0410             goto out_bind_failed;
0411         if (func->needs_backup)
0412             vmw_resource_mob_attach(res);
0413     }
0414 
0415     /*
0416      * Handle the case where the backup mob is marked coherent but
0417      * the resource isn't.
0418      */
0419     if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
0420         !res->coherent) {
0421         if (res->backup->dirty && !res->dirty) {
0422             ret = func->dirty_alloc(res);
0423             if (ret)
0424                 return ret;
0425         } else if (!res->backup->dirty && res->dirty) {
0426             func->dirty_free(res);
0427         }
0428     }
0429 
0430     /*
0431      * Transfer the dirty regions to the resource and update
0432      * the resource.
0433      */
0434     if (res->dirty) {
0435         if (dirtying && !res->res_dirty) {
0436             pgoff_t start = res->backup_offset >> PAGE_SHIFT;
0437             pgoff_t end = __KERNEL_DIV_ROUND_UP
0438                 (res->backup_offset + res->backup_size,
0439                  PAGE_SIZE);
0440 
0441             vmw_bo_dirty_unmap(res->backup, start, end);
0442         }
0443 
0444         vmw_bo_dirty_transfer_to_res(res);
0445         return func->dirty_sync(res);
0446     }
0447 
0448     return 0;
0449 
0450 out_bind_failed:
0451     func->destroy(res);
0452 
0453     return ret;
0454 }
0455 
0456 /**
0457  * vmw_resource_unreserve - Unreserve a resource previously reserved for
0458  * command submission.
0459  *
0460  * @res:               Pointer to the struct vmw_resource to unreserve.
0461  * @dirty_set:         Change dirty status of the resource.
0462  * @dirty:             When changing dirty status indicates the new status.
0463  * @switch_backup:     Backup buffer has been switched.
0464  * @new_backup:        Pointer to new backup buffer if command submission
0465  *                     switched. May be NULL.
0466  * @new_backup_offset: New backup offset if @switch_backup is true.
0467  *
0468  * Currently unreserving a resource means putting it back on the device's
0469  * resource lru list, so that it can be evicted if necessary.
0470  */
0471 void vmw_resource_unreserve(struct vmw_resource *res,
0472                 bool dirty_set,
0473                 bool dirty,
0474                 bool switch_backup,
0475                 struct vmw_buffer_object *new_backup,
0476                 unsigned long new_backup_offset)
0477 {
0478     struct vmw_private *dev_priv = res->dev_priv;
0479 
0480     if (!list_empty(&res->lru_head))
0481         return;
0482 
0483     if (switch_backup && new_backup != res->backup) {
0484         if (res->backup) {
0485             vmw_resource_mob_detach(res);
0486             if (res->coherent)
0487                 vmw_bo_dirty_release(res->backup);
0488             vmw_bo_unreference(&res->backup);
0489         }
0490 
0491         if (new_backup) {
0492             res->backup = vmw_bo_reference(new_backup);
0493 
0494             /*
0495              * The validation code should already have added a
0496              * dirty tracker here.
0497              */
0498             WARN_ON(res->coherent && !new_backup->dirty);
0499 
0500             vmw_resource_mob_attach(res);
0501         } else {
0502             res->backup = NULL;
0503         }
0504     } else if (switch_backup && res->coherent) {
0505         vmw_bo_dirty_release(res->backup);
0506     }
0507 
0508     if (switch_backup)
0509         res->backup_offset = new_backup_offset;
0510 
0511     if (dirty_set)
0512         res->res_dirty = dirty;
0513 
0514     if (!res->func->may_evict || res->id == -1 || res->pin_count)
0515         return;
0516 
0517     spin_lock(&dev_priv->resource_lock);
0518     list_add_tail(&res->lru_head,
0519               &res->dev_priv->res_lru[res->func->res_type]);
0520     spin_unlock(&dev_priv->resource_lock);
0521 }
0522 
0523 /**
0524  * vmw_resource_check_buffer - Check whether a backup buffer is needed
0525  *                             for a resource and in that case, allocate
0526  *                             one, reserve and validate it.
0527  *
0528  * @ticket:         The ww acquire context to use, or NULL if trylocking.
0529  * @res:            The resource for which to allocate a backup buffer.
0530  * @interruptible:  Whether any sleeps during allocation should be
0531  *                  performed while interruptible.
0532  * @val_buf:        On successful return contains data about the
0533  *                  reserved and validated backup buffer.
0534  */
0535 static int
0536 vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
0537               struct vmw_resource *res,
0538               bool interruptible,
0539               struct ttm_validate_buffer *val_buf)
0540 {
0541     struct ttm_operation_ctx ctx = { true, false };
0542     struct list_head val_list;
0543     bool backup_dirty = false;
0544     int ret;
0545 
0546     if (unlikely(res->backup == NULL)) {
0547         ret = vmw_resource_buf_alloc(res, interruptible);
0548         if (unlikely(ret != 0))
0549             return ret;
0550     }
0551 
0552     INIT_LIST_HEAD(&val_list);
0553     ttm_bo_get(&res->backup->base);
0554     val_buf->bo = &res->backup->base;
0555     val_buf->num_shared = 0;
0556     list_add_tail(&val_buf->head, &val_list);
0557     ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
0558     if (unlikely(ret != 0))
0559         goto out_no_reserve;
0560 
0561     if (res->func->needs_backup && !vmw_resource_mob_attached(res))
0562         return 0;
0563 
0564     backup_dirty = res->backup_dirty;
0565     ret = ttm_bo_validate(&res->backup->base,
0566                   res->func->backup_placement,
0567                   &ctx);
0568 
0569     if (unlikely(ret != 0))
0570         goto out_no_validate;
0571 
0572     return 0;
0573 
0574 out_no_validate:
0575     ttm_eu_backoff_reservation(ticket, &val_list);
0576 out_no_reserve:
0577     ttm_bo_put(val_buf->bo);
0578     val_buf->bo = NULL;
0579     if (backup_dirty)
0580         vmw_bo_unreference(&res->backup);
0581 
0582     return ret;
0583 }
0584 
0585 /*
0586  * vmw_resource_reserve - Reserve a resource for command submission
0587  *
0588  * @res:            The resource to reserve.
0589  *
0590  * This function takes the resource off the LRU list and makes sure
0591  * a backup buffer is present for guest-backed resources. However,
0592  * the buffer may not be bound to the resource at this point.
0593  *
0594  */
0595 int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
0596              bool no_backup)
0597 {
0598     struct vmw_private *dev_priv = res->dev_priv;
0599     int ret;
0600 
0601     spin_lock(&dev_priv->resource_lock);
0602     list_del_init(&res->lru_head);
0603     spin_unlock(&dev_priv->resource_lock);
0604 
0605     if (res->func->needs_backup && res->backup == NULL &&
0606         !no_backup) {
0607         ret = vmw_resource_buf_alloc(res, interruptible);
0608         if (unlikely(ret != 0)) {
0609             DRM_ERROR("Failed to allocate a backup buffer "
0610                   "of size %lu bytes\n",
0611                   (unsigned long) res->backup_size);
0612             return ret;
0613         }
0614     }
0615 
0616     return 0;
0617 }
0618 
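/*
 * A minimal sketch of the ordering the functions in this file expect,
 * modeled on vmw_resource_pin() further down; locking of cmdbuf_mutex and
 * error handling are elided, and the caller is an assumption:
 *
 *	ret = vmw_resource_reserve(res, true, false);
 *	if (res->backup)
 *		ret = ttm_bo_reserve(&res->backup->base, true, false, NULL);
 *	ret = vmw_resource_validate(res, true, false);
 *	if (res->backup)
 *		ttm_bo_unreserve(&res->backup->base);
 *	vmw_resource_unreserve(res, false, false, false, NULL, 0);
 */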
0619 /**
0620  * vmw_resource_backoff_reservation - Unreserve and unreference a
0621  *                                    backup buffer
0622  *
0623  * @ticket:         The ww acquire ctx used for reservation.
0624  * @val_buf:        Backup buffer information.
0625  */
0626 static void
0627 vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
0628                  struct ttm_validate_buffer *val_buf)
0629 {
0630     struct list_head val_list;
0631 
0632     if (likely(val_buf->bo == NULL))
0633         return;
0634 
0635     INIT_LIST_HEAD(&val_list);
0636     list_add_tail(&val_buf->head, &val_list);
0637     ttm_eu_backoff_reservation(ticket, &val_list);
0638     ttm_bo_put(val_buf->bo);
0639     val_buf->bo = NULL;
0640 }
0641 
0642 /**
0643  * vmw_resource_do_evict - Evict a resource, and transfer its data
0644  *                         to a backup buffer.
0645  *
0646  * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
0647  * @res:            The resource to evict.
0648  * @interruptible:  Whether to wait interruptible.
0649  */
0650 static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
0651                  struct vmw_resource *res, bool interruptible)
0652 {
0653     struct ttm_validate_buffer val_buf;
0654     const struct vmw_res_func *func = res->func;
0655     int ret;
0656 
0657     BUG_ON(!func->may_evict);
0658 
0659     val_buf.bo = NULL;
0660     val_buf.num_shared = 0;
0661     ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
0662     if (unlikely(ret != 0))
0663         return ret;
0664 
0665     if (unlikely(func->unbind != NULL &&
0666              (!func->needs_backup || vmw_resource_mob_attached(res)))) {
0667         ret = func->unbind(res, res->res_dirty, &val_buf);
0668         if (unlikely(ret != 0))
0669             goto out_no_unbind;
0670         vmw_resource_mob_detach(res);
0671     }
0672     ret = func->destroy(res);
0673     res->backup_dirty = true;
0674     res->res_dirty = false;
0675 out_no_unbind:
0676     vmw_resource_backoff_reservation(ticket, &val_buf);
0677 
0678     return ret;
0679 }
0680 
0681 
0682 /**
0683  * vmw_resource_validate - Make a resource up-to-date and visible
0684  *                         to the device.
0685  * @res: The resource to make visible to the device.
0686  * @intr: Perform waits interruptible if possible.
0687  * @dirtying: Pending GPU operation will dirty the resource
0688  *
0689  * On successful return, any backup DMA buffer pointed to by @res->backup will
0690  * be reserved and validated.
0691  * On hardware resource shortage, this function will repeatedly evict
0692  * resources of the same type until the validation succeeds.
0693  *
0694  * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
0695  * on failure.
0696  */
0697 int vmw_resource_validate(struct vmw_resource *res, bool intr,
0698               bool dirtying)
0699 {
0700     int ret;
0701     struct vmw_resource *evict_res;
0702     struct vmw_private *dev_priv = res->dev_priv;
0703     struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
0704     struct ttm_validate_buffer val_buf;
0705     unsigned err_count = 0;
0706 
0707     if (!res->func->create)
0708         return 0;
0709 
0710     val_buf.bo = NULL;
0711     val_buf.num_shared = 0;
0712     if (res->backup)
0713         val_buf.bo = &res->backup->base;
0714     do {
0715         ret = vmw_resource_do_validate(res, &val_buf, dirtying);
0716         if (likely(ret != -EBUSY))
0717             break;
0718 
0719         spin_lock(&dev_priv->resource_lock);
0720         if (list_empty(lru_list) || !res->func->may_evict) {
0721             DRM_ERROR("Out of device resources "
0722                   "for %s.\n", res->func->type_name);
0723             ret = -EBUSY;
0724             spin_unlock(&dev_priv->resource_lock);
0725             break;
0726         }
0727 
0728         evict_res = vmw_resource_reference
0729             (list_first_entry(lru_list, struct vmw_resource,
0730                       lru_head));
0731         list_del_init(&evict_res->lru_head);
0732 
0733         spin_unlock(&dev_priv->resource_lock);
0734 
0735         /* Trylock backup buffers with a NULL ticket. */
0736         ret = vmw_resource_do_evict(NULL, evict_res, intr);
0737         if (unlikely(ret != 0)) {
0738             spin_lock(&dev_priv->resource_lock);
0739             list_add_tail(&evict_res->lru_head, lru_list);
0740             spin_unlock(&dev_priv->resource_lock);
0741             if (ret == -ERESTARTSYS ||
0742                 ++err_count > VMW_RES_EVICT_ERR_COUNT) {
0743                 vmw_resource_unreference(&evict_res);
0744                 goto out_no_validate;
0745             }
0746         }
0747 
0748         vmw_resource_unreference(&evict_res);
0749     } while (1);
0750 
0751     if (unlikely(ret != 0))
0752         goto out_no_validate;
0753     else if (!res->func->needs_backup && res->backup) {
0754         WARN_ON_ONCE(vmw_resource_mob_attached(res));
0755         vmw_bo_unreference(&res->backup);
0756     }
0757 
0758     return 0;
0759 
0760 out_no_validate:
0761     return ret;
0762 }
0763 
0764 
0765 /**
0766  * vmw_resource_unbind_list - Unbind all resources attached to a backup mob
0767  *
0768  * @vbo: Pointer to the current backing MOB.
0769  *
0770  * Evicts the Guest Backed hardware resource if the backup
0771  * buffer is being moved out of MOB memory.
0772  * Note that this function will not race with the resource
0773  * validation code, since resource validation and eviction
0774  * both require the backup buffer to be reserved.
0775  */
0776 void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
0777 {
0778     struct ttm_validate_buffer val_buf = {
0779         .bo = &vbo->base,
0780         .num_shared = 0
0781     };
0782 
0783     dma_resv_assert_held(vbo->base.base.resv);
0784     while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
0785         struct rb_node *node = vbo->res_tree.rb_node;
0786         struct vmw_resource *res =
0787             container_of(node, struct vmw_resource, mob_node);
0788 
0789         if (!WARN_ON_ONCE(!res->func->unbind))
0790             (void) res->func->unbind(res, res->res_dirty, &val_buf);
0791 
0792         res->backup_dirty = true;
0793         res->res_dirty = false;
0794         vmw_resource_mob_detach(res);
0795     }
0796 
0797     (void) ttm_bo_wait(&vbo->base, false, false);
0798 }
0799 
0800 
0801 /**
0802  * vmw_query_readback_all - Read back cached query states
0803  *
0804  * @dx_query_mob: Buffer containing the DX query MOB
0805  *
0806  * Read back cached states from the device if they exist.  This function
0807  * assumes that the binding_mutex is held.
0808  */
0809 int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
0810 {
0811     struct vmw_resource *dx_query_ctx;
0812     struct vmw_private *dev_priv;
0813     struct {
0814         SVGA3dCmdHeader header;
0815         SVGA3dCmdDXReadbackAllQuery body;
0816     } *cmd;
0817 
0818 
0819     /* No query bound, so do nothing */
0820     if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
0821         return 0;
0822 
0823     dx_query_ctx = dx_query_mob->dx_query_ctx;
0824     dev_priv     = dx_query_ctx->dev_priv;
0825 
0826     cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
0827     if (unlikely(cmd == NULL))
0828         return -ENOMEM;
0829 
0830     cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
0831     cmd->header.size = sizeof(cmd->body);
0832     cmd->body.cid    = dx_query_ctx->id;
0833 
0834     vmw_cmd_commit(dev_priv, sizeof(*cmd));
0835 
0836     /* Triggers a rebind the next time the affected context is bound */
0837     dx_query_mob->dx_query_ctx = NULL;
0838 
0839     return 0;
0840 }
0841 
0842 
0843 
0844 /**
0845  * vmw_query_move_notify - Read back cached query states
0846  *
0847  * @bo: The TTM buffer object about to move.
0848  * @old_mem: The memory region @bo is moving from.
0849  * @new_mem: The memory region @bo is moving to.
0850  *
0851  * Called before the query MOB is swapped out to read back cached query
0852  * states from the device.
0853  */
0854 void vmw_query_move_notify(struct ttm_buffer_object *bo,
0855                struct ttm_resource *old_mem,
0856                struct ttm_resource *new_mem)
0857 {
0858     struct vmw_buffer_object *dx_query_mob;
0859     struct ttm_device *bdev = bo->bdev;
0860     struct vmw_private *dev_priv;
0861 
0862     dev_priv = container_of(bdev, struct vmw_private, bdev);
0863 
0864     mutex_lock(&dev_priv->binding_mutex);
0865 
0866     /* If BO is being moved from MOB to system memory */
0867     if (new_mem->mem_type == TTM_PL_SYSTEM &&
0868         old_mem->mem_type == VMW_PL_MOB) {
0869         struct vmw_fence_obj *fence;
0870 
0871         dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
0872         if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
0873             mutex_unlock(&dev_priv->binding_mutex);
0874             return;
0875         }
0876 
0877         (void) vmw_query_readback_all(dx_query_mob);
0878         mutex_unlock(&dev_priv->binding_mutex);
0879 
0880         /* Create a fence and attach the BO to it */
0881         (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
0882         vmw_bo_fence_single(bo, fence);
0883 
0884         if (fence != NULL)
0885             vmw_fence_obj_unreference(&fence);
0886 
0887         (void) ttm_bo_wait(bo, false, false);
0888     } else
0889         mutex_unlock(&dev_priv->binding_mutex);
0890 }
0891 
0892 /**
0893  * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
0894  *
0895  * @res:            The resource being queried.
0896  */
0897 bool vmw_resource_needs_backup(const struct vmw_resource *res)
0898 {
0899     return res->func->needs_backup;
0900 }
0901 
0902 /**
0903  * vmw_resource_evict_type - Evict all resources of a specific type
0904  *
0905  * @dev_priv:       Pointer to a device private struct
0906  * @type:           The resource type to evict
0907  *
0908  * To avoid thrashing or starvation, or as part of the hibernation sequence,
0909  * try to evict all evictable resources of a specific type.
0910  */
0911 static void vmw_resource_evict_type(struct vmw_private *dev_priv,
0912                     enum vmw_res_type type)
0913 {
0914     struct list_head *lru_list = &dev_priv->res_lru[type];
0915     struct vmw_resource *evict_res;
0916     unsigned err_count = 0;
0917     int ret;
0918     struct ww_acquire_ctx ticket;
0919 
0920     do {
0921         spin_lock(&dev_priv->resource_lock);
0922 
0923         if (list_empty(lru_list))
0924             goto out_unlock;
0925 
0926         evict_res = vmw_resource_reference(
0927             list_first_entry(lru_list, struct vmw_resource,
0928                      lru_head));
0929         list_del_init(&evict_res->lru_head);
0930         spin_unlock(&dev_priv->resource_lock);
0931 
0932         /* Wait-lock backup buffers, using a ticket. */
0933         ret = vmw_resource_do_evict(&ticket, evict_res, false);
0934         if (unlikely(ret != 0)) {
0935             spin_lock(&dev_priv->resource_lock);
0936             list_add_tail(&evict_res->lru_head, lru_list);
0937             spin_unlock(&dev_priv->resource_lock);
0938             if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
0939                 vmw_resource_unreference(&evict_res);
0940                 return;
0941             }
0942         }
0943 
0944         vmw_resource_unreference(&evict_res);
0945     } while (1);
0946 
0947 out_unlock:
0948     spin_unlock(&dev_priv->resource_lock);
0949 }
0950 
0951 /**
0952  * vmw_resource_evict_all - Evict all evictable resources
0953  *
0954  * @dev_priv:       Pointer to a device private struct
0955  *
0956  * To avoid thrashing or starvation, or as part of the hibernation sequence,
0957  * evict all evictable resources. In particular this means that all
0958  * guest-backed resources that are registered with the device are
0959  * evicted and the OTable becomes clean.
0960  */
0961 void vmw_resource_evict_all(struct vmw_private *dev_priv)
0962 {
0963     enum vmw_res_type type;
0964 
0965     mutex_lock(&dev_priv->cmdbuf_mutex);
0966 
0967     for (type = 0; type < vmw_res_max; ++type)
0968         vmw_resource_evict_type(dev_priv, type);
0969 
0970     mutex_unlock(&dev_priv->cmdbuf_mutex);
0971 }
0972 
0973 /*
0974  * vmw_resource_pin - Add a pin reference on a resource
0975  *
0976  * @res: The resource to add a pin reference on
0977  *
0978  * This function adds a pin reference, and if needed validates the resource.
0979  * Having a pin reference means that the resource can never be evicted, and
0980  * its id will never change as long as there is a pin reference.
0981  * This function returns 0 on success and a negative error code on failure.
0982  */
0983 int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
0984 {
0985     struct ttm_operation_ctx ctx = { interruptible, false };
0986     struct vmw_private *dev_priv = res->dev_priv;
0987     int ret;
0988 
0989     mutex_lock(&dev_priv->cmdbuf_mutex);
0990     ret = vmw_resource_reserve(res, interruptible, false);
0991     if (ret)
0992         goto out_no_reserve;
0993 
0994     if (res->pin_count == 0) {
0995         struct vmw_buffer_object *vbo = NULL;
0996 
0997         if (res->backup) {
0998             vbo = res->backup;
0999 
1000             ret = ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
1001             if (ret)
1002                 goto out_no_validate;
1003             if (!vbo->base.pin_count) {
1004                 ret = ttm_bo_validate
1005                     (&vbo->base,
1006                      res->func->backup_placement,
1007                      &ctx);
1008                 if (ret) {
1009                     ttm_bo_unreserve(&vbo->base);
1010                     goto out_no_validate;
1011                 }
1012             }
1013 
1014             /* Do we really need to pin the MOB as well? */
1015             vmw_bo_pin_reserved(vbo, true);
1016         }
1017         ret = vmw_resource_validate(res, interruptible, true);
1018         if (vbo)
1019             ttm_bo_unreserve(&vbo->base);
1020         if (ret)
1021             goto out_no_validate;
1022     }
1023     res->pin_count++;
1024 
1025 out_no_validate:
1026     vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
1027 out_no_reserve:
1028     mutex_unlock(&dev_priv->cmdbuf_mutex);
1029 
1030     return ret;
1031 }
1032 
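/*
 * A minimal usage sketch for the pin interface; the caller shown is an
 * assumption (for example a hypothetical display path that needs a stable
 * resource id):
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *
 *	... the resource now cannot be evicted and its id will not change ...
 *
 *	vmw_resource_unpin(res);
 */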
1033 /**
1034  * vmw_resource_unpin - Remove a pin reference from a resource
1035  *
1036  * @res: The resource to remove a pin reference from
1037  *
1038  * Having a pin reference means that the resource can never be evicted, and
1039  * its id will never change as long as there is a pin reference.
1040  */
1041 void vmw_resource_unpin(struct vmw_resource *res)
1042 {
1043     struct vmw_private *dev_priv = res->dev_priv;
1044     int ret;
1045 
1046     mutex_lock(&dev_priv->cmdbuf_mutex);
1047 
1048     ret = vmw_resource_reserve(res, false, true);
1049     WARN_ON(ret);
1050 
1051     WARN_ON(res->pin_count == 0);
1052     if (--res->pin_count == 0 && res->backup) {
1053         struct vmw_buffer_object *vbo = res->backup;
1054 
1055         (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
1056         vmw_bo_pin_reserved(vbo, false);
1057         ttm_bo_unreserve(&vbo->base);
1058     }
1059 
1060     vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
1061 
1062     mutex_unlock(&dev_priv->cmdbuf_mutex);
1063 }
1064 
1065 /**
1066  * vmw_res_type - Return the resource type
1067  *
1068  * @res: Pointer to the resource
1069  */
1070 enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
1071 {
1072     return res->func->res_type;
1073 }
1074 
1075 /**
1076  * vmw_resource_dirty_update - Update a resource's dirty tracker with a
1077  * sequential range of touched backing store memory.
1078  * @res: The resource.
1079  * @start: The first page touched.
1080  * @end: The last page touched + 1.
1081  */
1082 void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
1083                    pgoff_t end)
1084 {
1085     if (res->dirty)
1086         res->func->dirty_range_add(res, start << PAGE_SHIFT,
1087                        end << PAGE_SHIFT);
1088 }
1089 
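/*
 * A minimal sketch of feeding a byte range into the dirty tracker above;
 * @offset and @size are assumptions standing in for a real caller's values:
 *
 *	pgoff_t first = offset >> PAGE_SHIFT;
 *	pgoff_t last = (offset + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 *
 *	vmw_resource_dirty_update(res, first, last);
 *
 * Note that @end is exclusive, i.e. the last page touched plus one.
 */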
1090 /**
1091  * vmw_resources_clean - Clean resources intersecting a mob range
1092  * @vbo: The mob buffer object
1093  * @start: The mob page offset starting the range
1094  * @end: The mob page offset ending the range
1095  * @num_prefault: Returns how many pages including the first have been
1096  * cleaned and are ok to prefault
1097  */
1098 int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
1099             pgoff_t end, pgoff_t *num_prefault)
1100 {
1101     struct rb_node *cur = vbo->res_tree.rb_node;
1102     struct vmw_resource *found = NULL;
1103     unsigned long res_start = start << PAGE_SHIFT;
1104     unsigned long res_end = end << PAGE_SHIFT;
1105     unsigned long last_cleaned = 0;
1106 
1107     /*
1108      * Find the resource with the lowest backup_offset that intersects the
1109      * range.
1110      */
1111     while (cur) {
1112         struct vmw_resource *cur_res =
1113             container_of(cur, struct vmw_resource, mob_node);
1114 
1115         if (cur_res->backup_offset >= res_end) {
1116             cur = cur->rb_left;
1117         } else if (cur_res->backup_offset + cur_res->backup_size <=
1118                res_start) {
1119             cur = cur->rb_right;
1120         } else {
1121             found = cur_res;
1122             cur = cur->rb_left;
1123             /* Continue to look for resources with lower offsets */
1124         }
1125     }
1126 
1127     /*
1128      * In order of increasing backup_offset, clean dirty resources
1129      * intersecting the range.
1130      */
1131     while (found) {
1132         if (found->res_dirty) {
1133             int ret;
1134 
1135             if (!found->func->clean)
1136                 return -EINVAL;
1137 
1138             ret = found->func->clean(found);
1139             if (ret)
1140                 return ret;
1141 
1142             found->res_dirty = false;
1143         }
1144         last_cleaned = found->backup_offset + found->backup_size;
1145         cur = rb_next(&found->mob_node);
1146         if (!cur)
1147             break;
1148 
1149         found = container_of(cur, struct vmw_resource, mob_node);
1150         if (found->backup_offset >= res_end)
1151             break;
1152     }
1153 
1154     /*
1155      * Set the number of pages allowed for prefaulting and fence the buffer object.
1156      */
1157     *num_prefault = 1;
1158     if (last_cleaned > res_start) {
1159         struct ttm_buffer_object *bo = &vbo->base;
1160 
1161         *num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
1162                               PAGE_SIZE);
1163         vmw_bo_fence_single(bo, NULL);
1164     }
1165 
1166     return 0;
1167 }