// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/dma-fence-array.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/seq_file.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage a container of
 * dma_fence objects associated with a resource. A reservation object
 * can have any number of fences attached to it. Each fence carries a usage
 * parameter determining how the operation represented by the fence is using
 * the resource. The RCU mechanism is used to protect read access to fences
 * from locked write-side updates.
 *
 * See struct dma_resv for more details.
 */

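/*
 * A minimal usage sketch (editorial illustration; the wrapper function is
 * hypothetical, the dma_resv calls are the ones implemented below). A slot
 * must be reserved under the lock before every dma_resv_add_fence():
 *
 *    int publish_fence(struct dma_resv *resv, struct dma_fence *fence)
 *    {
 *        int ret;
 *
 *        ret = dma_resv_lock(resv, NULL);
 *        if (ret)
 *            return ret;
 *        ret = dma_resv_reserve_fences(resv, 1);
 *        if (!ret)
 *            dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_WRITE);
 *        dma_resv_unlock(resv);
 *        return ret;
 *    }
 */
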
DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/* Mask for the lower fence pointer bits */
#define DMA_RESV_LIST_MASK  0x3

struct dma_resv_list {
    struct rcu_head rcu;
    u32 num_fences, max_fences;
    struct dma_fence __rcu *table[];
};

/* Extract the fence and usage flags from an RCU protected entry in the list. */
static void dma_resv_list_entry(struct dma_resv_list *list, unsigned int index,
                struct dma_resv *resv, struct dma_fence **fence,
                enum dma_resv_usage *usage)
{
    long tmp;

    tmp = (long)rcu_dereference_check(list->table[index],
                      resv ? dma_resv_held(resv) : true);
    *fence = (struct dma_fence *)(tmp & ~DMA_RESV_LIST_MASK);
    if (usage)
        *usage = tmp & DMA_RESV_LIST_MASK;
}

/* Set the fence and usage flags at the specific index in the list. */
static void dma_resv_list_set(struct dma_resv_list *list,
                  unsigned int index,
                  struct dma_fence *fence,
                  enum dma_resv_usage usage)
{
    long tmp = ((long)fence) | usage;

    RCU_INIT_POINTER(list->table[index], (struct dma_fence *)tmp);
}
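
/*
 * Editorial note: the two-bit tag above is safe because struct dma_fence
 * contains 64-bit members, so every fence pointer stored in the table is at
 * least 4-byte aligned and its low two bits are always zero. The invariant,
 * as a sketch:
 *
 *    static_assert(__alignof__(struct dma_fence) >= 4);
 *    static_assert(DMA_RESV_USAGE_BOOKKEEP <= DMA_RESV_LIST_MASK);
 */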

/*
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * max_fences.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int max_fences)
{
    struct dma_resv_list *list;

    list = kmalloc(struct_size(list, table, max_fences), GFP_KERNEL);
    if (!list)
        return NULL;

    list->max_fences = (ksize(list) - offsetof(typeof(*list), table)) /
        sizeof(*list->table);

    return list;
}

/* Free a dma_resv_list and make sure to drop all references. */
static void dma_resv_list_free(struct dma_resv_list *list)
{
    unsigned int i;

    if (!list)
        return;

    for (i = 0; i < list->num_fences; ++i) {
        struct dma_fence *fence;

        dma_resv_list_entry(list, i, NULL, &fence, NULL);
        dma_fence_put(fence);
    }
    kfree_rcu(list, rcu);
}

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
    ww_mutex_init(&obj->lock, &reservation_ww_class);

    RCU_INIT_POINTER(obj->fences, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
    /*
     * This object should be dead and all references must have
     * been released to it, so no need to be protected with rcu.
     */
    dma_resv_list_free(rcu_dereference_protected(obj->fences, true));
    ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);

/* Dereference the fences while ensuring RCU rules */
static inline struct dma_resv_list *dma_resv_fences_list(struct dma_resv *obj)
{
    return rcu_dereference_check(obj->fences, dma_resv_held(obj));
}

/**
 * dma_resv_reserve_fences - Reserve space to add fences to a dma_resv object.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_fence().  Must be called with @obj
 * locked through dma_resv_lock().
 *
 * Note that the preallocated slots need to be re-reserved if @obj is unlocked
 * at any time before calling dma_resv_add_fence(). This is validated when
 * CONFIG_DEBUG_MUTEXES is enabled.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
{
    struct dma_resv_list *old, *new;
    unsigned int i, j, k, max;

    dma_resv_assert_held(obj);

    old = dma_resv_fences_list(obj);
    if (old && old->max_fences) {
        if ((old->num_fences + num_fences) <= old->max_fences)
            return 0;
        max = max(old->num_fences + num_fences, old->max_fences * 2);
    } else {
        max = max(4ul, roundup_pow_of_two(num_fences));
    }

    new = dma_resv_list_alloc(max);
    if (!new)
        return -ENOMEM;

    /*
     * no need to bump fence refcounts, rcu_read access
     * requires the use of kref_get_unless_zero, and the
     * references from the old struct are carried over to
     * the new.
     */
    for (i = 0, j = 0, k = max; i < (old ? old->num_fences : 0); ++i) {
        enum dma_resv_usage usage;
        struct dma_fence *fence;

        dma_resv_list_entry(old, i, obj, &fence, &usage);
        if (dma_fence_is_signaled(fence))
            RCU_INIT_POINTER(new->table[--k], fence);
        else
            dma_resv_list_set(new, j++, fence, usage);
    }
    new->num_fences = j;

    /*
     * We are not changing the effective set of fences here so can
     * merely update the pointer to the new array; both existing
     * readers and new readers will see exactly the same set of
     * active (unsignaled) fences. Individual fences and the
     * old array are protected by RCU and so will not vanish under
     * the gaze of the rcu_read_lock() readers.
     */
    rcu_assign_pointer(obj->fences, new);

    if (!old)
        return 0;

    /* Drop the references to the signaled fences */
    for (i = k; i < max; ++i) {
        struct dma_fence *fence;

        fence = rcu_dereference_protected(new->table[i],
                          dma_resv_held(obj));
        dma_fence_put(fence);
    }
    kfree_rcu(old, rcu);

    return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_fences);
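
/*
 * Editorial note on the reservation contract: the slots reserved here only
 * stay valid while the lock is continuously held, which is what
 * dma_resv_reset_max_fences() below enforces under CONFIG_DEBUG_MUTEXES.
 * Sketch (illustrative, not from this file):
 *
 *    dma_resv_lock(resv, NULL);
 *    dma_resv_reserve_fences(resv, 2);
 *    dma_resv_unlock(resv);
 *
 *    dma_resv_lock(resv, NULL);
 *    dma_resv_add_fence(resv, fence, usage);    [bug: must re-reserve]
 *    dma_resv_unlock(resv);
 */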

#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_max_fences - reset fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved fence slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_fences(). See also
 * &dma_resv_list.max_fences.
 */
void dma_resv_reset_max_fences(struct dma_resv *obj)
{
    struct dma_resv_list *fences = dma_resv_fences_list(obj);

    dma_resv_assert_held(obj);

    /* Test fence slot reservation */
    if (fences)
        fences->max_fences = fences->num_fences;
}
EXPORT_SYMBOL(dma_resv_reset_max_fences);
#endif

/**
 * dma_resv_add_fence - Add a fence to the dma_resv obj
 * @obj: the reservation object
 * @fence: the fence to add
 * @usage: how the fence is used, see enum dma_resv_usage
 *
 * Add a fence to a slot. @obj must be locked with dma_resv_lock(), and
 * dma_resv_reserve_fences() must have been called first.
 *
 * See also &dma_resv.fence for a discussion of the semantics.
 */
void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
            enum dma_resv_usage usage)
{
    struct dma_resv_list *fobj;
    struct dma_fence *old;
    unsigned int i, count;

    dma_fence_get(fence);

    dma_resv_assert_held(obj);

    /* Drivers should not add containers here, instead add each fence
     * individually.
     */
    WARN_ON(dma_fence_is_container(fence));

    fobj = dma_resv_fences_list(obj);
    count = fobj->num_fences;

    for (i = 0; i < count; ++i) {
        enum dma_resv_usage old_usage;

        dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
        if ((old->context == fence->context && old_usage >= usage &&
             dma_fence_is_later(fence, old)) ||
            dma_fence_is_signaled(old)) {
            dma_resv_list_set(fobj, i, fence, usage);
            dma_fence_put(old);
            return;
        }
    }

    BUG_ON(fobj->num_fences >= fobj->max_fences);
    count++;

    dma_resv_list_set(fobj, i, fence, usage);
    /* pointer update must be visible before we extend the num_fences */
    smp_store_mb(fobj->num_fences, count);
}
EXPORT_SYMBOL(dma_resv_add_fence);

/**
 * dma_resv_replace_fences - replace fences in the dma_resv obj
 * @obj: the reservation object
 * @context: the context of the fences to replace
 * @replacement: the new fence to use instead
 * @usage: how the new fence is used, see enum dma_resv_usage
 *
 * Replace fences with a specified context with a new fence. Only valid if the
 * operation represented by the original fence no longer has access to the
 * resources represented by the dma_resv object when the new fence completes.
 *
 * An example for using this is replacing a preemption fence with a page table
 * update fence which makes the resource inaccessible.
 */
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
                 struct dma_fence *replacement,
                 enum dma_resv_usage usage)
{
    struct dma_resv_list *list;
    unsigned int i;

    dma_resv_assert_held(obj);

    list = dma_resv_fences_list(obj);
    for (i = 0; list && i < list->num_fences; ++i) {
        struct dma_fence *old;

        dma_resv_list_entry(list, i, obj, &old, NULL);
        if (old->context != context)
            continue;

        dma_resv_list_set(list, i, dma_fence_get(replacement), usage);
        dma_fence_put(old);
    }
}
EXPORT_SYMBOL(dma_resv_replace_fences);

/* Restart the unlocked iteration by initializing the cursor object. */
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
    cursor->index = 0;
    cursor->num_fences = 0;
    cursor->fences = dma_resv_fences_list(cursor->obj);
    if (cursor->fences)
        cursor->num_fences = cursor->fences->num_fences;
    cursor->is_restarted = true;
}

/* Walk to the next not signaled fence and grab a reference to it */
static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
{
    if (!cursor->fences)
        return;

    do {
        /* Drop the reference from the previous round */
        dma_fence_put(cursor->fence);

        if (cursor->index >= cursor->num_fences) {
            cursor->fence = NULL;
            break;
        }

        dma_resv_list_entry(cursor->fences, cursor->index++,
                    cursor->obj, &cursor->fence,
                    &cursor->fence_usage);
        cursor->fence = dma_fence_get_rcu(cursor->fence);
        if (!cursor->fence) {
            dma_resv_iter_restart_unlocked(cursor);
            continue;
        }

        if (!dma_fence_is_signaled(cursor->fence) &&
            cursor->usage >= cursor->fence_usage)
            break;
    } while (true);
}

/**
 * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next_unlocked().
 *
 * Beware that the iterator can be restarted.  Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_first() whenever possible.
 *
 * Returns the first fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
{
    rcu_read_lock();
    do {
        dma_resv_iter_restart_unlocked(cursor);
        dma_resv_iter_walk_unlocked(cursor);
    } while (dma_resv_fences_list(cursor->obj) != cursor->fences);
    rcu_read_unlock();

    return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_first_unlocked);
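
/*
 * Typical unlocked iteration (editorial sketch; the counting is an assumed
 * example). Accumulated state must be reset whenever the iterator reports a
 * restart:
 *
 *    struct dma_resv_iter cursor;
 *    struct dma_fence *fence;
 *    unsigned int count = 0;
 *
 *    dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ);
 *    dma_resv_for_each_fence_unlocked(&cursor, fence) {
 *        if (dma_resv_iter_is_restarted(&cursor))
 *            count = 0;
 *        ++count;
 *    }
 *    dma_resv_iter_end(&cursor);
 */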

/**
 * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Beware that the iterator can be restarted.  Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_next() whenever possible.
 *
 * Returns the next fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
{
    bool restart;

    rcu_read_lock();
    cursor->is_restarted = false;
    restart = dma_resv_fences_list(cursor->obj) != cursor->fences;
    do {
        if (restart)
            dma_resv_iter_restart_unlocked(cursor);
        dma_resv_iter_walk_unlocked(cursor);
        restart = true;
    } while (dma_resv_fences_list(cursor->obj) != cursor->fences);
    rcu_read_unlock();

    return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_next_unlocked);

/**
 * dma_resv_iter_first - first fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next().
 *
 * Return the first fence in the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
{
    struct dma_fence *fence;

    dma_resv_assert_held(cursor->obj);

    cursor->index = 0;
    cursor->fences = dma_resv_fences_list(cursor->obj);

    fence = dma_resv_iter_next(cursor);
    cursor->is_restarted = true;
    return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_first);

/**
 * dma_resv_iter_next - next fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Return the next fence from the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
{
    struct dma_fence *fence;

    dma_resv_assert_held(cursor->obj);

    cursor->is_restarted = false;

    do {
        if (!cursor->fences ||
            cursor->index >= cursor->fences->num_fences)
            return NULL;

        dma_resv_list_entry(cursor->fences, cursor->index++,
                    cursor->obj, &fence, &cursor->fence_usage);
    } while (cursor->fence_usage > cursor->usage);

    return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_next);
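
/*
 * With the dma_resv object locked the iteration is stable and never
 * restarts (editorial sketch; resv is an assumed locked object):
 *
 *    struct dma_resv_iter cursor;
 *    struct dma_fence *fence;
 *    bool all_signaled = true;
 *
 *    dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_WRITE, fence)
 *        all_signaled &= dma_fence_is_signaled(fence);
 */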

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. The @dst lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
    struct dma_resv_iter cursor;
    struct dma_resv_list *list;
    struct dma_fence *f;

    dma_resv_assert_held(dst);

    list = NULL;

    dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_BOOKKEEP);
    dma_resv_for_each_fence_unlocked(&cursor, f) {

        if (dma_resv_iter_is_restarted(&cursor)) {
            dma_resv_list_free(list);

            list = dma_resv_list_alloc(cursor.num_fences);
            if (!list) {
                dma_resv_iter_end(&cursor);
                return -ENOMEM;
            }
            list->num_fences = 0;
        }

        dma_fence_get(f);
        dma_resv_list_set(list, list->num_fences++, f,
                  dma_resv_iter_usage(&cursor));
    }
    dma_resv_iter_end(&cursor);

    list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst));
    dma_resv_list_free(list);
    return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);

/**
 * dma_resv_get_fences - Get an object's fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @num_fences: the number of fences returned
 * @fences: the array of fence ptrs returned (array is krealloc'd to the
 * required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object without holding the update
 * side lock. Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
            unsigned int *num_fences, struct dma_fence ***fences)
{
    struct dma_resv_iter cursor;
    struct dma_fence *fence;

    *num_fences = 0;
    *fences = NULL;

    dma_resv_iter_begin(&cursor, obj, usage);
    dma_resv_for_each_fence_unlocked(&cursor, fence) {

        if (dma_resv_iter_is_restarted(&cursor)) {
            unsigned int count;

            while (*num_fences)
                dma_fence_put((*fences)[--(*num_fences)]);

            count = cursor.num_fences + 1;

            /* Eventually re-allocate the array */
            *fences = krealloc_array(*fences, count,
                         sizeof(void *),
                         GFP_KERNEL);
            if (count && !*fences) {
                dma_resv_iter_end(&cursor);
                return -ENOMEM;
            }
        }

        (*fences)[(*num_fences)++] = dma_fence_get(fence);
    }
    dma_resv_iter_end(&cursor);

    return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
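
/*
 * Caller-side sketch (editorial illustration): both the array and the fence
 * references handed back belong to the caller and must be released:
 *
 *    struct dma_fence **fences;
 *    unsigned int i, count;
 *    int ret;
 *
 *    ret = dma_resv_get_fences(resv, DMA_RESV_USAGE_READ, &count, &fences);
 *    if (ret)
 *        return ret;
 *    for (i = 0; i < count; ++i)
 *        dma_fence_put(fences[i]);
 *    kfree(fences);
 */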

/**
 * dma_resv_get_singleton - Get a single fence for all the fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @fence: the resulting fence
 *
 * Get a single fence representing all the fences inside the resv object.
 *
 * Warning: This can't be used like this when adding the fence back to the resv
 * object since that can lead to stack corruption when finalizing the
 * dma_fence_array.
 *
 * Returns 0 on success and -ENOMEM on allocation failure.
 */
int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
               struct dma_fence **fence)
{
    struct dma_fence_array *array;
    struct dma_fence **fences;
    unsigned count;
    int r;

    r = dma_resv_get_fences(obj, usage, &count, &fences);
    if (r)
        return r;

    if (count == 0) {
        *fence = NULL;
        return 0;
    }

    if (count == 1) {
        *fence = fences[0];
        kfree(fences);
        return 0;
    }

    array = dma_fence_array_create(count, fences,
                       dma_fence_context_alloc(1),
                       1, false);
    if (!array) {
        while (count--)
            dma_fence_put(fences[count]);
        kfree(fences);
        return -ENOMEM;
    }

    *fence = &array->base;
    return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
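
/*
 * Usage sketch (editorial illustration): the resulting fence may be NULL
 * when the object holds no matching fences, and the caller owns the
 * reference:
 *
 *    struct dma_fence *fence;
 *    int ret;
 *
 *    ret = dma_resv_get_singleton(resv, DMA_RESV_USAGE_WRITE, &fence);
 *    if (ret)
 *        return ret;
 *    if (fence) {
 *        dma_fence_wait(fence, false);
 *        dma_fence_put(fence);
 *    }
 */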

/**
 * dma_resv_wait_timeout - Wait on a reservation object's fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
               bool intr, unsigned long timeout)
{
    long ret = timeout ? timeout : 1;
    struct dma_resv_iter cursor;
    struct dma_fence *fence;

    dma_resv_iter_begin(&cursor, obj, usage);
    dma_resv_for_each_fence_unlocked(&cursor, fence) {

        ret = dma_fence_wait_timeout(fence, intr, ret);
        if (ret <= 0) {
            dma_resv_iter_end(&cursor);
            return ret;
        }
    }
    dma_resv_iter_end(&cursor);

    return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
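
/*
 * Wait sketch (editorial illustration): convert a relative timeout to
 * jiffies and distinguish the three possible outcomes:
 *
 *    long ret;
 *
 *    ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ, true,
 *                    msecs_to_jiffies(100));
 *    if (ret < 0)
 *        return ret;            [interrupted, e.g. -ERESTARTSYS]
 *    if (ret == 0)
 *        return -ETIMEDOUT;     [timed out]
 *    [ret > 0: all fences signaled, remaining jiffies returned]
 */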

/**
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 *
 * True if all fences are signaled, else false.
 */
bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage)
{
    struct dma_resv_iter cursor;
    struct dma_fence *fence;

    dma_resv_iter_begin(&cursor, obj, usage);
    dma_resv_for_each_fence_unlocked(&cursor, fence) {
        dma_resv_iter_end(&cursor);
        return false;
    }
    dma_resv_iter_end(&cursor);
    return true;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);

/**
 * dma_resv_describe - Dump description of the resv object into seq_file
 * @obj: the reservation object
 * @seq: the seq_file to dump the description into
 *
 * Dump a textual description of the fences inside a dma_resv object into the
 * seq_file.
 */
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
    static const char *usage[] = { "kernel", "write", "read", "bookkeep" };
    struct dma_resv_iter cursor;
    struct dma_fence *fence;

    dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) {
        seq_printf(seq, "\t%s fence:",
               usage[dma_resv_iter_usage(&cursor)]);
        dma_fence_describe(fence, seq);
    }
}
EXPORT_SYMBOL_GPL(dma_resv_describe);

#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
    struct mm_struct *mm = mm_alloc();
    struct ww_acquire_ctx ctx;
    struct dma_resv obj;
    struct address_space mapping;
    int ret;

    if (!mm)
        return -ENOMEM;

    dma_resv_init(&obj);
    address_space_init_once(&mapping);

    mmap_read_lock(mm);
    ww_acquire_init(&ctx, &reservation_ww_class);
    ret = dma_resv_lock(&obj, &ctx);
    if (ret == -EDEADLK)
        dma_resv_lock_slow(&obj, &ctx);
    fs_reclaim_acquire(GFP_KERNEL);
    /* for unmap_mapping_range on trylocked buffer objects in shrinkers */
    i_mmap_lock_write(&mapping);
    i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
    lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
    __dma_fence_might_wait();
    lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
    __dma_fence_might_wait();
#endif
    fs_reclaim_release(GFP_KERNEL);
    ww_mutex_unlock(&obj.lock);
    ww_acquire_fini(&ctx);
    mmap_read_unlock(mm);

    mmput(mm);

    return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif