/*
 * Header file for reservations for dma-buf and ttm
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Copyright (C) 2012-2013 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef _LINUX_RESERVATION_H
#define _LINUX_RESERVATION_H

#include <linux/ww_mutex.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/rcupdate.h>

extern struct ww_class reservation_ww_class;

struct dma_resv_list;
/**
 * enum dma_resv_usage - how the fences from a dma_resv obj are used
 *
 * This enum describes the different use cases for a dma_resv object and
 * controls which fences are returned when queried.
 *
 * An important fact is that there is the order KERNEL<WRITE<READ<BOOKKEEP and
 * when the dma_resv object is asked for fences for one use case the fences
 * for the lower use cases are returned as well.
 *
 * For example, when asking for WRITE fences the KERNEL fences are returned
 * as well. Similarly, when asked for READ fences both the WRITE and KERNEL
 * fences are returned too.
 */
enum dma_resv_usage {
    /**
     * @DMA_RESV_USAGE_KERNEL: For in-kernel memory management only.
     *
     * This should only be used for things like copying or clearing memory
     * with a DMA hardware engine for the purpose of kernel memory
     * management.
     *
     * Drivers must *always* wait for those fences before accessing the
     * resource protected by the dma_resv object. The only exception is
     * when the resource is known to be locked down in place by having
     * pinned it previously.
     */
    DMA_RESV_USAGE_KERNEL,

    /**
     * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
     *
     * This should only be used for userspace command submissions which add
     * an implicit write dependency.
     */
    DMA_RESV_USAGE_WRITE,

    /**
     * @DMA_RESV_USAGE_READ: Implicit read synchronization.
     *
     * This should only be used for userspace command submissions which add
     * an implicit read dependency.
     */
    DMA_RESV_USAGE_READ,

    /**
     * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync.
     *
     * This should be used by submissions which don't want to participate
     * in implicit synchronization.
     *
     * The most common cases are preemption fences as well as page table
     * updates and their TLB flushes.
     */
    DMA_RESV_USAGE_BOOKKEEP
};
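
/*
 * Example: an illustrative sketch (not part of the original header) of the
 * ordering described above. With @obj locked, asking for READ fences also
 * yields the KERNEL and WRITE fences, but never the BOOKKEEP ones;
 * handle_fence() is a hypothetical driver callback:
 *
 *    struct dma_resv_iter cursor;
 *    struct dma_fence *fence;
 *
 *    dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence)
 *        handle_fence(fence);
 */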

/**
 * dma_resv_usage_rw - helper for implicit sync
 * @write: true if we create a new implicit sync write
 *
 * This returns the implicit synchronization usage for write or read accesses,
 * see enum dma_resv_usage and &dma_buf.resv.
 */
static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
{
    /* This looks confusing at first sight, but is indeed correct.
     *
     * The rationale is that new write operations need to wait for the
     * existing read and write operations to finish.
     * But a new read operation only needs to wait for the existing write
     * operations to finish.
     */
    return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
}
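
/*
 * Example: a minimal sketch (not part of the original header) of how a
 * driver might use this helper when collecting implicit sync dependencies
 * for a command submission; job and add_dependency() are hypothetical
 * driver-side names:
 *
 *    struct dma_resv_iter cursor;
 *    struct dma_fence *fence;
 *    enum dma_resv_usage usage = dma_resv_usage_rw(is_write);
 *
 *    dma_resv_for_each_fence(&cursor, obj, usage, fence)
 *        add_dependency(job, fence);
 */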

/**
 * struct dma_resv - a reservation object manages fences for a buffer
 *
 * This is a container for dma_fence objects which needs to handle multiple use
 * cases.
 *
 * One use is to synchronize cross-driver access to a struct dma_buf, either for
 * dynamic buffer management or just to handle implicit synchronization between
 * different users of the buffer in userspace. See &dma_buf.resv for a more
 * in-depth discussion.
 *
 * The other major use is to manage access and locking within a driver in a
 * buffer based memory manager. struct ttm_buffer_object is the canonical
 * example here, since this is where reservation objects originated from. But
 * use in drivers is spreading and some drivers also manage struct
 * drm_gem_object with the same scheme.
 */
struct dma_resv {
    /**
     * @lock:
     *
     * Update side lock. Don't use directly, instead use the wrapper
     * functions like dma_resv_lock() and dma_resv_unlock().
     *
     * Drivers which use the reservation object to manage memory dynamically
     * also use this lock to protect buffer object state like placement,
     * allocation policies or throughout command submission.
     */
    struct ww_mutex lock;

    /**
     * @fences:
     *
     * Array of fences which were added to the dma_resv object.
     *
     * A new fence is added by calling dma_resv_add_fence(). Since this
     * often needs to be done past the point of no return in command
     * submission it cannot fail, and therefore sufficient slots need to be
     * reserved by calling dma_resv_reserve_fences().
     */
    struct dma_resv_list __rcu *fences;
};
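
/*
 * Example: a minimal sketch (not part of the original header) of the
 * reserve-then-add pattern described for @fences. The slot is reserved while
 * failure can still be handled; adding the fence past the point of no return
 * then cannot fail. submit_job() stands in for the driver-specific point of
 * no return:
 *
 *    dma_resv_lock(obj, NULL);
 *    ret = dma_resv_reserve_fences(obj, 1);
 *    if (ret) {
 *        dma_resv_unlock(obj);
 *        return ret;
 *    }
 *    submit_job(job);
 *    dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_WRITE);
 *    dma_resv_unlock(obj);
 */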

/**
 * struct dma_resv_iter - current position into the dma_resv fences
 *
 * Don't touch this directly in the driver, use the accessor function instead.
 *
 * IMPORTANT
 *
 * When using the lockless iterators like dma_resv_iter_next_unlocked() or
 * dma_resv_for_each_fence_unlocked() beware that the iterator can be restarted.
 * Code which accumulates statistics or similar needs to check for this with
 * dma_resv_iter_is_restarted().
 */
struct dma_resv_iter {
    /** @obj: The dma_resv object we iterate over */
    struct dma_resv *obj;

    /** @usage: Return fences with this usage or lower. */
    enum dma_resv_usage usage;

    /** @fence: the currently handled fence */
    struct dma_fence *fence;

    /** @fence_usage: the usage of the current fence */
    enum dma_resv_usage fence_usage;

    /** @index: index into the shared fences */
    unsigned int index;

    /** @fences: the shared fences; private, *MUST* not dereference */
    struct dma_resv_list *fences;

    /** @num_fences: number of fences */
    unsigned int num_fences;

    /** @is_restarted: true if this is the first returned fence */
    bool is_restarted;
};

struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor);
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor);
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor);
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);

/**
 * dma_resv_iter_begin - initialize a dma_resv_iter object
 * @cursor: The dma_resv_iter object to initialize
 * @obj: The dma_resv object which we want to iterate over
 * @usage: controls which fences to include, see enum dma_resv_usage.
 */
static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
                       struct dma_resv *obj,
                       enum dma_resv_usage usage)
{
    cursor->obj = obj;
    cursor->usage = usage;
    cursor->fence = NULL;
}

/**
 * dma_resv_iter_end - cleanup a dma_resv_iter object
 * @cursor: the dma_resv_iter object which should be cleaned up
 *
 * Make sure that the reference to the fence in the cursor is properly
 * dropped.
 */
static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
{
    dma_fence_put(cursor->fence);
}

/**
 * dma_resv_iter_usage - Return the usage of the current fence
 * @cursor: the cursor of the current position
 *
 * Returns the usage of the currently processed fence.
 */
static inline enum dma_resv_usage
dma_resv_iter_usage(struct dma_resv_iter *cursor)
{
    return cursor->fence_usage;
}

/**
 * dma_resv_iter_is_restarted - test if this is the first fence after a restart
 * @cursor: the cursor with the current position
 *
 * Return true if this is the first fence in an iteration after a restart.
 */
static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
{
    return cursor->is_restarted;
}

/**
 * dma_resv_for_each_fence_unlocked - unlocked fence iterator
 * @cursor: a struct dma_resv_iter pointer
 * @fence: the current fence
 *
 * Iterate over the fences in a struct dma_resv object without holding the
 * &dma_resv.lock, using RCU instead. The cursor needs to be initialized
 * with dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside
 * the iterator a reference to the dma_fence is held and the RCU lock dropped.
 *
 * Beware that the iterator can be restarted when the struct dma_resv for
 * @cursor is modified. Code which accumulates statistics or similar needs to
 * check for this with dma_resv_iter_is_restarted(). For this reason prefer the
 * locked iterator dma_resv_for_each_fence() whenever possible.
 */
#define dma_resv_for_each_fence_unlocked(cursor, fence)         \
    for (fence = dma_resv_iter_first_unlocked(cursor);      \
         fence; fence = dma_resv_iter_next_unlocked(cursor))
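
/*
 * Example: an illustrative sketch (not part of the original header) of
 * restart handling while accumulating state, here counting fences:
 *
 *    struct dma_resv_iter cursor;
 *    struct dma_fence *fence;
 *    unsigned int count = 0;
 *
 *    dma_resv_iter_begin(&cursor, obj, DMA_RESV_USAGE_READ);
 *    dma_resv_for_each_fence_unlocked(&cursor, fence) {
 *        if (dma_resv_iter_is_restarted(&cursor))
 *            count = 0;
 *        ++count;
 *    }
 *    dma_resv_iter_end(&cursor);
 */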

/**
 * dma_resv_for_each_fence - fence iterator
 * @cursor: a struct dma_resv_iter pointer
 * @obj: a dma_resv object pointer
 * @usage: controls which fences to return
 * @fence: the current fence
 *
 * Iterate over the fences in a struct dma_resv object while holding the
 * &dma_resv.lock. @usage controls which fences are returned, see enum
 * dma_resv_usage. The cursor initialisation is part of the iterator and the
 * fence stays valid as long as the lock is held, so no extra reference to
 * the fence is taken.
 */
#define dma_resv_for_each_fence(cursor, obj, usage, fence)  \
    for (dma_resv_iter_begin(cursor, obj, usage),   \
         fence = dma_resv_iter_first(cursor); fence;    \
         fence = dma_resv_iter_next(cursor))
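
/*
 * Example: a minimal sketch (not part of the original header). With the
 * object locked no restart handling and no extra fence references are
 * needed:
 *
 *    struct dma_resv_iter cursor;
 *    struct dma_fence *fence;
 *
 *    dma_resv_lock(obj, NULL);
 *    dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_BOOKKEEP, fence)
 *        pr_info("fence from context %llu\n", fence->context);
 *    dma_resv_unlock(obj);
 */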

#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)

#ifdef CONFIG_DEBUG_MUTEXES
void dma_resv_reset_max_fences(struct dma_resv *obj);
#else
static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {}
#endif

/**
 * dma_resv_lock - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object for exclusive access and modification. Note
 * that the lock is only against other writers; readers will run concurrently
 * with a writer under RCU. The seqlock is used to notify readers if they
 * overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may be locked by itself by passing NULL as @ctx.
 *
 * When a die situation is indicated by returning -EDEADLK all locks held by
 * @ctx must be unlocked and then dma_resv_lock_slow() called on @obj.
 *
 * Unlocked by calling dma_resv_unlock().
 *
 * See also dma_resv_lock_interruptible() for the interruptible variant.
 */
static inline int dma_resv_lock(struct dma_resv *obj,
                struct ww_acquire_ctx *ctx)
{
    return ww_mutex_lock(&obj->lock, ctx);
}
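
/*
 * Example: an illustrative sketch (not part of the original header) of the
 * -EDEADLK dance described above when locking two reservation objects a and
 * b. Error handling is trimmed and the sketch assumes the first lock in a
 * fresh context does not back off, so its return value is not checked:
 *
 *    struct ww_acquire_ctx ctx;
 *
 *    ww_acquire_init(&ctx, &reservation_ww_class);
 *    dma_resv_lock(a, &ctx);
 *    while (dma_resv_lock(b, &ctx) == -EDEADLK) {
 *        dma_resv_unlock(a);
 *        dma_resv_lock_slow(b, &ctx);
 *        swap(a, b);
 *    }
 *    ww_acquire_done(&ctx);
 *
 *    ... access both buffers ...
 *
 *    dma_resv_unlock(a);
 *    dma_resv_unlock(b);
 *    ww_acquire_fini(&ctx);
 */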

/**
 * dma_resv_lock_interruptible - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object interruptibly for exclusive access and
 * modification. Note that the lock is only against other writers; readers
 * will run concurrently with a writer under RCU. The seqlock is used to
 * notify readers if they overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may be locked by itself by passing NULL as @ctx.
 *
 * When a die situation is indicated by returning -EDEADLK all locks held by
 * @ctx must be unlocked and then dma_resv_lock_slow_interruptible() called on
 * @obj.
 *
 * Unlocked by calling dma_resv_unlock().
 */
static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
                          struct ww_acquire_ctx *ctx)
{
    return ww_mutex_lock_interruptible(&obj->lock, ctx);
}

/**
 * dma_resv_lock_slow - slowpath lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object after a die case. This function
 * will sleep until the lock becomes available. See dma_resv_lock() as
 * well.
 *
 * See also dma_resv_lock_slow_interruptible() for the interruptible variant.
 */
static inline void dma_resv_lock_slow(struct dma_resv *obj,
                      struct ww_acquire_ctx *ctx)
{
    ww_mutex_lock_slow(&obj->lock, ctx);
}

/**
 * dma_resv_lock_slow_interruptible - slowpath lock the reservation
 * object, interruptible
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object interruptibly after a die case. This
 * function will sleep until the lock becomes available. See
 * dma_resv_lock_interruptible() as well.
 */
static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
                           struct ww_acquire_ctx *ctx)
{
    return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
}

/**
 * dma_resv_trylock - trylock the reservation object
 * @obj: the reservation object
 *
 * Tries to lock the reservation object for exclusive access and modification.
 * Note that the lock is only against other writers; readers will run
 * concurrently with a writer under RCU. The seqlock is used to notify readers
 * if they overlap with a writer.
 *
 * Also note that since no context is provided, no deadlock protection is
 * possible, which is also not needed for a trylock.
 *
 * Returns true if the lock was acquired, false otherwise.
 */
static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
{
    return ww_mutex_trylock(&obj->lock, NULL);
}
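
/*
 * Example: a minimal sketch (not part of the original header) of
 * opportunistic locking, as used on eviction-like paths where blocking on a
 * contended object is not worth it:
 *
 *    if (!dma_resv_trylock(obj))
 *        return false;
 *    busy = !dma_resv_test_signaled(obj, DMA_RESV_USAGE_BOOKKEEP);
 *    dma_resv_unlock(obj);
 *    return !busy;
 */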

/**
 * dma_resv_is_locked - is the reservation object locked
 * @obj: the reservation object
 *
 * Returns true if the mutex is locked, false if unlocked.
 */
static inline bool dma_resv_is_locked(struct dma_resv *obj)
{
    return ww_mutex_is_locked(&obj->lock);
}

/**
 * dma_resv_locking_ctx - returns the context used to lock the object
 * @obj: the reservation object
 *
 * Returns the context used to lock a reservation object or NULL if no context
 * was used or the object is not locked at all.
 *
 * WARNING: This interface is pretty horrible, but TTM needs it because it
 * doesn't pass the struct ww_acquire_ctx around in some very long callchains.
 * Everyone else just uses it to check whether they're holding a reservation or
 * not.
 */
static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
{
    return READ_ONCE(obj->lock.ctx);
}

/**
 * dma_resv_unlock - unlock the reservation object
 * @obj: the reservation object
 *
 * Unlocks the reservation object following exclusive access.
 */
static inline void dma_resv_unlock(struct dma_resv *obj)
{
    dma_resv_reset_max_fences(obj);
    ww_mutex_unlock(&obj->lock);
}

void dma_resv_init(struct dma_resv *obj);
void dma_resv_fini(struct dma_resv *obj);
int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences);
void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
            enum dma_resv_usage usage);
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
                 struct dma_fence *fence,
                 enum dma_resv_usage usage);
int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
            unsigned int *num_fences, struct dma_fence ***fences);
int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
               struct dma_fence **fence);
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
               bool intr, unsigned long timeout);
bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage);
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);
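
/*
 * Example: an illustrative sketch (not part of the original header) of
 * waiting for all relevant fences before CPU write access to a buffer,
 * matching the dma_resv_usage_rw() logic above. MAX_SCHEDULE_TIMEOUT waits
 * without a deadline and a negative return means the interruptible wait was
 * interrupted:
 *
 *    long ret;
 *
 *    ret = dma_resv_wait_timeout(obj, DMA_RESV_USAGE_READ, true,
 *                    MAX_SCHEDULE_TIMEOUT);
 *    if (ret < 0)
 *        return ret;
 */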

#endif /* _LINUX_RESERVATION_H */