// SPDX-License-Identifier: GPL-2.0-only
/*
 * rtmutex API
 */
#include <linux/spinlock.h>
#include <linux/export.h>

/*
 * Pull in the rtmutex core with RT_MUTEX_BUILD_MUTEX and WW_RT defined,
 * which builds it in its wound/wait mutex flavor for PREEMPT_RT.
 */
#define RT_MUTEX_BUILD_MUTEX
#define WW_RT
#include "rtmutex.c"

/*
 * Try to acquire @lock without blocking. Returns 1 on success and 0 if
 * the lock is contended.
 */
int ww_mutex_trylock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct rt_mutex *rtm = &lock->base;

	if (!ww_ctx)
		return rt_mutex_trylock(rtm);

	/*
	 * Reset the wounded flag after a kill. No other process can
	 * race and wound us here, since they can't have a valid owner
	 * pointer if we don't have any locks held.
	 */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

	if (__rt_mutex_trylock(&rtm->rtmutex)) {
		ww_mutex_set_context_fastpath(lock, ww_ctx);
		mutex_acquire_nest(&rtm->dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);
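
/*
 * Usage sketch (not part of this file): opportunistically taking a
 * second lock with ww_mutex_trylock() while already inside an acquire
 * context, bailing out instead of sleeping when it is contended. The
 * helper and both locks are hypothetical; only the ww_mutex_*() calls
 * themselves are the real API.
 */
static bool lock_pair_example(struct ww_mutex *a, struct ww_mutex *b,
			      struct ww_acquire_ctx *ctx)
{
	if (ww_mutex_lock(a, ctx))	/* -EDEADLK or -EALREADY */
		return false;

	if (!ww_mutex_trylock(b, ctx)) {
		/* @b is busy: drop @a and let the caller retry later. */
		ww_mutex_unlock(a);
		return false;
	}
	return true;
}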

/*
 * Common locking path for ww_mutex_lock() and
 * ww_mutex_lock_interruptible(): try the cmpxchg fast path (owner
 * NULL -> current) first, fall back to rt_mutex_slowlock() otherwise.
 */
static int __sched
__ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
		   unsigned int state, unsigned long ip)
{
	struct lockdep_map __maybe_unused *nest_lock = NULL;
	struct rt_mutex *rtm = &lock->base;
	int ret;

	might_sleep();

	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(lock->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here, since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}
	mutex_acquire_nest(&rtm->dep_map, 0, 0, nest_lock, ip);

	if (likely(rt_mutex_cmpxchg_acquire(&rtm->rtmutex, NULL, current))) {
		if (ww_ctx)
			ww_mutex_set_context_fastpath(lock, ww_ctx);
		return 0;
	}

	ret = rt_mutex_slowlock(&rtm->rtmutex, ww_ctx, state);

	if (ret)
		mutex_release(&rtm->dep_map, ip);
	return ret;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_rt_mutex_lock(lock, ctx, TASK_UNINTERRUPTIBLE, _RET_IP_);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_rt_mutex_lock(lock, ctx, TASK_INTERRUPTIBLE, _RET_IP_);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);
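
/*
 * Usage sketch (not part of this file): the interruptible variant lets
 * a signal abort the wait. "struct my_obj" and the helper are
 * hypothetical; translating -EINTR into -ERESTARTSYS is a common
 * convention in syscall paths.
 */
struct my_obj {
	struct ww_mutex lock;
};

static int lock_obj_example(struct my_obj *obj, struct ww_acquire_ctx *ctx)
{
	int ret = ww_mutex_lock_interruptible(&obj->lock, ctx);

	if (ret == -EINTR)	/* a signal arrived while sleeping */
		return -ERESTARTSYS;
	return ret;		/* 0 on success, -EDEADLK or -EALREADY */
}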

void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	struct rt_mutex *rtm = &lock->base;

	__ww_mutex_unlock(lock);

	mutex_release(&rtm->dep_map, _RET_IP_);
	__rt_mutex_unlock(&rtm->rtmutex);
}
EXPORT_SYMBOL(ww_mutex_unlock);
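
/*
 * Usage sketch (not part of this file): the classic wound/wait pattern
 * for taking two locks of the same class in arbitrary order, along the
 * lines of Documentation/locking/ww-mutex-design.rst. The ww_class and
 * the helper are hypothetical; the ww_acquire_*() and ww_mutex_*()
 * calls are the real API.
 */
static DEFINE_WW_CLASS(example_ww_class);

static void lock_two_example(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &example_ww_class);

	/* The first lock in a fresh context cannot be wounded. */
	ret = ww_mutex_lock(a, &ctx);
	WARN_ON_ONCE(ret);
retry:
	ret = ww_mutex_lock(b, &ctx);
	if (ret == -EDEADLK) {
		/*
		 * An older context owns @b and wounded us: back off,
		 * sleep on @b without deadlock risk, then retry with
		 * the contended lock taken first.
		 */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);
		swap(a, b);
		goto retry;
	}
	ww_acquire_done(&ctx);

	/* ... both locks held, do the work ... */

	ww_mutex_unlock(a);
	ww_mutex_unlock(b);
	ww_acquire_fini(&ctx);
}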