// SPDX-License-Identifier: GPL-2.0-only
/*
 * rtmutex API
 */
#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_MUTEX
#include "rtmutex.c"

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
                          unsigned int state,
                          struct lockdep_map *nest_lock,
                          unsigned int subclass)
{
    int ret;

    might_sleep();
    mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_);
    ret = __rt_mutex_lock(&lock->rtmutex, state);
    if (ret)
        mutex_release(&lock->dep_map, _RET_IP_);
    return ret;
}

void rt_mutex_base_init(struct rt_mutex_base *rtb)
{
    __rt_mutex_base_init(rtb);
}
EXPORT_SYMBOL(rt_mutex_base_init);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rt_mutex_lock_nested - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 * @subclass: the lockdep subclass
 */
void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
{
    __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);

void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
{
    __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, nest_lock, 0);
}
EXPORT_SYMBOL_GPL(_rt_mutex_lock_nest_lock);

#else /* !CONFIG_DEBUG_LOCK_ALLOC */

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
    __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
#endif

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:       the rt_mutex to be locked
 *
 * Returns:
 *  0       on success
 * -EINTR   when interrupted by a signal
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
    return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, NULL, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/**
 * rt_mutex_lock_killable - lock a rt_mutex killable
 *
 * @lock:       the rt_mutex to be locked
 *
 * Returns:
 *  0       on success
 * -EINTR   when interrupted by a fatal signal
 */
int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
{
    return __rt_mutex_lock_common(lock, TASK_KILLABLE, NULL, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
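
/*
 * Editorial usage sketch, not part of the upstream file: how a sleepable
 * caller might use the interruptible variant above and propagate -EINTR.
 * DEFINE_RT_MUTEX() comes from <linux/rtmutex.h>; example_lock and
 * example_fill() are hypothetical names used only for illustration.
 */
static DEFINE_RT_MUTEX(example_lock);

static int example_fill(void *buf)
{
    int ret;

    /* Back out with -EINTR if a signal arrives while we are blocked. */
    ret = rt_mutex_lock_interruptible(&example_lock);
    if (ret)
        return ret;

    /* ... fill @buf under the lock ... */

    rt_mutex_unlock(&example_lock);
    return 0;
}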

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:   the rt_mutex to be locked
 *
 * This function can only be called in thread context. It's safe to call it
 * from atomic regions, but not from hard or soft interrupt context.
 *
 * Returns:
 *  1 on success
 *  0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
    int ret;

    if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
        return 0;

    ret = __rt_mutex_trylock(&lock->rtmutex);
    if (ret)
        mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

    return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
    mutex_release(&lock->dep_map, _RET_IP_);
    __rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
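
/*
 * Editorial usage sketch, not part of the upstream file, assuming a
 * sleepable thread context: use rt_mutex_trylock() as a fast path and fall
 * back to the blocking rt_mutex_lock() on contention. demo_lock and
 * demo_update() are hypothetical names used only for illustration.
 */
static DEFINE_RT_MUTEX(demo_lock);

static void demo_update(void)
{
    if (!rt_mutex_trylock(&demo_lock)) {
        /* Contended: block until the (possibly boosted) owner releases it. */
        rt_mutex_lock(&demo_lock);
    }

    /* ... critical section protected by demo_lock ... */

    rt_mutex_unlock(&demo_lock);
}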

/*
 * Futex variants, must not use fastpath.
 */
int __sched rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
    return rt_mutex_slowtrylock(lock);
}

int __sched __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
    return __rt_mutex_slowtrylock(lock);
}

/**
 * __rt_mutex_futex_unlock - Futex variant which, since futex variants
 * do not use the fast-path, can be simple and will not need to retry.
 *
 * @lock:   The rt_mutex to be unlocked
 * @wqh:    The wake queue head from which to get the next lock waiter
 */
bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
                     struct rt_wake_q_head *wqh)
{
    lockdep_assert_held(&lock->wait_lock);

    debug_rt_mutex_unlock(lock);

    if (!rt_mutex_has_waiters(lock)) {
        lock->owner = NULL;
        return false; /* done */
    }

    /*
     * We've already deboosted, mark_wakeup_next_waiter() will
     * retain preempt_disabled when we drop the wait_lock, to
     * avoid inversion prior to the wakeup.  preempt_disable()
     * therein pairs with rt_mutex_postunlock().
     */
    mark_wakeup_next_waiter(wqh, lock);

    return true; /* call postunlock() */
}

void __sched rt_mutex_futex_unlock(struct rt_mutex_base *lock)
{
    DEFINE_RT_WAKE_Q(wqh);
    unsigned long flags;
    bool postunlock;

    raw_spin_lock_irqsave(&lock->wait_lock, flags);
    postunlock = __rt_mutex_futex_unlock(lock, &wqh);
    raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

    if (postunlock)
        rt_mutex_postunlock(&wqh);
}

/**
 * __rt_mutex_init - initialize the rt_mutex
 *
 * @lock:   The rt_mutex to be initialized
 * @name:   The lock name used for debugging
 * @key:    The lock class key used for debugging
 *
 * Initialize the rt_mutex to the unlocked state.
 *
 * Initializing a locked rt_mutex is not allowed.
 */
void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
                 struct lock_class_key *key)
{
    debug_check_no_locks_freed((void *)lock, sizeof(*lock));
    __rt_mutex_base_init(&lock->rtmutex);
    lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
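
/*
 * Editorial usage sketch, not part of the upstream file: callers normally
 * reach __rt_mutex_init() through the rt_mutex_init() wrapper (or use
 * DEFINE_RT_MUTEX() for static locks), both declared in <linux/rtmutex.h>,
 * which supply the name and lock_class_key. struct example_dev and
 * example_dev_setup() are hypothetical names used only for illustration.
 */
struct example_dev {
    struct rt_mutex lock;
    /* ... */
};

static void example_dev_setup(struct example_dev *dev)
{
    /* Initialize to the unlocked state before the lock becomes visible. */
    rt_mutex_init(&dev->lock);
}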

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *              proxy owner
 *
 * @lock:   the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. Caller has to do the serializing itself.
 *
 * Special API call for PI-futex support. This initializes the rtmutex and
 * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
 * possible at this point because the pi_state which contains the rtmutex
 * is not yet visible to other tasks.
 */
void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
                    struct task_struct *proxy_owner)
{
    static struct lock_class_key pi_futex_key;

    __rt_mutex_base_init(lock);
    /*
     * On PREEMPT_RT the futex hashbucket spinlock becomes 'sleeping'
     * and rtmutex based. That causes a lockdep false positive, because
     * some of the futex functions invoke spin_unlock(&hb->lock) with
     * the wait_lock of the rtmutex associated to the pi_futex held.
     * spin_unlock() in turn takes wait_lock of the rtmutex on which
     * the spinlock is based, which makes lockdep notice a lock
     * recursion. Give the futex/rtmutex wait_lock a separate key.
     */
    lockdep_set_class(&lock->wait_lock, &pi_futex_key);
    rt_mutex_set_owner(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:   the rt_mutex to be unlocked
 *
 * No locking. Caller has to do the serializing itself.
 *
 * Special API call for PI-futex support. This just cleans up the rtmutex
 * (debugging) state. Concurrent operations on this rt_mutex are not
 * possible because it belongs to the pi_state which is about to be freed
 * and it is no longer visible to other tasks.
 */
void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
{
    debug_rt_mutex_proxy_unlock(lock);
    rt_mutex_set_owner(lock, NULL);
}

/**
 * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:       the rt_mutex to take
 * @waiter:     the pre-initialized rt_mutex_waiter
 * @task:       the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: does _NOT_ remove the @waiter on failure; must either call
 * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
                    struct rt_mutex_waiter *waiter,
                    struct task_struct *task)
{
    int ret;

    lockdep_assert_held(&lock->wait_lock);

    if (try_to_take_rt_mutex(lock, task, NULL))
        return 1;

    /* We enforce deadlock detection for futexes */
    ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,
                      RT_MUTEX_FULL_CHAINWALK);

    if (ret && !rt_mutex_owner(lock)) {
        /*
         * Reset the return value. We might have
         * returned with -EDEADLK and the owner
         * released the lock while we were walking the
         * pi chain.  Let the waiter sort it out.
         */
        ret = 0;
    }

    return ret;
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:       the rt_mutex to take
 * @waiter:     the pre-initialized rt_mutex_waiter
 * @task:       the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
 * on failure.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int __sched rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
                      struct rt_mutex_waiter *waiter,
                      struct task_struct *task)
{
    int ret;

    raw_spin_lock_irq(&lock->wait_lock);
    ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
    if (unlikely(ret))
        remove_waiter(lock, waiter);
    raw_spin_unlock_irq(&lock->wait_lock);

    return ret;
}

/**
 * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
 * @lock:       the rt_mutex we were woken on
 * @to:         the timeout, NULL if none. The hrtimer should already have
 *          been started.
 * @waiter:     the pre-initialized rt_mutex_waiter
 *
 * Wait for the lock acquisition started on our behalf by
 * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
 * rt_mutex_cleanup_proxy_lock().
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT
 *
 * Special API call for PI-futex support
 */
int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
                     struct hrtimer_sleeper *to,
                     struct rt_mutex_waiter *waiter)
{
    int ret;

    raw_spin_lock_irq(&lock->wait_lock);
    /* sleep on the mutex */
    set_current_state(TASK_INTERRUPTIBLE);
    ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
    /*
     * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
     * have to fix that up.
     */
    fixup_rt_mutex_waiters(lock);
    raw_spin_unlock_irq(&lock->wait_lock);

    return ret;
}

/**
 * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
 * @lock:       the rt_mutex we were woken on
 * @waiter:     the pre-initialized rt_mutex_waiter
 *
 * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
 * rt_mutex_wait_proxy_lock().
 *
 * Unless we acquired the lock, we're still enqueued on the wait-list and can
 * in fact still be granted ownership until we're removed. Therefore we can
 * find we are in fact the owner and must disregard the
 * rt_mutex_wait_proxy_lock() failure.
 *
 * Returns:
 *  true  - did the cleanup, we are done.
 *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
 *          caller should disregard its return value.
 *
 * Special API call for PI-futex support
 */
bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
                     struct rt_mutex_waiter *waiter)
{
    bool cleanup = false;

    raw_spin_lock_irq(&lock->wait_lock);
    /*
     * Do an unconditional try-lock, this deals with the lock stealing
     * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
     * sets a NULL owner.
     *
     * We're not interested in the return value, because the subsequent
     * test on rt_mutex_owner() will infer that. If the trylock succeeded,
     * we will own the lock and it will have removed the waiter. If we
     * failed the trylock, we're still not owner and we need to remove
     * ourselves.
     */
    try_to_take_rt_mutex(lock, current, waiter);
    /*
     * Unless we're the owner, we're still enqueued on the wait_list.
     * So check if we became owner, if not, take us off the wait_list.
     */
    if (rt_mutex_owner(lock) != current) {
        remove_waiter(lock, waiter);
        cleanup = true;
    }
    /*
     * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
     * have to fix that up.
     */
    fixup_rt_mutex_waiters(lock);

    raw_spin_unlock_irq(&lock->wait_lock);

    return cleanup;
}
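
/*
 * Editorial sketch, not part of the upstream file, of the call ordering the
 * proxy-lock API above expects; the real users are the PI-futex paths in
 * kernel/futex/, and all futex bookkeeping is omitted here. demo_start_for()
 * and demo_finish() are hypothetical helpers; @to is an optional, already
 * started hrtimer_sleeper.
 */

/* Requeue side: queue @waiter and try to take the lock on behalf of @task. */
static int demo_start_for(struct rt_mutex_base *lock,
              struct rt_mutex_waiter *waiter,
              struct task_struct *task)
{
    rt_mutex_init_waiter(waiter);
    /* 1: acquired for @task (wake it up), 0: @task is now blocked, <0: error. */
    return rt_mutex_start_proxy_lock(lock, waiter, task);
}

/* Later, in @task's own context, after it has been woken up. */
static int demo_finish(struct rt_mutex_base *lock,
               struct rt_mutex_waiter *waiter,
               struct hrtimer_sleeper *to)
{
    int ret;

    ret = rt_mutex_wait_proxy_lock(lock, to, waiter);
    if (ret && !rt_mutex_cleanup_proxy_lock(lock, waiter))
        ret = 0;    /* We became the owner after all, ignore the error. */

    return ret;
}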

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void __sched rt_mutex_adjust_pi(struct task_struct *task)
{
    struct rt_mutex_waiter *waiter;
    struct rt_mutex_base *next_lock;
    unsigned long flags;

    raw_spin_lock_irqsave(&task->pi_lock, flags);

    waiter = task->pi_blocked_on;
    if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        return;
    }
    next_lock = waiter->lock;
    raw_spin_unlock_irqrestore(&task->pi_lock, flags);

    /* gets dropped in rt_mutex_adjust_prio_chain()! */
    get_task_struct(task);

    rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
                   next_lock, NULL, task);
}

/*
 * Performs the wakeup of the top-waiter and re-enables preemption.
 */
void __sched rt_mutex_postunlock(struct rt_wake_q_head *wqh)
{
    rt_mutex_wake_up_q(wqh);
}

#ifdef CONFIG_DEBUG_RT_MUTEXES
void rt_mutex_debug_task_free(struct task_struct *task)
{
    DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
    DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
}
#endif

#ifdef CONFIG_PREEMPT_RT
/* Mutexes */
void __mutex_rt_init(struct mutex *mutex, const char *name,
             struct lock_class_key *key)
{
    debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
    lockdep_init_map_wait(&mutex->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL(__mutex_rt_init);

static __always_inline int __mutex_lock_common(struct mutex *lock,
                           unsigned int state,
                           unsigned int subclass,
                           struct lockdep_map *nest_lock,
                           unsigned long ip)
{
    int ret;

    might_sleep();
    mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
    ret = __rt_mutex_lock(&lock->rtmutex, state);
    if (ret)
        mutex_release(&lock->dep_map, ip);
    else
        lock_acquired(&lock->dep_map, ip);
    return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
    __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched _mutex_lock_nest_lock(struct mutex *lock,
                   struct lockdep_map *nest_lock)
{
    __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest_lock, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched mutex_lock_interruptible_nested(struct mutex *lock,
                        unsigned int subclass)
{
    return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

int __sched mutex_lock_killable_nested(struct mutex *lock,
                        unsigned int subclass)
{
    return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

void __sched mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
    int token;

    might_sleep();

    token = io_schedule_prepare();
    __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
    io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

#else /* CONFIG_DEBUG_LOCK_ALLOC */

void __sched mutex_lock(struct mutex *lock)
{
    __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock);

int __sched mutex_lock_interruptible(struct mutex *lock)
{
    return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
    return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_killable);

void __sched mutex_lock_io(struct mutex *lock)
{
    int token = io_schedule_prepare();

    __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
    io_schedule_finish(token);
}
EXPORT_SYMBOL(mutex_lock_io);
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */

int __sched mutex_trylock(struct mutex *lock)
{
    int ret;

    if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
        return 0;

    ret = __rt_mutex_trylock(&lock->rtmutex);
    if (ret)
        mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

    return ret;
}
EXPORT_SYMBOL(mutex_trylock);

void __sched mutex_unlock(struct mutex *lock)
{
    mutex_release(&lock->dep_map, _RET_IP_);
    __rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL(mutex_unlock);
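
/*
 * Editorial usage sketch, not part of the upstream file: with
 * CONFIG_PREEMPT_RT the regular mutex API above is implemented on top of the
 * rtmutex, so ordinary callers keep the familiar interface and gain priority
 * inheritance. example_mutex and example_store() are hypothetical names used
 * only for illustration.
 */
static DEFINE_MUTEX(example_mutex);

static ssize_t example_store(const char *buf, size_t count)
{
    int ret;

    ret = mutex_lock_interruptible(&example_mutex);
    if (ret)
        return ret;

    /* ... update shared state; RT waiters boost us while we hold the lock ... */

    mutex_unlock(&example_mutex);
    return count;
}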

#endif /* CONFIG_PREEMPT_RT */