/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Hardware spinlock public header
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#ifndef __LINUX_HWSPINLOCK_H
#define __LINUX_HWSPINLOCK_H

#include <linux/err.h>
#include <linux/sched.h>

/* hwspinlock mode argument */
#define HWLOCK_IRQSTATE		0x01 /* Disable interrupts, save state */
#define HWLOCK_IRQ		0x02 /* Disable interrupts, don't save state */
#define HWLOCK_RAW		0x03 /* No IRQ/preemption handling; caller serializes */
#define HWLOCK_IN_ATOMIC	0x04 /* Called while in atomic context */

struct device;
struct device_node;
struct hwspinlock;
struct hwspinlock_device;
struct hwspinlock_ops;

/**
 * struct hwspinlock_pdata - platform data for hwspinlock drivers
 * @base_id: base id for this hwspinlock device
 *
 * hwspinlock devices provide system-wide hardware locks that are used
 * by remote processors that have no other way to achieve synchronization.
 *
 * To achieve that, each physical lock must have a system-wide id number
 * that is agreed upon, otherwise remote processors can't possibly assume
 * they're using the same hardware lock.
 *
 * Usually boards have a single hwspinlock device, which provides several
 * hwspinlocks, and in this case, they can be trivially numbered 0 to
 * (num-of-locks - 1).
 *
 * In case boards have several hwspinlock devices, a different base id
 * should be used for each hwspinlock device (they can't all use 0 as
 * a starting id!).
 *
 * This platform data structure should be used to provide the base id
 * for each device (which is trivially 0 when only a single hwspinlock
 * device exists). It can be shared between different platforms, hence
 * its location.
 */
struct hwspinlock_pdata {
	int base_id;
};
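
/*
 * Illustrative sketch (not part of the original header): platform data
 * for a hypothetical board with two hwspinlock banks of 32 locks each.
 * The second bank's base id must start where the first bank's locks end,
 * so that every lock keeps a unique system-wide id.
 */
static struct hwspinlock_pdata example_bank0_pdata __maybe_unused = {
	.base_id = 0,	/* provides locks 0..31 */
};

static struct hwspinlock_pdata example_bank1_pdata __maybe_unused = {
	.base_id = 32,	/* provides locks 32..63 */
};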

#ifdef CONFIG_HWSPINLOCK

int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks);
int hwspin_lock_unregister(struct hwspinlock_device *bank);
struct hwspinlock *hwspin_lock_request(void);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
int of_hwspin_lock_get_id(struct device_node *np, int index);
int hwspin_lock_get_id(struct hwspinlock *hwlock);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
			  unsigned long *);
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
struct hwspinlock *devm_hwspin_lock_request(struct device *dev);
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id);
int devm_hwspin_lock_unregister(struct device *dev,
				struct hwspinlock_device *bank);
int devm_hwspin_lock_register(struct device *dev,
			      struct hwspinlock_device *bank,
			      const struct hwspinlock_ops *ops,
			      int base_id, int num_locks);
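
/*
 * Provider-side sketch (illustrative, not part of the original header):
 * a hwspinlock driver fills a struct hwspinlock_device bank (the layout
 * lives in the framework-internal header) and registers it from probe.
 * The managed variant unregisters automatically on driver detach.
 * "bank", "my_ops", "base_id" and "num_locks" below are hypothetical:
 *
 *	ret = devm_hwspin_lock_register(&pdev->dev, bank, &my_ops,
 *					base_id, num_locks);
 *	if (ret)
 *		return ret;
 */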

#else /* !CONFIG_HWSPINLOCK */

/*
 * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
 * enabled. We prefer to silently succeed in this case, and let the
 * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not
 * required on a given setup, users' code will still build and run.
 *
 * The only exception is hwspin_lock_register()/hwspin_lock_unregister(),
 * with which we _do_ want users to fail (there is no point in registering
 * hwspinlock instances if the framework is not available).
 *
 * Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking
 * users. Others, which care, can still detect it with IS_ERR() (see the
 * checking sketch just after this conditional block).
 */
static inline struct hwspinlock *hwspin_lock_request(void)
{
	return ERR_PTR(-ENODEV);
}

static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	return ERR_PTR(-ENODEV);
}

static inline int hwspin_lock_free(struct hwspinlock *hwlock)
{
	return 0;
}

static inline
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
			  int mode, unsigned long *flags)
{
	return 0;
}

static inline
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	return 0;
}

static inline
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}

static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	return 0;
}

static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	return 0;
}

static inline
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
	return 0;
}

static inline
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
	return 0;
}

static inline struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
{
	return ERR_PTR(-ENODEV);
}

static inline
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id)
{
	return ERR_PTR(-ENODEV);
}

#endif /* !CONFIG_HWSPINLOCK */

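/*
 * Checking-style sketch (illustrative, not part of the original header):
 * the request functions have three possible outcomes. NULL means the
 * framework is present but no lock was available; an ERR_PTR() value means
 * the framework is compiled out (see the stubs above); anything else is a
 * usable lock.
 */
static inline bool example_hwlock_usable(struct hwspinlock *hwlock)
{
	return hwlock && !IS_ERR(hwlock);
}
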
/**
 * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled (the previous interrupt state is saved at @flags),
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline
int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
}
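
/*
 * Pairing sketch (illustrative, not part of the original header): a
 * successful hwspin_trylock_irqsave() must be undone by
 * hwspin_unlock_irqrestore() with the same @flags:
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	ret = hwspin_trylock_irqsave(hwlock, &flags);
 *	if (ret)
 *		return ret;	(-EBUSY if taken, -EINVAL if invalid)
 *	...short critical section, no sleeping...
 *	hwspin_unlock_irqrestore(hwlock, &flags);
 */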

/**
 * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled, so the caller must not sleep, and is advised
 * to release the hwspinlock as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_trylock_raw() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Caution: the caller must protect the routine that takes the hardware
 * lock with a mutex or spinlock to avoid deadlock; doing so lets the
 * caller perform time-consuming or sleepable operations while holding
 * the hardware lock.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
}
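
/*
 * Serialization sketch (illustrative, not part of the original header):
 * in raw mode the framework touches neither preemption nor interrupts,
 * so local tasks must be kept from racing for the same hwspinlock by
 * other means. "my_mutex" is hypothetical:
 *
 *	mutex_lock(&my_mutex);
 *	ret = hwspin_trylock_raw(hwlock);
 *	if (!ret) {
 *		...may take time or sleep while holding the hardware lock...
 *		hwspin_unlock_raw(hwlock);
 *	}
 *	mutex_unlock(&my_mutex);
 */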

/**
 * hwspin_trylock_in_atomic() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * This function shall be called only from an atomic context.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_in_atomic(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_IN_ATOMIC, NULL);
}

/**
 * hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible. This is required in order to minimize remote cores
 * polling on the hardware interconnect.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, 0, NULL);
}

/**
 * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled (plus the previous interrupt state is saved), so the caller
 * must not sleep, and is advised to release the hwspinlock as soon as
 * possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
					      unsigned int to,
					      unsigned long *flags)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
}
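
/*
 * Timeout-handling sketch (illustrative, not part of the original header);
 * the 10 msecs budget is hypothetical. @flags is only meaningful when the
 * call succeeds:
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	ret = hwspin_lock_timeout_irqsave(hwlock, 10, &flags);
 *	if (ret)
 *		return ret;	(most notably -ETIMEDOUT)
 *	...short critical section, interrupts off...
 *	hwspin_unlock_irqrestore(hwlock, &flags);
 */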

/**
 * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled, so the caller must not sleep, and is advised to release the
 * hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Caution: the caller must protect the routine that takes the hardware
 * lock with a mutex or spinlock to avoid deadlock; doing so lets the
 * caller perform time-consuming or sleepable operations while holding
 * the hardware lock (see the serialization sketch after
 * hwspin_trylock_raw() above).
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
}

/**
 * hwspin_lock_timeout_in_atomic() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * This function shall be called only from an atomic context and the timeout
 * value shall not exceed a few msecs.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IN_ATOMIC, NULL);
}
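
/*
 * Atomic-context sketch (illustrative, not part of the original header):
 * this variant touches neither preemption nor interrupts itself and may
 * be called where sleeping is forbidden, e.g. under a spinlock; keep the
 * timeout to a few msecs. "my_lock" and the 1 msec budget are
 * hypothetical:
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	ret = hwspin_lock_timeout_in_atomic(hwlock, 1);
 *	if (!ret) {
 *		...short critical section...
 *		hwspin_unlock_in_atomic(hwlock);
 *	}
 *	spin_unlock_irqrestore(&my_lock, flags);
 */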

/**
 * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, 0, NULL);
}

/**
 * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @flags: the caller's saved interrupt state to restore
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * restore the previous state of the local interrupts. It should be used
 * to undo, e.g., hwspin_trylock_irqsave().
 *
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 */
static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
					    unsigned long *flags)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
}

/**
 * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * enable local interrupts. It should be used to undo, e.g.,
 * hwspin_trylock_irq() or hwspin_lock_timeout_irq().
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_unlock_raw() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_raw()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
}

/**
 * hwspin_unlock_in_atomic() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_in_atomic())
 * before calling this function: it is a bug to call unlock on a @hwlock
 * that is already unlocked.
 */
static inline void hwspin_unlock_in_atomic(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL);
}

/**
 * hwspin_unlock() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock and re-enable
 * preemption.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
 * this function: it is a bug to call unlock on a @hwlock that is already
 * unlocked.
 */
static inline void hwspin_unlock(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, 0, NULL);
}
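
/*
 * End-to-end usage sketch (illustrative, not part of the original header):
 * a client requests a lock by its system-wide id, takes it with a 100 msecs
 * busy-wait budget, bumps a shared counter and releases everything. The id,
 * the timeout and the counter are hypothetical. Note that the real framework
 * returns NULL from the request functions when no lock is available, while
 * the !CONFIG_HWSPINLOCK stubs return ERR_PTR(-ENODEV).
 */
static inline int example_hwspinlock_client(unsigned int id, int *shared)
{
	struct hwspinlock *hwlock;
	int ret;

	hwlock = hwspin_lock_request_specific(id);
	if (!hwlock)
		return -EBUSY;		/* framework present, lock unavailable */
	if (IS_ERR(hwlock))
		return PTR_ERR(hwlock);	/* framework compiled out */

	/* busy-wait for up to 100 msecs if a remote core holds the lock */
	ret = hwspin_lock_timeout(hwlock, 100);
	if (!ret) {
		(*shared)++;		/* critical section: preemption disabled */
		hwspin_unlock(hwlock);
	}

	hwspin_lock_free(hwlock);
	return ret;
}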

#endif /* __LINUX_HWSPINLOCK_H */