// SPDX-License-Identifier: GPL-2.0
/*
 * Hardware spinlock framework
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#define pr_fmt(fmt)    "%s: " fmt, __func__

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include "hwspinlock_internal.h"

/* retry delay used in atomic context */
#define HWSPINLOCK_RETRY_DELAY_US   100

/* radix tree tags */
#define HWSPINLOCK_UNUSED   (0) /* tags an hwspinlock as unused */

/*
 * A radix tree is used to maintain the available hwspinlock instances.
 * The tree associates hwspinlock pointers with their integer key id,
 * and provides an easy-to-use API which keeps the hwspinlock core code
 * simple and easy to read.
 *
 * Radix trees are quick on lookups, and reasonably efficient in terms of
 * storage, especially with the high-density usage this framework
 * requires (a continuous range of integer keys, beginning with zero, is
 * used as the IDs of the hwspinlock instances).
 *
 * The radix tree API supports tagging items in the tree, which this
 * framework uses to mark unused hwspinlock instances (see the
 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
 * tree, looking for an unused hwspinlock instance, is reduced to a
 * single radix tree API call.
 */
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);
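
/*
 * For illustration, the "single radix tree API call" mentioned above is a
 * tagged gang lookup; a minimal sketch (assuming hwspinlock_tree_lock is
 * held, exactly as hwspin_lock_request() does below):
 *
 *    struct hwspinlock *hwlock;
 *    unsigned int found;
 *
 *    found = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
 *                                       0, 1, HWSPINLOCK_UNUSED);
 *    if (found == 1)
 *        (hwlock now points at the first unused instance)
 */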

/*
 * Synchronization of access to the tree is achieved using this mutex,
 * as the radix-tree API requires that users provide all synchronization.
 * A mutex is needed because we're using non-atomic radix tree allocations.
 */
static DEFINE_MUTEX(hwspinlock_tree_lock);


/**
 * __hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer where the caller's interrupt state will be saved at (if
 *         requested)
 *
 * This function attempts to lock an hwspinlock, and will immediately
 * fail if the hwspinlock is already taken.
 *
 * Caution: If the mode is HWLOCK_RAW, the user must serialize the routine
 * that takes the hardware lock with a mutex or spinlock. In some scenarios,
 * users need to perform time-consuming or sleepable operations under the
 * hardware lock, so they need a sleepable lock (such as a mutex) to protect
 * those operations.
 *
 * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
 * return from this function, preemption (and possibly interrupts) is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock as
 * soon as possible. This is required in order to minimize remote cores polling
 * on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether they want their previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_trylock, spin_trylock_irq and
 * spin_trylock_irqsave.
 *
 * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
 * the hwspinlock was already taken.
 * This function will never sleep.
 */
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
    int ret;

    if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
        return -EINVAL;

    /*
     * This spin_lock{_irq, _irqsave} serves three purposes:
     *
     * 1. Disable preemption, in order to minimize the period of time
     *    in which the hwspinlock is taken. This is important in order
     *    to minimize the possible polling on the hardware interconnect
     *    by a remote user of this lock.
     * 2. Make the hwspinlock SMP-safe (so we can take it from
     *    additional contexts on the local host).
     * 3. Ensure that in_atomic/might_sleep checks catch potential
     *    problems with hwspinlock usage (e.g. scheduler checks like
     *    'scheduling while atomic' etc.)
     */
    switch (mode) {
    case HWLOCK_IRQSTATE:
        ret = spin_trylock_irqsave(&hwlock->lock, *flags);
        break;
    case HWLOCK_IRQ:
        ret = spin_trylock_irq(&hwlock->lock);
        break;
    case HWLOCK_RAW:
    case HWLOCK_IN_ATOMIC:
        ret = 1;
        break;
    default:
        ret = spin_trylock(&hwlock->lock);
        break;
    }

    /* is the lock already taken by another context on the local CPU? */
    if (!ret)
        return -EBUSY;

    /* try to take the hwspinlock device */
    ret = hwlock->bank->ops->trylock(hwlock);

    /* if hwlock is already taken, undo spin_trylock_* and exit */
    if (!ret) {
        switch (mode) {
        case HWLOCK_IRQSTATE:
            spin_unlock_irqrestore(&hwlock->lock, *flags);
            break;
        case HWLOCK_IRQ:
            spin_unlock_irq(&hwlock->lock);
            break;
        case HWLOCK_RAW:
        case HWLOCK_IN_ATOMIC:
            /* Nothing to do */
            break;
        default:
            spin_unlock(&hwlock->lock);
            break;
        }

        return -EBUSY;
    }

    /*
     * We can be sure the other core's memory operations
     * are observable to us only _after_ we successfully take
     * the hwspinlock, and we must make sure that subsequent memory
     * operations (both reads and writes) will not be reordered before
     * we actually took the hwspinlock.
     *
     * Note: the implicit memory barrier of the spinlock above is too
     * early, so we need this additional explicit memory barrier.
     */
    mb();

    return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);
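
/*
 * A minimal client-side sketch of the trylock path, using the static inline
 * wrappers from <linux/hwspinlock.h> (the lock pointer "hwlock" is assumed
 * to come from an earlier hwspin_lock_request() call):
 *
 *    int ret;
 *
 *    ret = hwspin_trylock(hwlock);
 *    if (ret)
 *        return ret;        (-EBUSY: held locally or by a remote core)
 *
 *    (short critical section; preemption is disabled here)
 *
 *    hwspin_unlock(hwlock);
 */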

/**
 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @mode: mode which controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved at (if
 *         requested)
 *
 * This function locks the given @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up after @to msecs have elapsed.
 *
 * Caution: If the mode is HWLOCK_RAW, the user must serialize the routine
 * that takes the hardware lock with a mutex or spinlock. In some scenarios,
 * users need to perform time-consuming or sleepable operations under the
 * hardware lock, so they need a sleepable lock (such as a mutex) to protect
 * those operations.
 *
 * If the mode is HWLOCK_IN_ATOMIC (called from an atomic context) the timeout
 * is handled with busy-waiting delays, hence it shall not exceed a few msecs.
 *
 * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
 * return from this function, preemption (and possibly interrupts) is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock as
 * soon as possible. This is required in order to minimize remote cores polling
 * on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether they want their previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
                    int mode, unsigned long *flags)
{
    int ret;
    unsigned long expire, atomic_delay = 0;

    expire = msecs_to_jiffies(to) + jiffies;

    for (;;) {
        /* Try to take the hwspinlock */
        ret = __hwspin_trylock(hwlock, mode, flags);
        if (ret != -EBUSY)
            break;

        /*
         * The lock is already taken, let's check if the user wants
         * us to try again
         */
        if (mode == HWLOCK_IN_ATOMIC) {
            udelay(HWSPINLOCK_RETRY_DELAY_US);
            atomic_delay += HWSPINLOCK_RETRY_DELAY_US;
            if (atomic_delay > to * 1000)
                return -ETIMEDOUT;
        } else {
            if (time_is_before_eq_jiffies(expire))
                return -ETIMEDOUT;
        }

        /*
         * Allow platform-specific relax handlers to prevent
         * hogging the interconnect (no sleeping, though)
         */
        if (hwlock->bank->ops->relax)
            hwlock->bank->ops->relax(hwlock);
    }

    return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
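
/*
 * A usage sketch of the timeout API from a context that must also disable
 * interrupts and preserve their state (the lock pointer and the 10 msecs
 * timeout are illustrative assumptions):
 *
 *    unsigned long flags;
 *    int ret;
 *
 *    ret = hwspin_lock_timeout_irqsave(hwlock, 10, &flags);
 *    if (ret)
 *        return ret;        (most likely -ETIMEDOUT)
 *
 *    (short critical section; interrupts are disabled here)
 *
 *    hwspin_unlock_irqrestore(hwlock, &flags);
 */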

/**
 * __hwspin_unlock() - unlock a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @mode: controls whether local interrupts need to be restored or not
 * @flags: previous caller's interrupt state to restore (if requested)
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * (possibly) enable interrupts or restore their previous state.
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 *
 * The user decides whether local interrupts should be enabled or not, and
 * if yes, whether they want their previous state to be restored. It is up
 * to the user to choose the appropriate @mode of operation, exactly the
 * same way users decide between spin_unlock, spin_unlock_irq and
 * spin_unlock_irqrestore.
 *
 * The function will never sleep.
 */
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
    if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
        return;

    /*
     * We must make sure that memory operations (both reads and writes),
     * done before unlocking the hwspinlock, will not be reordered
     * after the lock is released.
     *
     * That's the purpose of this explicit memory barrier.
     *
     * Note: the memory barrier induced by the spin_unlock below is too
     * late; the other core is going to access memory soon after it will
     * take the hwspinlock, and by then we want to be sure our memory
     * operations are already observable.
     */
    mb();

    hwlock->bank->ops->unlock(hwlock);

    /* Undo the spin_trylock{_irq, _irqsave} called while locking */
    switch (mode) {
    case HWLOCK_IRQSTATE:
        spin_unlock_irqrestore(&hwlock->lock, *flags);
        break;
    case HWLOCK_IRQ:
        spin_unlock_irq(&hwlock->lock);
        break;
    case HWLOCK_RAW:
    case HWLOCK_IN_ATOMIC:
        /* Nothing to do */
        break;
    default:
        spin_unlock(&hwlock->lock);
        break;
    }
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);

/**
 * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
 * @hwlock_spec: hwlock specifier as found in the device tree
 *
 * This is a simple translation function, suitable for hwspinlock platform
 * drivers that only have a lock specifier length of 1.
 *
 * Returns a relative index of the lock within a specified bank on success,
 * or -EINVAL on invalid specifier cell count.
 */
static inline int
of_hwspin_lock_simple_xlate(const struct of_phandle_args *hwlock_spec)
{
    if (WARN_ON(hwlock_spec->args_count != 1))
        return -EINVAL;

    return hwlock_spec->args[0];
}

/**
 * of_hwspin_lock_get_id() - get lock id for an OF phandle-based specific lock
 * @np: device node from which to request the specific hwlock
 * @index: index of the hwlock in the list of values
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the phandle of the
 * hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns the global lock id number on success, -EPROBE_DEFER if the hwspinlock
 * device is not yet registered, -EINVAL on invalid args specifier value or an
 * appropriate error as returned from the OF parsing of the DT client node.
 */
int of_hwspin_lock_get_id(struct device_node *np, int index)
{
    struct of_phandle_args args;
    struct hwspinlock *hwlock;
    struct radix_tree_iter iter;
    void **slot;
    int id;
    int ret;

    ret = of_parse_phandle_with_args(np, "hwlocks", "#hwlock-cells", index,
                     &args);
    if (ret)
        return ret;

    if (!of_device_is_available(args.np)) {
        ret = -ENOENT;
        goto out;
    }

    /* Find the hwspinlock device: we need its base_id */
    ret = -EPROBE_DEFER;
    rcu_read_lock();
    radix_tree_for_each_slot(slot, &hwspinlock_tree, &iter, 0) {
        hwlock = radix_tree_deref_slot(slot);
        if (unlikely(!hwlock))
            continue;
        if (radix_tree_deref_retry(hwlock)) {
            slot = radix_tree_iter_retry(&iter);
            continue;
        }

        if (hwlock->bank->dev->of_node == args.np) {
            ret = 0;
            break;
        }
    }
    rcu_read_unlock();
    if (ret < 0)
        goto out;

    id = of_hwspin_lock_simple_xlate(&args);
    if (id < 0 || id >= hwlock->bank->num_locks) {
        ret = -EINVAL;
        goto out;
    }
    id += hwlock->bank->base_id;

out:
    of_node_put(args.np);
    return ret ? ret : id;
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);
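
/*
 * An illustrative device tree fragment for the lookup above (node names and
 * the cell value are hypothetical):
 *
 *    hwlock1: hwspinlock@40500000 {
 *        ...
 *        #hwlock-cells = <1>;
 *    };
 *
 *    client {
 *        ...
 *        hwlocks = <&hwlock1 2>;
 *    };
 *
 * Here of_hwspin_lock_get_id(np, 0) resolves the phandle to hwlock1,
 * translates the cell value 2 via of_hwspin_lock_simple_xlate(), and adds
 * the bank's base_id to form the global lock id.
 */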

/**
 * of_hwspin_lock_get_id_byname() - get lock id for a specified hwlock name
 * @np: device node from which to request the specific hwlock
 * @name: hwlock name
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the specified name of
 * the hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns the global lock id number on success, -EPROBE_DEFER if the hwspinlock
 * device is not yet registered, -EINVAL on invalid args specifier value or an
 * appropriate error as returned from the OF parsing of the DT client node.
 */
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
    int index;

    if (!name)
        return -EINVAL;

    index = of_property_match_string(np, "hwlock-names", name);
    if (index < 0)
        return index;

    return of_hwspin_lock_get_id(np, index);
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id_byname);
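
/*
 * The named lookup expects a matching "hwlock-names" property next to
 * "hwlocks", e.g. (names are hypothetical):
 *
 *    client {
 *        hwlocks = <&hwlock1 2>, <&hwlock1 7>;
 *        hwlock-names = "tx", "rx";
 *    };
 *
 * With this node, of_hwspin_lock_get_id_byname(np, "rx") is equivalent to
 * of_hwspin_lock_get_id(np, 1).
 */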

static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
    struct hwspinlock *tmp;
    int ret;

    mutex_lock(&hwspinlock_tree_lock);

    ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
    if (ret) {
        if (ret == -EEXIST)
            pr_err("hwspinlock id %d already exists!\n", id);
        goto out;
    }

    /* mark this hwspinlock as available */
    tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);

    /* self-sanity check which should never fail */
    WARN_ON(tmp != hwlock);

out:
    mutex_unlock(&hwspinlock_tree_lock);
    return ret;
}

static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
    struct hwspinlock *hwlock = NULL;
    int ret;

    mutex_lock(&hwspinlock_tree_lock);

    /* make sure the hwspinlock is not in use (tag is set) */
    ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
    if (ret == 0) {
        pr_err("hwspinlock %d still in use (or not present)\n", id);
        goto out;
    }

    hwlock = radix_tree_delete(&hwspinlock_tree, id);
    if (!hwlock) {
        pr_err("failed to delete hwspinlock %d\n", id);
        goto out;
    }

out:
    mutex_unlock(&hwspinlock_tree_lock);
    return hwlock;
}

/**
 * hwspin_lock_register() - register a new hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @dev: the backing device
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
        const struct hwspinlock_ops *ops, int base_id, int num_locks)
{
    struct hwspinlock *hwlock;
    int ret = 0, i;

    if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
                            !ops->unlock) {
        pr_err("invalid parameters\n");
        return -EINVAL;
    }

    bank->dev = dev;
    bank->ops = ops;
    bank->base_id = base_id;
    bank->num_locks = num_locks;

    for (i = 0; i < num_locks; i++) {
        hwlock = &bank->lock[i];

        spin_lock_init(&hwlock->lock);
        hwlock->bank = bank;

        ret = hwspin_lock_register_single(hwlock, base_id + i);
        if (ret)
            goto reg_failed;
    }

    return 0;

reg_failed:
    while (--i >= 0)
        hwspin_lock_unregister_single(base_id + i);
    return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);
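
/*
 * A registration sketch for a platform-specific implementation (the
 * vendor_* names, the readl()-based trylock semantics and the lock count
 * are assumptions for illustration only; each lock's priv pointer is
 * assumed to have been set to its register address beforehand):
 *
 *    static int vendor_hwspinlock_trylock(struct hwspinlock *lock)
 *    {
 *        void __iomem *addr = lock->priv;
 *
 *        return readl(addr) == 0;
 *    }
 *
 *    static void vendor_hwspinlock_unlock(struct hwspinlock *lock)
 *    {
 *        writel(0, lock->priv);
 *    }
 *
 *    static const struct hwspinlock_ops vendor_hwspinlock_ops = {
 *        .trylock = vendor_hwspinlock_trylock,
 *        .unlock  = vendor_hwspinlock_unlock,
 *    };
 *
 *    ret = hwspin_lock_register(bank, &pdev->dev, &vendor_hwspinlock_ops,
 *                               0, 32);
 */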

/**
 * hwspin_lock_unregister() - unregister an hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_unregister(struct hwspinlock_device *bank)
{
    struct hwspinlock *hwlock, *tmp;
    int i;

    for (i = 0; i < bank->num_locks; i++) {
        hwlock = &bank->lock[i];

        tmp = hwspin_lock_unregister_single(bank->base_id + i);
        if (!tmp)
            return -EBUSY;

        /* self-sanity check that should never fail */
        WARN_ON(tmp != hwlock);
    }

    return 0;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);

static void devm_hwspin_lock_unreg(struct device *dev, void *res)
{
    hwspin_lock_unregister(*(struct hwspinlock_device **)res);
}

static int devm_hwspin_lock_device_match(struct device *dev, void *res,
                     void *data)
{
    struct hwspinlock_device **bank = res;

    if (WARN_ON(!bank || !*bank))
        return 0;

    return *bank == data;
}

/**
 * devm_hwspin_lock_unregister() - unregister an hw spinlock device for
 *                 a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_unregister(struct device *dev,
                struct hwspinlock_device *bank)
{
    int ret;

    ret = devres_release(dev, devm_hwspin_lock_unreg,
                 devm_hwspin_lock_device_match, bank);
    WARN_ON(ret);

    return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_unregister);

/**
 * devm_hwspin_lock_register() - register a new hw spinlock device for
 *               a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_register(struct device *dev,
                  struct hwspinlock_device *bank,
                  const struct hwspinlock_ops *ops,
                  int base_id, int num_locks)
{
    struct hwspinlock_device **ptr;
    int ret;

    ptr = devres_alloc(devm_hwspin_lock_unreg, sizeof(*ptr), GFP_KERNEL);
    if (!ptr)
        return -ENOMEM;

    ret = hwspin_lock_register(bank, dev, ops, base_id, num_locks);
    if (!ret) {
        *ptr = bank;
        devres_add(dev, ptr);
    } else {
        devres_free(ptr);
    }

    return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_register);
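
/*
 * Probe-time sketch using the managed variant (hypothetical driver; "bank"
 * is assumed to be devm-allocated with room for num_locks locks):
 *
 *    static int vendor_hwspinlock_probe(struct platform_device *pdev)
 *    {
 *        ...
 *        return devm_hwspin_lock_register(&pdev->dev, bank,
 *                                         &vendor_hwspinlock_ops,
 *                                         0, num_locks);
 *    }
 *
 * The matching hwspin_lock_unregister() then runs automatically when the
 * driver detaches.
 */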

/**
 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
 * @hwlock: the hwspinlock instance to prepare
 *
 * This is an internal function that prepares an hwspinlock instance
 * before it is given to the user. The function assumes that
 * hwspinlock_tree_lock is taken.
 *
 * Returns 0 or positive to indicate success, and a negative value to
 * indicate an error (with the appropriate error code)
 */
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
    struct device *dev = hwlock->bank->dev;
    struct hwspinlock *tmp;
    int ret;

    /* prevent underlying implementation from being removed */
    if (!try_module_get(dev->driver->owner)) {
        dev_err(dev, "%s: can't get owner\n", __func__);
        return -EINVAL;
    }

    /* notify PM core that power is now needed */
    ret = pm_runtime_get_sync(dev);
    if (ret < 0 && ret != -EACCES) {
        dev_err(dev, "%s: can't power on device\n", __func__);
        pm_runtime_put_noidle(dev);
        module_put(dev->driver->owner);
        return ret;
    }

    ret = 0;

    /* mark hwspinlock as used, should not fail */
    tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
                            HWSPINLOCK_UNUSED);

    /* self-sanity check that should never fail */
    WARN_ON(tmp != hwlock);

    return ret;
}

/**
 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
 * @hwlock: a valid hwspinlock instance
 *
 * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
 */
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
    if (!hwlock) {
        pr_err("invalid hwlock\n");
        return -EINVAL;
    }

    return hwlock_to_id(hwlock);
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);

/**
 * hwspin_lock_request() - request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request(void)
{
    struct hwspinlock *hwlock;
    int ret;

    mutex_lock(&hwspinlock_tree_lock);

    /* look for an unused lock */
    ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
                        0, 1, HWSPINLOCK_UNUSED);
    if (ret == 0) {
        pr_warn("a free hwspinlock is not available\n");
        hwlock = NULL;
        goto out;
    }

    /* sanity check that should never fail */
    WARN_ON(ret > 1);

    /* mark as used and power up */
    ret = __hwspin_lock_request(hwlock);
    if (ret < 0)
        hwlock = NULL;

out:
    mutex_unlock(&hwspinlock_tree_lock);
    return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);
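
/*
 * A client-side sketch of dynamic allocation (hypothetical caller):
 *
 *    struct hwspinlock *hwlock;
 *    int id;
 *
 *    hwlock = hwspin_lock_request();
 *    if (!hwlock)
 *        return -EBUSY;
 *
 *    id = hwspin_lock_get_id(hwlock);
 *    (communicate id to the remote core, then synchronize through hwlock)
 *
 *    hwspin_lock_free(hwlock);
 */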

/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
    struct hwspinlock *hwlock;
    int ret;

    mutex_lock(&hwspinlock_tree_lock);

    /* make sure this hwspinlock exists */
    hwlock = radix_tree_lookup(&hwspinlock_tree, id);
    if (!hwlock) {
        pr_warn("hwspinlock %u does not exist\n", id);
        goto out;
    }

    /* sanity check (this shouldn't happen) */
    WARN_ON(hwlock_to_id(hwlock) != id);

    /* make sure this hwspinlock is unused */
    ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
    if (ret == 0) {
        pr_warn("hwspinlock %u is already in use\n", id);
        hwlock = NULL;
        goto out;
    }

    /* mark as used and power up */
    ret = __hwspin_lock_request(hwlock);
    if (ret < 0)
        hwlock = NULL;

out:
    mutex_unlock(&hwspinlock_tree_lock);
    return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);

/**
 * hwspin_lock_free() - free a specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_free(struct hwspinlock *hwlock)
{
    struct device *dev;
    struct hwspinlock *tmp;
    int ret;

    if (!hwlock) {
        pr_err("invalid hwlock\n");
        return -EINVAL;
    }

    dev = hwlock->bank->dev;
    mutex_lock(&hwspinlock_tree_lock);

    /* make sure the hwspinlock is used */
    ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
                            HWSPINLOCK_UNUSED);
    if (ret == 1) {
        dev_err(dev, "%s: hwlock is already free\n", __func__);
        dump_stack();
        ret = -EINVAL;
        goto out;
    }

    /* notify the underlying device that power is not needed */
    pm_runtime_put(dev);

    /* mark this hwspinlock as available */
    tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
                            HWSPINLOCK_UNUSED);

    /* sanity check (this shouldn't happen) */
    WARN_ON(tmp != hwlock);

    module_put(dev->driver->owner);

out:
    mutex_unlock(&hwspinlock_tree_lock);
    return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);

static int devm_hwspin_lock_match(struct device *dev, void *res, void *data)
{
    struct hwspinlock **hwlock = res;

    if (WARN_ON(!hwlock || !*hwlock))
        return 0;

    return *hwlock == data;
}

static void devm_hwspin_lock_release(struct device *dev, void *res)
{
    hwspin_lock_free(*(struct hwspinlock **)res);
}

/**
 * devm_hwspin_lock_free() - free a specific hwspinlock for a managed device
 * @dev: the device to free the specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
    int ret;

    ret = devres_release(dev, devm_hwspin_lock_release,
                 devm_hwspin_lock_match, hwlock);
    WARN_ON(ret);

    return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_free);

/**
 * devm_hwspin_lock_request() - request an hwspinlock for a managed device
 * @dev: the device to request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
{
    struct hwspinlock **ptr, *hwlock;

    ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
    if (!ptr)
        return NULL;

    hwlock = hwspin_lock_request();
    if (hwlock) {
        *ptr = hwlock;
        devres_add(dev, ptr);
    } else {
        devres_free(ptr);
    }

    return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request);

/**
 * devm_hwspin_lock_request_specific() - request for a specific hwspinlock for
 *                   a managed device
 * @dev: the device to request the specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
                             unsigned int id)
{
    struct hwspinlock **ptr, *hwlock;

    ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
    if (!ptr)
        return NULL;

    hwlock = hwspin_lock_request_specific(id);
    if (hwlock) {
        *ptr = hwlock;
        devres_add(dev, ptr);
    } else {
        devres_free(ptr);
    }

    return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request_specific);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");