// SPDX-License-Identifier: GPL-2.0
/*
 * I/O Address Space ID allocator. There is one global IOASID space, split into
 * subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and
 * free IOASIDs with ioasid_alloc() and ioasid_free().
 */
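
/*
 * Typical usage, as a minimal illustrative sketch (the set name, the ID
 * range and struct my_ctx are hypothetical, not part of this file):
 *
 *     DECLARE_IOASID_SET(my_set);
 *
 *     struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *     ioasid_t id = ioasid_alloc(&my_set, 1, (1 << 20) - 1, ctx);
 *
 *     if (id == INVALID_IOASID) {
 *         kfree(ctx);
 *         return -ENOSPC;
 *     }
 *     ...
 *     ioasid_free(id);
 *     kfree(ctx);
 */
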
#include <linux/ioasid.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

struct ioasid_data {
    ioasid_t id;
    struct ioasid_set *set;
    void *private;
    struct rcu_head rcu;
};

/*
 * struct ioasid_allocator_data - Internal data structure to hold information
 * about an allocator. There are two types of allocators:
 *
 * - The default allocator always has its own XArray to track the IOASIDs
 *   allocated.
 * - Custom allocators may share allocation helpers with different private
 *   data. Custom allocators that share the same helper functions also share
 *   the same XArray.
 * Rules:
 * 1. The default allocator is always available, never dynamically registered.
 *    This prevents race conditions with early boot code that wants to
 *    register custom allocators or allocate IOASIDs.
 * 2. Custom allocators take precedence over the default allocator.
 * 3. When all custom allocators sharing the same helper functions are
 *    unregistered (e.g. due to hotplug), all outstanding IOASIDs must be
 *    freed. Otherwise, outstanding IOASIDs will be lost and orphaned.
 * 4. When switching between custom allocators sharing the same helper
 *    functions, outstanding IOASIDs are preserved.
 * 5. When switching between a custom allocator and the default allocator,
 *    all IOASIDs must be freed to ensure an unadulterated space for the new
 *    allocator.
 *
 * @ops:    allocator helper functions and their data
 * @list:   registered custom allocators
 * @slist:  allocators that share the same ops but different data
 * @flags:  attributes of the allocator
 * @xa:     xarray holding the IOASID space
 * @rcu:    used for kfree_rcu when unregistering the allocator
 */
struct ioasid_allocator_data {
    struct ioasid_allocator_ops *ops;
    struct list_head list;
    struct list_head slist;
#define IOASID_ALLOCATOR_CUSTOM BIT(0) /* Needs framework to track results */
    unsigned long flags;
    struct xarray xa;
    struct rcu_head rcu;
};
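
/*
 * Illustrative layout (an explanatory sketch, not from the original
 * source): custom allocators hang off the global allocators_list via
 * @list, while ops instances sharing the same helpers hang off one
 * allocator's @slist and share its @xa:
 *
 *     allocators_list -> ia_data A -> ia_data B        (via @list)
 *                            |
 *                          @slist -> ops1 -> ops2      (same helpers,
 *                                                       one shared @xa)
 */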

static DEFINE_SPINLOCK(ioasid_allocator_lock);
static LIST_HEAD(allocators_list);

static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque);
static void default_free(ioasid_t ioasid, void *opaque);

static struct ioasid_allocator_ops default_ops = {
    .alloc = default_alloc,
    .free = default_free,
};

static struct ioasid_allocator_data default_allocator = {
    .ops = &default_ops,
    .flags = 0,
    .xa = XARRAY_INIT(ioasid_xa, XA_FLAGS_ALLOC),
};

static struct ioasid_allocator_data *active_allocator = &default_allocator;

static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque)
{
    ioasid_t id;

    if (xa_alloc(&default_allocator.xa, &id, opaque, XA_LIMIT(min, max), GFP_ATOMIC)) {
        pr_err("Failed to alloc ioasid from %u to %u\n", min, max);
        return INVALID_IOASID;
    }

    return id;
}

static void default_free(ioasid_t ioasid, void *opaque)
{
    struct ioasid_data *ioasid_data;

    ioasid_data = xa_erase(&default_allocator.xa, ioasid);
    kfree_rcu(ioasid_data, rcu);
}

/* Allocate and initialize a new custom allocator with its helper functions */
static struct ioasid_allocator_data *ioasid_alloc_allocator(struct ioasid_allocator_ops *ops)
{
    struct ioasid_allocator_data *ia_data;

    ia_data = kzalloc(sizeof(*ia_data), GFP_ATOMIC);
    if (!ia_data)
        return NULL;

    xa_init_flags(&ia_data->xa, XA_FLAGS_ALLOC);
    INIT_LIST_HEAD(&ia_data->slist);
    ia_data->flags |= IOASID_ALLOCATOR_CUSTOM;
    ia_data->ops = ops;

    /* For tracking custom allocators that share the same ops */
    list_add_tail(&ops->list, &ia_data->slist);

    return ia_data;
}

static bool use_same_ops(struct ioasid_allocator_ops *a, struct ioasid_allocator_ops *b)
{
    return (a->free == b->free) && (a->alloc == b->alloc);
}

/**
 * ioasid_register_allocator - register a custom allocator
 * @ops: the custom allocator ops to be registered
 *
 * Custom allocators take precedence over the default xarray-based allocator.
 * Private data associated with IOASIDs allocated by custom allocators is
 * managed by the IOASID framework, just like data stored in the xa by the
 * default allocator.
 *
 * There can be multiple allocators registered but only one is active. In case
 * of runtime removal of a custom allocator, the next one is activated based
 * on the registration ordering.
 *
 * Multiple allocators can share the same alloc() function; in this case the
 * IOASID space is shared.
 */
int ioasid_register_allocator(struct ioasid_allocator_ops *ops)
{
    struct ioasid_allocator_data *ia_data;
    struct ioasid_allocator_data *pallocator;
    int ret = 0;

    spin_lock(&ioasid_allocator_lock);

    ia_data = ioasid_alloc_allocator(ops);
    if (!ia_data) {
        ret = -ENOMEM;
        goto out_unlock;
    }

    /*
     * No particular preference; we activate the first one and keep
     * later registered allocators in a list in case the first one gets
     * removed due to hotplug.
     */
    if (list_empty(&allocators_list)) {
        WARN_ON(active_allocator != &default_allocator);
        /* Use the new allocator if the default one holds no IOASIDs */
        if (xa_empty(&active_allocator->xa)) {
            rcu_assign_pointer(active_allocator, ia_data);
            list_add_tail(&ia_data->list, &allocators_list);
            goto out_unlock;
        }
        pr_warn("Default allocator active with outstanding IOASID\n");
        ret = -EAGAIN;
        goto out_free;
    }

    /* Check if the allocator is already registered */
    list_for_each_entry(pallocator, &allocators_list, list) {
        if (pallocator->ops == ops) {
            pr_err("IOASID allocator already registered\n");
            ret = -EEXIST;
            goto out_free;
        } else if (use_same_ops(pallocator->ops, ops)) {
            /*
             * If the new allocator shares the same ops,
             * then they will share the same IOASID space.
             * We should put them under the same xarray.
             */
            list_add_tail(&ops->list, &pallocator->slist);
            goto out_free;
        }
    }
    list_add_tail(&ia_data->list, &allocators_list);

    spin_unlock(&ioasid_allocator_lock);
    return 0;
out_free:
    kfree(ia_data);
out_unlock:
    spin_unlock(&ioasid_allocator_lock);
    return ret;
}
EXPORT_SYMBOL_GPL(ioasid_register_allocator);
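
/*
 * Example registration, as a hedged sketch: a hypothetical platform
 * driver supplying a paravirtualized allocator. pv_ioasid_alloc(),
 * pv_ioasid_free() and pv_data are invented names for illustration.
 *
 *     static ioasid_t pv_ioasid_alloc(ioasid_t min, ioasid_t max, void *data)
 *     {
 *         // e.g. ask the host for an ID in [min, max];
 *         // return INVALID_IOASID on failure
 *     }
 *
 *     static void pv_ioasid_free(ioasid_t ioasid, void *data)
 *     {
 *         // tell the host to release the ID
 *     }
 *
 *     static struct ioasid_allocator_ops pv_ops = {
 *         .alloc = pv_ioasid_alloc,
 *         .free  = pv_ioasid_free,
 *         .pdata = &pv_data,
 *     };
 *
 *     ret = ioasid_register_allocator(&pv_ops);
 */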

/**
 * ioasid_unregister_allocator - Remove a custom IOASID allocator ops
 * @ops: the custom allocator to be removed
 *
 * Remove an allocator from the list and activate the next allocator in
 * registration order, or revert to the default allocator if all custom
 * allocators have been unregistered and no IOASIDs are outstanding.
 */
void ioasid_unregister_allocator(struct ioasid_allocator_ops *ops)
{
    struct ioasid_allocator_data *pallocator;
    struct ioasid_allocator_ops *sops;

    spin_lock(&ioasid_allocator_lock);
    if (list_empty(&allocators_list)) {
        pr_warn("No custom IOASID allocators active!\n");
        goto exit_unlock;
    }

    list_for_each_entry(pallocator, &allocators_list, list) {
        if (!use_same_ops(pallocator->ops, ops))
            continue;

        if (list_is_singular(&pallocator->slist)) {
            /* No other registered ops share these helper functions */
            list_del(&pallocator->list);
            /*
             * All IOASIDs should have been freed before
             * the last allocator that shares the same ops
             * is unregistered.
             */
            WARN_ON(!xa_empty(&pallocator->xa));
            if (list_empty(&allocators_list)) {
                pr_info("No custom IOASID allocators, switch to default.\n");
                rcu_assign_pointer(active_allocator, &default_allocator);
            } else if (pallocator == active_allocator) {
                rcu_assign_pointer(active_allocator,
                        list_first_entry(&allocators_list,
                                struct ioasid_allocator_data, list));
                pr_info("IOASID allocator changed\n");
            }
            kfree_rcu(pallocator, rcu);
            break;
        }
        /*
         * Find the matching shared ops to delete,
         * but keep outstanding IOASIDs
         */
        list_for_each_entry(sops, &pallocator->slist, list) {
            if (sops == ops) {
                list_del(&ops->list);
                break;
            }
        }
        break;
    }

exit_unlock:
    spin_unlock(&ioasid_allocator_lock);
}
EXPORT_SYMBOL_GPL(ioasid_unregister_allocator);
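
/*
 * Example pairing, as a sketch: a hot-remove path frees every
 * outstanding ID it allocated before dropping its allocator, so that
 * rule 3 above holds and the WARN_ON in this function stays quiet.
 * pv_ops and id come from the hypothetical registration sketch above.
 *
 *     ioasid_free(id);                 // for each outstanding ID
 *     ioasid_unregister_allocator(&pv_ops);
 */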

/**
 * ioasid_set_data - Set private data for an allocated ioasid
 * @ioasid: the ID to set data for
 * @data:   the private data
 *
 * For an IOASID that is already allocated, private data can be set
 * via this API. Future lookups can be done via ioasid_find().
 */
int ioasid_set_data(ioasid_t ioasid, void *data)
{
    struct ioasid_data *ioasid_data;
    int ret = 0;

    spin_lock(&ioasid_allocator_lock);
    ioasid_data = xa_load(&active_allocator->xa, ioasid);
    if (ioasid_data)
        rcu_assign_pointer(ioasid_data->private, data);
    else
        ret = -ENOENT;
    spin_unlock(&ioasid_allocator_lock);

    /*
     * Wait for readers to stop accessing the old private data, so the
     * caller can free it.
     */
    if (!ret)
        synchronize_rcu();

    return ret;
}
EXPORT_SYMBOL_GPL(ioasid_set_data);
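
/*
 * Example, as a hypothetical sketch: allocate first, attach the payload
 * once it exists. my_set, max_pasid and svm_state are invented names.
 *
 *     ioasid_t pasid = ioasid_alloc(&my_set, 1, max_pasid, NULL);
 *
 *     ...    // build the state the IOASID should point to
 *     ret = ioasid_set_data(pasid, svm_state);
 */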

/**
 * ioasid_alloc - Allocate an IOASID
 * @set: the IOASID set
 * @min: the minimum ID (inclusive)
 * @max: the maximum ID (inclusive)
 * @private: data private to the caller
 *
 * Allocate an ID between @min and @max. The @private pointer is stored
 * internally and can be retrieved with ioasid_find().
 *
 * Return: the allocated ID on success, or %INVALID_IOASID on failure.
 */
ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
              void *private)
{
    struct ioasid_data *data;
    void *adata;
    ioasid_t id;

    data = kzalloc(sizeof(*data), GFP_ATOMIC);
    if (!data)
        return INVALID_IOASID;

    data->set = set;
    data->private = private;

    /*
     * A custom allocator needs its own data to perform platform-specific
     * operations.
     */
    spin_lock(&ioasid_allocator_lock);
    adata = active_allocator->flags & IOASID_ALLOCATOR_CUSTOM ? active_allocator->ops->pdata : data;
    id = active_allocator->ops->alloc(min, max, adata);
    if (id == INVALID_IOASID) {
        pr_err("Failed ASID allocation %lu\n", active_allocator->flags);
        goto exit_free;
    }

    if ((active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) &&
         xa_alloc(&active_allocator->xa, &id, data, XA_LIMIT(id, id), GFP_ATOMIC)) {
        /* A custom allocator needs the framework to store and track allocation results */
        pr_err("Failed to alloc ioasid from %u\n", id);
        active_allocator->ops->free(id, active_allocator->ops->pdata);
        goto exit_free;
    }
    data->id = id;

    spin_unlock(&ioasid_allocator_lock);
    return id;
exit_free:
    spin_unlock(&ioasid_allocator_lock);
    kfree(data);
    return INVALID_IOASID;
}
EXPORT_SYMBOL_GPL(ioasid_alloc);

/**
 * ioasid_free - Free an ioasid
 * @ioasid: the ID to remove
 */
void ioasid_free(ioasid_t ioasid)
{
    struct ioasid_data *ioasid_data;

    spin_lock(&ioasid_allocator_lock);
    ioasid_data = xa_load(&active_allocator->xa, ioasid);
    if (!ioasid_data) {
        pr_err("Trying to free unknown IOASID %u\n", ioasid);
        goto exit_unlock;
    }

    active_allocator->ops->free(ioasid, active_allocator->ops->pdata);
    /* A custom allocator needs an additional step to free the xa element */
    if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) {
        ioasid_data = xa_erase(&active_allocator->xa, ioasid);
        kfree_rcu(ioasid_data, rcu);
    }

exit_unlock:
    spin_unlock(&ioasid_allocator_lock);
}
EXPORT_SYMBOL_GPL(ioasid_free);

/**
 * ioasid_find - Find IOASID data
 * @set: the IOASID set
 * @ioasid: the IOASID to find
 * @getter: function to call on the found object
 *
 * The optional getter function allows the caller to take a reference to the
 * found object under the RCU read lock. The function can also check whether
 * the object is still valid: if @getter returns false, then the object is
 * invalid and NULL is returned.
 *
 * If the IOASID exists, return the private pointer passed to ioasid_alloc().
 * Private data can be NULL if not set. Return an error if the IOASID is not
 * found, or if @set is not NULL and the IOASID does not belong to the set.
 */
void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
          bool (*getter)(void *))
{
    void *priv;
    struct ioasid_data *ioasid_data;
    struct ioasid_allocator_data *idata;

    rcu_read_lock();
    idata = rcu_dereference(active_allocator);
    ioasid_data = xa_load(&idata->xa, ioasid);
    if (!ioasid_data) {
        priv = ERR_PTR(-ENOENT);
        goto unlock;
    }
    if (set && ioasid_data->set != set) {
        /* Data found but does not belong to the set */
        priv = ERR_PTR(-EACCES);
        goto unlock;
    }
    /* Now that the IOASID and its set are verified, return the private data */
    priv = rcu_dereference(ioasid_data->private);
    if (getter && !getter(priv))
        priv = NULL;
unlock:
    rcu_read_unlock();

    return priv;
}
EXPORT_SYMBOL_GPL(ioasid_find);
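
/*
 * Example getter, as a sketch: take a reference under the RCU read lock
 * and reject objects being torn down. struct my_obj and its kref are
 * hypothetical.
 *
 *     static bool get_my_obj(void *data)
 *     {
 *         struct my_obj *obj = data;
 *
 *         return kref_get_unless_zero(&obj->kref);
 *     }
 *
 *     obj = ioasid_find(&my_set, pasid, get_my_obj);
 *     if (IS_ERR_OR_NULL(obj))
 *         return -ENODEV;
 */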

MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
MODULE_DESCRIPTION("IO Address Space ID (IOASID) allocator");
MODULE_LICENSE("GPL");