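// SPDX-License-Identifier: GPL-2.0
/*
 * I/O Address Space ID allocator. There is one global IOASID space, split into
 * subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and
 * free IDs with ioasid_alloc() and ioasid_free().
 */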
#include <linux/ioasid.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

struct ioasid_data {
	ioasid_t id;
	struct ioasid_set *set;
	void *private;
	struct rcu_head rcu;
};
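
/*
 * struct ioasid_allocator_data - Internal data structure to hold information
 * about an allocator. There are two types of allocators:
 *
 * - The default allocator always has its own XArray to track the IOASIDs
 *   allocated.
 * - Custom allocators may share allocation helpers with different private
 *   data. Custom allocators that share the same helper functions also share
 *   the same XArray.
 *
 * @ops:	allocator helper functions and their private data
 * @list:	registered custom allocators
 * @slist:	allocators that share the same ops but different data
 * @flags:	attributes of the allocator
 * @xa:		xarray holding the IOASID space
 * @rcu:	used for kfree_rcu() when unregistering the allocator
 */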
struct ioasid_allocator_data {
	struct ioasid_allocator_ops *ops;
	struct list_head list;
	struct list_head slist;
#define IOASID_ALLOCATOR_CUSTOM BIT(0) /* Needs framework to track results */
	unsigned long flags;
	struct xarray xa;
	struct rcu_head rcu;
};

static DEFINE_SPINLOCK(ioasid_allocator_lock);
static LIST_HEAD(allocators_list);

static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque);
static void default_free(ioasid_t ioasid, void *opaque);

static struct ioasid_allocator_ops default_ops = {
	.alloc = default_alloc,
	.free = default_free,
};

static struct ioasid_allocator_data default_allocator = {
	.ops = &default_ops,
	.flags = 0,
	.xa = XARRAY_INIT(default_allocator.xa, XA_FLAGS_ALLOC),
};

static struct ioasid_allocator_data *active_allocator = &default_allocator;
static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque)
{
	ioasid_t id;

	if (xa_alloc(&default_allocator.xa, &id, opaque, XA_LIMIT(min, max), GFP_ATOMIC)) {
		pr_err("Failed to alloc ioasid from %u to %u\n", min, max);
		return INVALID_IOASID;
	}

	return id;
}

static void default_free(ioasid_t ioasid, void *opaque)
{
	struct ioasid_data *ioasid_data;

	ioasid_data = xa_erase(&default_allocator.xa, ioasid);
	kfree_rcu(ioasid_data, rcu);
}

/* Allocate and initialize a new custom allocator with its helper functions */
static struct ioasid_allocator_data *ioasid_alloc_allocator(struct ioasid_allocator_ops *ops)
{
	struct ioasid_allocator_data *ia_data;

	ia_data = kzalloc(sizeof(*ia_data), GFP_ATOMIC);
	if (!ia_data)
		return NULL;

	xa_init_flags(&ia_data->xa, XA_FLAGS_ALLOC);
	INIT_LIST_HEAD(&ia_data->slist);
	ia_data->flags |= IOASID_ALLOCATOR_CUSTOM;
	ia_data->ops = ops;

	/* For tracking custom allocators that share the same ops */
	list_add_tail(&ops->list, &ia_data->slist);

	return ia_data;
}

static bool use_same_ops(struct ioasid_allocator_ops *a, struct ioasid_allocator_ops *b)
{
	return (a->free == b->free) && (a->alloc == b->alloc);
}
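
/**
 * ioasid_register_allocator - register a custom allocator
 * @ops: the custom allocator ops to be registered
 *
 * Custom allocators take precedence over the default XArray based allocator.
 * Private data associated with the IOASIDs allocated by the custom allocators
 * is managed by the IOASID framework, just like data stored in the XArray by
 * the default allocator.
 *
 * There can be multiple allocators registered but only one is active. In case
 * of runtime removal of a custom allocator, the next one is activated based
 * on the registration ordering. Multiple allocators can share the same alloc()
 * helper, in which case they also share the same IOASID space.
 *
 * A minimal registration sketch (hypothetical driver code; the my_hw_*()
 * helpers and my_device_data are illustrative, not part of this API):
 *
 *	static ioasid_t my_alloc(ioasid_t min, ioasid_t max, void *pdata)
 *	{
 *		return my_hw_alloc_pasid(pdata, min, max);
 *	}
 *
 *	static void my_free(ioasid_t ioasid, void *pdata)
 *	{
 *		my_hw_free_pasid(pdata, ioasid);
 *	}
 *
 *	static struct ioasid_allocator_ops my_ops = {
 *		.alloc	= my_alloc,
 *		.free	= my_free,
 *		.pdata	= &my_device_data,
 *	};
 *
 *	ret = ioasid_register_allocator(&my_ops);
 */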
int ioasid_register_allocator(struct ioasid_allocator_ops *ops)
{
	struct ioasid_allocator_data *ia_data;
	struct ioasid_allocator_data *pallocator;
	int ret = 0;

	spin_lock(&ioasid_allocator_lock);

	ia_data = ioasid_alloc_allocator(ops);
	if (!ia_data) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/*
	 * No particular preference, we activate the first one and keep
	 * the later registered allocators in a list in case the first one gets
	 * removed due to hotplug.
	 */
	if (list_empty(&allocators_list)) {
		WARN_ON(active_allocator != &default_allocator);
		/* Use this new allocator if the default one is not in use */
		if (xa_empty(&active_allocator->xa)) {
			rcu_assign_pointer(active_allocator, ia_data);
			list_add_tail(&ia_data->list, &allocators_list);
			goto out_unlock;
		}
		pr_warn("Default allocator active with outstanding IOASID\n");
		ret = -EAGAIN;
		goto out_free;
	}

	/* Check if the allocator is already registered */
	list_for_each_entry(pallocator, &allocators_list, list) {
		if (pallocator->ops == ops) {
			pr_err("IOASID allocator already registered\n");
			ret = -EEXIST;
			goto out_free;
		} else if (use_same_ops(pallocator->ops, ops)) {
			/*
			 * If the new allocator shares the same ops,
			 * then they will share the same IOASID space.
			 * We should put them under the same xarray.
			 */
			list_add_tail(&ops->list, &pallocator->slist);
			goto out_free;
		}
	}
	list_add_tail(&ia_data->list, &allocators_list);

	spin_unlock(&ioasid_allocator_lock);
	return 0;
out_free:
	kfree(ia_data);
out_unlock:
	spin_unlock(&ioasid_allocator_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(ioasid_register_allocator);
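
/**
 * ioasid_unregister_allocator - Remove a custom IOASID allocator ops
 * @ops: the custom allocator to be removed
 *
 * Remove an allocator from the list and activate the next allocator in
 * the order it was registered, or revert to the default allocator if all
 * custom allocators are unregistered without outstanding IOASIDs.
 */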
void ioasid_unregister_allocator(struct ioasid_allocator_ops *ops)
{
	struct ioasid_allocator_data *pallocator;
	struct ioasid_allocator_ops *sops;

	spin_lock(&ioasid_allocator_lock);
	if (list_empty(&allocators_list)) {
		pr_warn("No custom IOASID allocators active!\n");
		goto exit_unlock;
	}

	list_for_each_entry(pallocator, &allocators_list, list) {
		if (!use_same_ops(pallocator->ops, ops))
			continue;

		if (list_is_singular(&pallocator->slist)) {
			/* No shared helper functions */
			list_del(&pallocator->list);
			/*
			 * All IOASIDs should have been freed before
			 * the last allocator that shares the same ops
			 * is unregistered.
			 */
			WARN_ON(!xa_empty(&pallocator->xa));
			if (list_empty(&allocators_list)) {
				pr_info("No custom IOASID allocators, switch to default.\n");
				rcu_assign_pointer(active_allocator, &default_allocator);
			} else if (pallocator == active_allocator) {
				rcu_assign_pointer(active_allocator,
						   list_first_entry(&allocators_list,
								    struct ioasid_allocator_data, list));
				pr_info("IOASID allocator changed\n");
			}
			kfree_rcu(pallocator, rcu);
			break;
		}

		/*
		 * Find the matching shared ops to delete,
		 * but keep outstanding IOASIDs.
		 */
		list_for_each_entry(sops, &pallocator->slist, list) {
			if (sops == ops) {
				list_del(&ops->list);
				break;
			}
		}
		break;
	}

exit_unlock:
	spin_unlock(&ioasid_allocator_lock);
}
EXPORT_SYMBOL_GPL(ioasid_unregister_allocator);
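
/**
 * ioasid_set_data - Set private data for an allocated ioasid
 * @ioasid: the ID to set data
 * @data: the private data
 *
 * For an IOASID that is already allocated, private data can be set via this
 * API. Future lookups can be done via ioasid_find().
 */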
int ioasid_set_data(ioasid_t ioasid, void *data)
{
	struct ioasid_data *ioasid_data;
	int ret = 0;

	spin_lock(&ioasid_allocator_lock);
	ioasid_data = xa_load(&active_allocator->xa, ioasid);
	if (ioasid_data)
		rcu_assign_pointer(ioasid_data->private, data);
	else
		ret = -ENOENT;
	spin_unlock(&ioasid_allocator_lock);

	/*
	 * Wait for readers to stop accessing the old private data,
	 * so the caller can free it.
	 */
	if (!ret)
		synchronize_rcu();

	return ret;
}
EXPORT_SYMBOL_GPL(ioasid_set_data);
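
/**
 * ioasid_alloc - Allocate an IOASID
 * @set: the IOASID set
 * @min: the minimum ID (inclusive)
 * @max: the maximum ID (inclusive)
 * @private: data private to the caller
 *
 * Allocate an ID between @min and @max. The @private pointer is stored
 * internally and can be retrieved with ioasid_find().
 *
 * Return: the allocated ID on success, or %INVALID_IOASID on failure.
 *
 * A minimal usage sketch (hypothetical caller; my_set and mm are
 * illustrative):
 *
 *	ioasid_t pasid = ioasid_alloc(&my_set, 1, 20, mm);
 *	if (pasid == INVALID_IOASID)
 *		return -ENOSPC;
 */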
ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
		      void *private)
{
	struct ioasid_data *data;
	void *adata;
	ioasid_t id;

	data = kzalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return INVALID_IOASID;

	data->set = set;
	data->private = private;

	/*
	 * Custom allocator needs allocator data to perform platform specific
	 * operations.
	 */
	spin_lock(&ioasid_allocator_lock);
	adata = active_allocator->flags & IOASID_ALLOCATOR_CUSTOM ? active_allocator->ops->pdata : data;
	id = active_allocator->ops->alloc(min, max, adata);
	if (id == INVALID_IOASID) {
		pr_err("Failed ASID allocation %lu\n", active_allocator->flags);
		goto exit_free;
	}

	if ((active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) &&
	    xa_alloc(&active_allocator->xa, &id, data, XA_LIMIT(id, id), GFP_ATOMIC)) {
		/* Custom allocator needs framework to store and track allocation results */
		pr_err("Failed to alloc ioasid from %u\n", id);
		active_allocator->ops->free(id, active_allocator->ops->pdata);
		goto exit_free;
	}
	data->id = id;

	spin_unlock(&ioasid_allocator_lock);
	return id;
exit_free:
	spin_unlock(&ioasid_allocator_lock);
	kfree(data);
	return INVALID_IOASID;
}
EXPORT_SYMBOL_GPL(ioasid_alloc);
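
/**
 * ioasid_free - Free an IOASID
 * @ioasid: the ID to remove
 */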
void ioasid_free(ioasid_t ioasid)
{
	struct ioasid_data *ioasid_data;

	spin_lock(&ioasid_allocator_lock);
	ioasid_data = xa_load(&active_allocator->xa, ioasid);
	if (!ioasid_data) {
		pr_err("Trying to free unknown IOASID %u\n", ioasid);
		goto exit_unlock;
	}

	active_allocator->ops->free(ioasid, active_allocator->ops->pdata);
	/* Custom allocator needs additional steps to free the xa element */
	if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) {
		ioasid_data = xa_erase(&active_allocator->xa, ioasid);
		kfree_rcu(ioasid_data, rcu);
	}

exit_unlock:
	spin_unlock(&ioasid_allocator_lock);
}
EXPORT_SYMBOL_GPL(ioasid_free);
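
/**
 * ioasid_find - Find IOASID data
 * @set: the IOASID set
 * @ioasid: the IOASID to find
 * @getter: function to call on the found object
 *
 * The optional getter function allows taking a reference to the found object
 * under the rcu lock. The function can also check if the object is still
 * valid: if @getter returns false, then the object is invalid and NULL is
 * returned.
 *
 * If the IOASID exists, return the private pointer passed to ioasid_alloc().
 * Private data can be NULL if not set. Return an error if the IOASID is not
 * found, or if @set is not NULL and the IOASID does not belong to the set.
 *
 * A minimal getter sketch (hypothetical caller storing an mm_struct as
 * private data; my_set and pasid are illustrative):
 *
 *	static bool get_mm(void *priv)
 *	{
 *		return mmget_not_zero((struct mm_struct *)priv);
 *	}
 *
 *	mm = ioasid_find(&my_set, pasid, get_mm);
 */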
void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
		  bool (*getter)(void *))
{
	void *priv;
	struct ioasid_data *ioasid_data;
	struct ioasid_allocator_data *idata;

	rcu_read_lock();
	idata = rcu_dereference(active_allocator);
	ioasid_data = xa_load(&idata->xa, ioasid);
	if (!ioasid_data) {
		priv = ERR_PTR(-ENOENT);
		goto unlock;
	}
	if (set && ioasid_data->set != set) {
		/* data found but does not belong to the set */
		priv = ERR_PTR(-EACCES);
		goto unlock;
	}
	/* Now the IOASID and its set are verified, return the private data */
	priv = rcu_dereference(ioasid_data->private);
	if (getter && !getter(priv))
		priv = NULL;
unlock:
	rcu_read_unlock();

	return priv;
}
EXPORT_SYMBOL_GPL(ioasid_find);

MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
MODULE_DESCRIPTION("IO Address Space ID (IOASID) allocator");
MODULE_LICENSE("GPL");