// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2018 Mellanox Technologies */

#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/xarray.h>
#include <linux/hashtable.h>
#include <linux/refcount.h>

#include "mapping.h"

/* How long (in msecs) a removed mapping stays resolvable before it is freed */
#define MAPPING_GRACE_PERIOD 2000

static LIST_HEAD(shared_ctx_list);
static DEFINE_MUTEX(shared_ctx_lock);

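/*
 * A mapping context keeps two indexes over the same items: a hashtable
 * keyed by the raw data, so identical blobs dedup to one refcounted id,
 * and an xarray keyed by the allocated id, for reverse lookup. Contexts
 * created through mapping_create_for_id() also sit on shared_ctx_list,
 * so callers asking for the same (id, type) pair share a single context.
 */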
struct mapping_ctx {
    struct xarray xarray;
    DECLARE_HASHTABLE(ht, 8);
    struct mutex lock; /* Guards hashtable and xarray */
    unsigned long max_id;
    size_t data_size;
    bool delayed_removal;
    struct delayed_work dwork;
    struct list_head pending_list;
    spinlock_t pending_list_lock; /* Guards pending list */
    u64 id;
    u8 type;
    struct list_head list;
    refcount_t refcount;
};

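/*
 * A single mapping entry. `data` holds the caller's blob (ctx->data_size
 * bytes), `cnt` counts how many mapping_add() calls currently share the
 * entry, and `timeout`/`list` are only used while the entry waits on the
 * pending list of a delayed-removal context.
 */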
struct mapping_item {
    struct rcu_head rcu;
    struct list_head list;
    unsigned long timeout;
    struct hlist_node node;
    int cnt;
    u32 id;
    char data[];
};

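/*
 * Map `data` to an id. If an item with identical data already exists,
 * its refcount is bumped and its id is returned; otherwise a new item is
 * allocated, hashed, and assigned a fresh id from the xarray.
 * Returns 0 and stores the id in *id, or a negative errno.
 */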
int mapping_add(struct mapping_ctx *ctx, void *data, u32 *id)
{
    struct mapping_item *mi;
    int err = -ENOMEM;
    u32 hash_key;

    mutex_lock(&ctx->lock);

    /* Reuse an existing item if an identical blob is already mapped */
    hash_key = jhash(data, ctx->data_size, 0);
    hash_for_each_possible(ctx->ht, mi, node, hash_key) {
        if (!memcmp(data, mi->data, ctx->data_size))
            goto attach;
    }

    mi = kzalloc(sizeof(*mi) + ctx->data_size, GFP_KERNEL);
    if (!mi)
        goto err_alloc;

    memcpy(mi->data, data, ctx->data_size);
    hash_add(ctx->ht, &mi->node, hash_key);

    err = xa_alloc(&ctx->xarray, &mi->id, mi, XA_LIMIT(1, ctx->max_id),
                   GFP_KERNEL);
    if (err)
        goto err_assign;
attach:
    ++mi->cnt;
    *id = mi->id;

    mutex_unlock(&ctx->lock);

    return 0;

err_assign:
    hash_del(&mi->node);
    kfree(mi);
err_alloc:
    mutex_unlock(&ctx->lock);

    return err;
}

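/*
 * Unpublish the id and free the item. kfree_rcu() is used because
 * mapping_find() may still be dereferencing the item under RCU.
 */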
static void mapping_remove_and_free(struct mapping_ctx *ctx,
                                    struct mapping_item *mi)
{
    xa_erase(&ctx->xarray, mi->id);
    kfree_rcu(mi, rcu);
}

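/*
 * With delayed removal the item is not torn down right away: it is parked
 * on the pending list for MAPPING_GRACE_PERIOD, during which its id still
 * resolves via mapping_find(). That gives in-flight users of the id time
 * to finish before the id is recycled.
 */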
static void mapping_free_item(struct mapping_ctx *ctx,
                              struct mapping_item *mi)
{
    if (!ctx->delayed_removal) {
        mapping_remove_and_free(ctx, mi);
        return;
    }

    /* Park the item; the delayed work reaps it once the grace period ends */
    mi->timeout = jiffies + msecs_to_jiffies(MAPPING_GRACE_PERIOD);

    spin_lock(&ctx->pending_list_lock);
    list_add_tail(&mi->list, &ctx->pending_list);
    spin_unlock(&ctx->pending_list_lock);

    schedule_delayed_work(&ctx->dwork, MAPPING_GRACE_PERIOD);
}

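/*
 * Drop one reference on the item mapped to `id`. The item is only
 * unhashed and freed (or queued for delayed removal) when the last
 * reference goes away. Returns -ENOENT if the id is not mapped.
 */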
int mapping_remove(struct mapping_ctx *ctx, u32 id)
{
    unsigned long index = id;
    struct mapping_item *mi;
    int err = -ENOENT;

    mutex_lock(&ctx->lock);
    mi = xa_load(&ctx->xarray, index);
    if (!mi)
        goto out;
    err = 0;

    if (--mi->cnt > 0)
        goto out;

    hash_del(&mi->node);
    mapping_free_item(ctx, mi);
out:
    mutex_unlock(&ctx->lock);

    return err;
}

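/*
 * Copy the data mapped to `id` into `data` (which must be able to hold
 * ctx->data_size bytes). Lockless: the xarray lookup runs under RCU, and
 * items are freed with kfree_rcu(), so a concurrent removal cannot free
 * the item out from under the memcpy.
 */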
int mapping_find(struct mapping_ctx *ctx, u32 id, void *data)
{
    unsigned long index = id;
    struct mapping_item *mi;
    int err = -ENOENT;

    rcu_read_lock();
    mi = xa_load(&ctx->xarray, index);
    if (!mi)
        goto err_find;

    memcpy(data, mi->data, ctx->data_size);
    err = 0;

err_find:
    rcu_read_unlock();
    return err;
}

static void
mapping_remove_and_free_list(struct mapping_ctx *ctx, struct list_head *list)
{
    struct mapping_item *mi;

    list_for_each_entry(mi, list, list)
        mapping_remove_and_free(ctx, mi);
}

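/*
 * Reap expired items off the pending list onto a private list, free them
 * outside the spinlock, then rearm the work for the earliest timeout that
 * is still pending.
 */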
static void mapping_work_handler(struct work_struct *work)
{
    unsigned long min_timeout = 0, now = jiffies;
    struct mapping_item *mi, *next;
    LIST_HEAD(pending_items);
    struct mapping_ctx *ctx;

    ctx = container_of(work, struct mapping_ctx, dwork.work);

    spin_lock(&ctx->pending_list_lock);
    list_for_each_entry_safe(mi, next, &ctx->pending_list, list) {
        if (time_after(now, mi->timeout))
            list_move(&mi->list, &pending_items);
        else if (!min_timeout ||
                 time_before(mi->timeout, min_timeout))
            min_timeout = mi->timeout;
    }
    spin_unlock(&ctx->pending_list_lock);

    mapping_remove_and_free_list(ctx, &pending_items);

    if (min_timeout)
        schedule_delayed_work(&ctx->dwork, abs(min_timeout - now));
}

static void mapping_flush_work(struct mapping_ctx *ctx)
{
    if (!ctx->delayed_removal)
        return;

    cancel_delayed_work_sync(&ctx->dwork);
    mapping_remove_and_free_list(ctx, &ctx->pending_list);
}

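/*
 * Allocate a private mapping context. `max_id` bounds the ids handed out
 * (0 means "no bound" and falls back to UINT_MAX); XA_FLAGS_ALLOC1 makes
 * the xarray allocate ids starting from 1, so 0 is never a valid id.
 */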
struct mapping_ctx *
mapping_create(size_t data_size, u32 max_id, bool delayed_removal)
{
    struct mapping_ctx *ctx;

    ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
    if (!ctx)
        return ERR_PTR(-ENOMEM);

    ctx->max_id = max_id ? max_id : UINT_MAX;
    ctx->data_size = data_size;

    if (delayed_removal) {
        INIT_DELAYED_WORK(&ctx->dwork, mapping_work_handler);
        INIT_LIST_HEAD(&ctx->pending_list);
        spin_lock_init(&ctx->pending_list_lock);
        ctx->delayed_removal = true;
    }

    mutex_init(&ctx->lock);
    xa_init_flags(&ctx->xarray, XA_FLAGS_ALLOC1);

    refcount_set(&ctx->refcount, 1);
    INIT_LIST_HEAD(&ctx->list);

    return ctx;
}

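/*
 * Look up a shared context by (id, type) and take a reference on it, or
 * create and register a new one if none exists. refcount_inc_not_zero()
 * guards against racing with a mapping_destroy() that has already dropped
 * the last reference but not yet unlinked the context from the list.
 */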
struct mapping_ctx *
mapping_create_for_id(u64 id, u8 type, size_t data_size, u32 max_id,
                      bool delayed_removal)
{
    struct mapping_ctx *ctx;

    mutex_lock(&shared_ctx_lock);
    list_for_each_entry(ctx, &shared_ctx_list, list) {
        if (ctx->id == id && ctx->type == type) {
            if (refcount_inc_not_zero(&ctx->refcount))
                goto unlock;
            break;
        }
    }

    ctx = mapping_create(data_size, max_id, delayed_removal);
    if (IS_ERR(ctx))
        goto unlock;

    ctx->id = id;
    ctx->type = type;
    list_add(&ctx->list, &shared_ctx_list);

unlock:
    mutex_unlock(&shared_ctx_lock);
    return ctx;
}

void mapping_destroy(struct mapping_ctx *ctx)
{
    if (!refcount_dec_and_test(&ctx->refcount))
        return;

    mutex_lock(&shared_ctx_lock);
    list_del(&ctx->list);
    mutex_unlock(&shared_ctx_lock);

    mapping_flush_work(ctx);
    xa_destroy(&ctx->xarray);
    mutex_destroy(&ctx->lock);

    kfree(ctx);
}
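
/*
 * Illustrative sketch (not part of this file): how a caller might use the
 * API above to dedup lookup keys into small ids and resolve them later.
 * `struct example_tun_key`, the 12-bit max_id, and the example_*() name
 * are assumptions made up for the example, not anything this file defines.
 */
#if 0
struct example_tun_key {
    __be32 vni;
    __be32 dst_ip;
};

static int example_use_mapping(void)
{
    struct example_tun_key key = { .vni = cpu_to_be32(42) };
    struct example_tun_key found;
    struct mapping_ctx *ctx;
    u32 id;
    int err;

    /* ids fit in 12 bits; removed entries linger for the grace period */
    ctx = mapping_create(sizeof(key), (1 << 12) - 1, true);
    if (IS_ERR(ctx))
        return PTR_ERR(ctx);

    err = mapping_add(ctx, &key, &id);  /* key -> id (refcounted) */
    if (err)
        goto out;

    err = mapping_find(ctx, id, &found);    /* id -> key, under RCU */

    mapping_remove(ctx, id);        /* drop our reference */
out:
    mapping_destroy(ctx);           /* drop the context */
    return err;
}
#endif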