/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CLOSURE_H
#define _LINUX_CLOSURE_H

#include <linux/llist.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/workqueue.h>

/*
 * Closure is perhaps the most overused and abused term in computer science, but
 * since I've been unable to come up with anything better you're stuck with it
 * again.
 *
 * What are closures?
 *
 * They embed a refcount. The basic idea is they count "things that are in
 * progress" - in flight bios, some other thread that's doing something else -
 * anything you might want to wait on.
 *
 * The refcount may be manipulated with closure_get() and closure_put().
 * closure_put() is where many of the interesting things happen, when it causes
 * the refcount to go to 0.
 *
 * Closures can be used to wait on things both synchronously and asynchronously,
 * and synchronous and asynchronous use can be mixed without restriction. To
 * wait synchronously, use closure_sync() - you will sleep until your closure's
 * refcount hits 1.
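 *
 * A synchronous wait might look like this (a sketch; foo_endio() and the bio
 * are hypothetical, as in the asynchronous example further down):
 *
 * struct closure cl;
 *
 * closure_init_stack(&cl);
 *
 * closure_get(&cl);
 * bio->bi_endio = foo_endio;
 * bio_submit(bio);
 *
 * closure_sync(&cl);  // sleeps until the refcount is back down to 1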
 *
 * To wait asynchronously, use
 *   continue_at(cl, next_function, workqueue);
 *
 * passing it, as you might expect, the function to run when nothing is pending
 * and the workqueue to run that function out of.
 *
 * continue_at() also, critically, requires a 'return' immediately following the
 * location where this macro is referenced, to return to the calling function.
 * There's good reason for this.
 *
 * To use closures safely asynchronously, a closure must always, while it is
 * running, hold a refcount owned by the thread that is running it. Otherwise,
 * suppose you submit some bios and wish to have a function run when they all
 * complete:
 *
 * foo_endio(struct bio *bio)
 * {
 *     closure_put(cl);
 * }
 *
 * closure_init(cl);
 *
 * do_stuff();
 * closure_get(cl);
 * bio1->bi_endio = foo_endio;
 * bio_submit(bio1);
 *
 * do_more_stuff();
 * closure_get(cl);
 * bio2->bi_endio = foo_endio;
 * bio_submit(bio2);
 *
 * continue_at(cl, complete_some_read, system_wq);
 *
 * If the closure's refcount started at 0, complete_some_read() could run before
 * the second bio was submitted - which is almost always not what you want! More
 * importantly, it wouldn't be possible to say whether the original thread or
 * complete_some_read()'s thread owned the closure - and whatever state it was
 * associated with!
 *
 * So, closure_init() initializes a closure's refcount to 1 - and when a
 * closure_fn is run, the refcount will be reset to 1 first.
 *
 * Then, the rule is - if you got the refcount with closure_get(), release it
 * with closure_put() (i.e., in a bio->bi_endio function). If you have a
 * refcount on a closure because you called closure_init() or you were run out
 * of a closure - _always_ use continue_at(). Doing so consistently will help
 * eliminate an entire class of particularly pernicious races.
 *
 * Lastly, you might have a wait list dedicated to a specific event, and have no
 * need for specifying the condition - you just want to wait until someone runs
 * closure_wake_up() on the appropriate wait list. In that case, just use
 * closure_wait(). It returns false if the closure was already on a wait list
 * (a closure can only be on one wait list at a time), and true if it was added.
 *
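 * For example (a sketch; the wait list, after_barrier() and the signalling
 * thread are hypothetical):
 *
 * struct closure_waitlist barrier;
 *
 * In the waiting thread:
 *
 * closure_wait(&barrier, cl);
 * continue_at(cl, after_barrier, system_wq);
 *
 * In whichever thread signals the event:
 *
 * closure_wake_up(&barrier);
 *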
 * Parents:
 *
 * closure_init() takes two arguments - it takes the closure to initialize, and
 * a (possibly null) parent.
 *
 * If the parent is non null, the new closure takes a refcount on it for its
 * lifetime; a closure is considered to be "finished" when its refcount hits 0
 * and the function to run is null. Hence
 *
 * continue_at(cl, NULL, NULL);
 *
 * returns up the (spaghetti) stack of closures, precisely like normal return
 * returns up the C stack. continue_at() with non null fn is better thought of
 * as doing a tail call.
 *
 * All this implies that a closure should typically be embedded in a particular
 * struct (which its refcount will normally control the lifetime of), and that
 * struct can very much be thought of as a stack frame.
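 *
 * For example, such a "stack frame" might look like this (a sketch; struct
 * foo_read, foo_read_done() and finish_up() are hypothetical):
 *
 * struct foo_read {
 *     struct closure cl;  // controls the lifetime of foo_read
 * };
 *
 * static void foo_read_done(struct closure *cl)
 * {
 *     struct foo_read *r = container_of(cl, struct foo_read, cl);
 *
 *     finish_up(r);
 *     closure_return(cl);  // "return" to (drop the ref on) cl->parent
 * }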
 */

struct closure;
struct closure_syncer;
typedef void (closure_fn) (struct closure *);
extern struct dentry *bcache_debug;

struct closure_waitlist {
    struct llist_head   list;
};

enum closure_state {
    /*
     * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
     * the thread that owns the closure, and cleared by the thread that's
     * waking up the closure.
     *
     * The rest are for debugging and don't affect behaviour:
     *
     * CLOSURE_RUNNING: Set when a closure is running (i.e. by
     * closure_init() and when closure_put() runs the next function), and
     * must be cleared before remaining hits 0. Primarily to help guard
     * against incorrect usage and accidentally transferring references.
     * continue_at() and closure_return() clear it for you; if you're doing
     * something unusual you can use closure_set_dead() which also helps
     * annotate where references are being transferred.
     */

    CLOSURE_BITS_START  = (1U << 26),
    CLOSURE_DESTRUCTOR  = (1U << 26),
    CLOSURE_WAITING     = (1U << 28),
    CLOSURE_RUNNING     = (1U << 30),
};

#define CLOSURE_GUARD_MASK                  \
    ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_RUNNING) << 1)

#define CLOSURE_REMAINING_MASK      (CLOSURE_BITS_START - 1)
#define CLOSURE_REMAINING_INITIALIZER   (1|CLOSURE_RUNNING)

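/*
 * A sketch of how the bits of 'remaining' are laid out, as implied by the
 * masks above:
 *
 *     bits  0-25     refcount proper (CLOSURE_REMAINING_MASK)
 *     bit  26        CLOSURE_DESTRUCTOR
 *     bit  28        CLOSURE_WAITING
 *     bit  30        CLOSURE_RUNNING
 *     bits 27/29/31  guard bits (CLOSURE_GUARD_MASK) - one above each flag,
 *                    so arithmetic that overflows out of a flag can be
 *                    caught by the debug checks (presumably in closure.c)
 */
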
struct closure {
    union {
        struct {
            struct workqueue_struct *wq;
            struct closure_syncer   *s;
            struct llist_node   list;
            closure_fn      *fn;
        };
        struct work_struct  work;
    };

    struct closure      *parent;

    atomic_t        remaining;

#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
#define CLOSURE_MAGIC_DEAD  0xc054dead
#define CLOSURE_MAGIC_ALIVE 0xc054a11e

    unsigned int        magic;
    struct list_head    all;
    unsigned long       ip;
    unsigned long       waiting_on;
#endif
};

void closure_sub(struct closure *cl, int v);
void closure_put(struct closure *cl);
void __closure_wake_up(struct closure_waitlist *list);
bool closure_wait(struct closure_waitlist *list, struct closure *cl);
void __closure_sync(struct closure *cl);

/**
 * closure_sync - sleep until a closure has nothing left to wait on
 * @cl: closure to wait on
 *
 * Sleeps until the refcount hits 1 - the thread that's running the closure owns
 * the last refcount.
 */
static inline void closure_sync(struct closure *cl)
{
    if ((atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK) != 1)
        __closure_sync(cl);
}

#ifdef CONFIG_BCACHE_CLOSURES_DEBUG

void closure_debug_init(void);
void closure_debug_create(struct closure *cl);
void closure_debug_destroy(struct closure *cl);

#else

static inline void closure_debug_init(void) {}
static inline void closure_debug_create(struct closure *cl) {}
static inline void closure_debug_destroy(struct closure *cl) {}

#endif

static inline void closure_set_ip(struct closure *cl)
{
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
    cl->ip = _THIS_IP_;
#endif
}

static inline void closure_set_ret_ip(struct closure *cl)
{
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
    cl->ip = _RET_IP_;
#endif
}

static inline void closure_set_waiting(struct closure *cl, unsigned long f)
{
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
    cl->waiting_on = f;
#endif
}

static inline void closure_set_stopped(struct closure *cl)
{
    atomic_sub(CLOSURE_RUNNING, &cl->remaining);
}

static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
                  struct workqueue_struct *wq)
{
    closure_set_ip(cl);
    cl->fn = fn;
    cl->wq = wq;
    /* make the stores above visible before the atomic_dec() in closure_put() */
    smp_mb__before_atomic();
}

static inline void closure_queue(struct closure *cl)
{
    struct workqueue_struct *wq = cl->wq;

    /*
     * 'fn' aliases 'work.func' through the union above; if the layout of
     * struct closure or struct work_struct changes, work.func may no
     * longer point at the right function.
     */
    BUILD_BUG_ON(offsetof(struct closure, fn)
             != offsetof(struct work_struct, func));
    if (wq) {
        INIT_WORK(&cl->work, cl->work.func);
        BUG_ON(!queue_work(wq, &cl->work));
    } else
        cl->fn(cl);
}

/**
 * closure_get - increment a closure's refcount
 */
static inline void closure_get(struct closure *cl)
{
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
    BUG_ON((atomic_inc_return(&cl->remaining) &
        CLOSURE_REMAINING_MASK) <= 1);
#else
    atomic_inc(&cl->remaining);
#endif
}

/**
 * closure_init - Initialize a closure, setting the refcount to 1
 * @cl:     closure to initialize
 * @parent: parent of the new closure. cl will take a refcount on it for its
 *      lifetime; may be NULL.
 */
static inline void closure_init(struct closure *cl, struct closure *parent)
{
    memset(cl, 0, sizeof(struct closure));
    cl->parent = parent;
    if (parent)
        closure_get(parent);

    atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);

    closure_debug_create(cl);
    closure_set_ip(cl);
}

static inline void closure_init_stack(struct closure *cl)
{
    memset(cl, 0, sizeof(struct closure));
    atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
}

/**
 * closure_wake_up - wake up all closures on a wait list,
 *           with memory barrier
 */
static inline void closure_wake_up(struct closure_waitlist *list)
{
    /* Memory barrier for the wait list */
    smp_mb();
    __closure_wake_up(list);
}

/**
 * continue_at - jump to another function with barrier
 *
 * After @cl is no longer waiting on anything (i.e. all outstanding refs have
 * been dropped with closure_put()), it will resume execution at @fn running out
 * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
 *
 * This is because after calling continue_at() you no longer have a ref on @cl,
 * and whatever @cl owns may be freed out from under you - a running closure fn
 * has a ref on its own closure which continue_at() drops.
 *
 * Note you are expected to immediately return after using this macro.
 */
#define continue_at(_cl, _fn, _wq)                  \
do {                                    \
    set_closure_fn(_cl, _fn, _wq);                  \
    closure_sub(_cl, CLOSURE_RUNNING + 1);              \
} while (0)
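
/*
 * For example (a sketch; start_read(), finish_read() and submit_bios() are
 * hypothetical):
 *
 *     static void start_read(struct closure *cl)
 *     {
 *         submit_bios(cl);    // each bio holds a ref on cl
 *         continue_at(cl, finish_read, system_wq);
 *         // nothing may follow continue_at(): we no longer own a ref on
 *         // cl, so here the function simply falls off the end (returns).
 *     }
 */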

/**
 * closure_return - finish execution of a closure
 *
 * This is used to indicate that @cl is finished: when all outstanding refs on
 * @cl have been dropped @cl's ref on its parent closure (as passed to
 * closure_init()) will be dropped, if one was specified - thus this can be
 * thought of as returning to the parent closure.
 */
#define closure_return(_cl) continue_at((_cl), NULL, NULL)

/**
 * continue_at_nobarrier - jump to another function without barrier
 *
 * Causes @fn to be executed out of @cl, in @wq context (or called directly if
 * @wq is NULL).
 *
 * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
 * thus it's not safe to touch anything protected by @cl after a
 * continue_at_nobarrier().
 */
#define continue_at_nobarrier(_cl, _fn, _wq)                \
do {                                    \
    set_closure_fn(_cl, _fn, _wq);                  \
    closure_queue(_cl);                     \
} while (0)

/**
 * closure_return_with_destructor - finish execution of a closure,
 *                  with destructor
 *
 * Works like closure_return(), except @destructor will be called when all
 * outstanding refs on @cl have been dropped; @destructor may be used to safely
 * free the memory occupied by @cl, and it is called with the ref on the parent
 * closure still held - so @destructor could safely return an item to a
 * freelist protected by @cl's parent.
 */
#define closure_return_with_destructor(_cl, _destructor)        \
do {                                    \
    set_closure_fn(_cl, _destructor, NULL);             \
    closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1); \
} while (0)
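
/*
 * For example (a sketch; struct foo, foo_free() and foo_done() are
 * hypothetical):
 *
 *     static void foo_free(struct closure *cl)
 *     {
 *         kfree(container_of(cl, struct foo, cl));  // all refs are gone
 *     }
 *
 *     static void foo_done(struct closure *cl)
 *     {
 *         closure_return_with_destructor(cl, foo_free);
 *     }
 */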

/**
 * closure_call - execute @fn out of a new, uninitialized closure
 *
 * Typically used when running out of one closure, and we want to run @fn
 * asynchronously out of a new closure - @parent will then wait for @cl to
 * finish.
 */
static inline void closure_call(struct closure *cl, closure_fn fn,
                struct workqueue_struct *wq,
                struct closure *parent)
{
    closure_init(cl, parent);
    continue_at_nobarrier(cl, fn, wq);
}
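
/*
 * For example (a sketch; foo_sub_io() and the embedding struct 'f' are
 * hypothetical), kicking off a child operation out of a fresh closure with
 * the current closure as its parent:
 *
 *     closure_call(&f->cl, foo_sub_io, system_wq, cl);
 *
 * Because closure_init() takes a ref on the parent, the parent cannot finish
 * before the child closure does.
 */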

#endif /* _LINUX_CLOSURE_H */