// SPDX-License-Identifier: GPL-2.0
/*
 * Asynchronous refcounty things
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/sched/debug.h>

#include "closure.h"

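/*
 * Roughly: a closure (struct closure, see closure.h) packs a refcount
 * and a set of state flags into cl->remaining, plus a continuation
 * (cl->fn) to run once the refs drop to zero.  The helper below handles
 * that zero transition: either requeue the closure to run its
 * continuation, or run the destructor and release the ref held on the
 * parent.
 */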
static inline void closure_put_after_sub(struct closure *cl, int flags)
{
    int r = flags & CLOSURE_REMAINING_MASK;

    BUG_ON(flags & CLOSURE_GUARD_MASK);
    BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));

    if (!r) {
        if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
            /*
             * cl->fn is a continuation: reset the refcount
             * and requeue the closure to run it.
             */
            atomic_set(&cl->remaining,
                   CLOSURE_REMAINING_INITIALIZER);
            closure_queue(cl);
        } else {
            struct closure *parent = cl->parent;
            closure_fn *destructor = cl->fn;

            closure_debug_destroy(cl);

            /*
             * The destructor may free cl; don't touch it
             * after this, other than dropping our ref on
             * the saved parent pointer.
             */
            if (destructor)
                destructor(cl);

            if (parent)
                closure_put(parent);
        }
    }
}

/* For clearing flags with the same atomic op as a put */
void closure_sub(struct closure *cl, int v)
{
    closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
}

/*
 * closure_put - decrement a closure's refcount
 */
void closure_put(struct closure *cl)
{
    closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}

/*
 * closure_wake_up - wake up all closures on a wait list, without memory barrier
 */
void __closure_wake_up(struct closure_waitlist *wait_list)
{
    struct llist_node *list;
    struct closure *cl, *t;
    struct llist_node *reverse = NULL;

    list = llist_del_all(&wait_list->list);

    /* We first reverse the list to preserve FIFO ordering and fairness */
    reverse = llist_reverse_order(list);

    /* Then do the wakeups */
    llist_for_each_entry_safe(cl, t, reverse, list) {
        closure_set_waiting(cl, 0);
        /* Clears CLOSURE_WAITING and drops the ref taken in closure_wait() */
        closure_sub(cl, CLOSURE_WAITING + 1);
    }
}

/**
 * closure_wait - add a closure to a waitlist
 * @waitlist: will own a ref on @cl, which will be released when
 * closure_wake_up() is called on @waitlist.
 * @cl: closure pointer.
 *
 * Return: true if @cl was added to @waitlist, false if it was already
 * waiting on one.
 */
bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
{
    if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
        return false;

    closure_set_waiting(cl, _RET_IP_);
    atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
    llist_add(&cl->list, &waitlist->list);

    return true;
}

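/*
 * Typical pairing, as a sketch (next_fn and wq are illustrative names,
 * not part of this file):
 *
 *    if (closure_wait(&waitlist, cl))
 *        continue_at(cl, next_fn, wq);
 *
 * Some other context later calls closure_wake_up(&waitlist) (the
 * closure.h wrapper that issues a memory barrier before calling
 * __closure_wake_up() above) to requeue every waiter.
 */
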
struct closure_syncer {
    struct task_struct  *task;
    int         done;
};

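/*
 * Continuation used by __closure_sync() below: once the last ref is
 * dropped it flags the syncer as done and wakes the sleeping task.  The
 * RCU read section keeps the task_struct from going away between
 * reading s->task and the wake_up_process() call, should the waiter
 * see s->done and exit in the meantime.
 */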
static void closure_sync_fn(struct closure *cl)
{
    struct closure_syncer *s = cl->s;
    struct task_struct *p;

    rcu_read_lock();
    p = READ_ONCE(s->task);
    s->done = 1;
    wake_up_process(p);
    rcu_read_unlock();
}

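/*
 * Park the current task until all other refs on @cl are dropped.
 * continue_at() installs closure_sync_fn() as the continuation and
 * drops this context's ref; once everyone else has let go, the
 * continuation runs and sets s.done.  The loop below is the standard
 * set_current_state()/schedule() sleep pattern.  Callers normally go
 * through the closure_sync() wrapper in closure.h rather than calling
 * this directly.
 */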
void __sched __closure_sync(struct closure *cl)
{
    struct closure_syncer s = { .task = current };

    cl->s = &s;
    continue_at(cl, closure_sync_fn, NULL);

    while (1) {
        set_current_state(TASK_UNINTERRUPTIBLE);
        if (s.done)
            break;
        schedule();
    }

    __set_current_state(TASK_RUNNING);
}

#ifdef CONFIG_BCACHE_CLOSURES_DEBUG

static LIST_HEAD(closure_list);
static DEFINE_SPINLOCK(closure_list_lock);

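/*
 * Debug instrumentation: every live closure sits on closure_list, and
 * the magic field catches double-initialization and use-after-destroy
 * (CLOSURE_MAGIC_ALIVE/CLOSURE_MAGIC_DEAD come from closure.h).
 */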
void closure_debug_create(struct closure *cl)
{
    unsigned long flags;

    BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
    cl->magic = CLOSURE_MAGIC_ALIVE;

    spin_lock_irqsave(&closure_list_lock, flags);
    list_add(&cl->all, &closure_list);
    spin_unlock_irqrestore(&closure_list_lock, flags);
}

void closure_debug_destroy(struct closure *cl)
{
    unsigned long flags;

    BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
    cl->magic = CLOSURE_MAGIC_DEAD;

    spin_lock_irqsave(&closure_list_lock, flags);
    list_del(&cl->all);
    spin_unlock_irqrestore(&closure_list_lock, flags);
}

static struct dentry *closure_debug;

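/*
 * Dumps one entry per live closure: its address, the ip it was last
 * set from, its fn and parent, the remaining refcount, and state flags
 * (Q = queued via its work item, R = running, W = waiting, with the
 * address it's waiting on).
 */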
static int debug_show(struct seq_file *f, void *data)
{
    struct closure *cl;

    spin_lock_irq(&closure_list_lock);

    list_for_each_entry(cl, &closure_list, all) {
        int r = atomic_read(&cl->remaining);

        seq_printf(f, "%p: %pS -> %pS p %p r %i ",
               cl, (void *) cl->ip, cl->fn, cl->parent,
               r & CLOSURE_REMAINING_MASK);

        seq_printf(f, "%s%s\n",
               test_bit(WORK_STRUCT_PENDING_BIT,
                    work_data_bits(&cl->work)) ? "Q" : "",
               r & CLOSURE_RUNNING ? "R" : "");

        if (r & CLOSURE_WAITING)
            seq_printf(f, " W %pS\n",
                   (void *) cl->waiting_on);

        seq_printf(f, "\n");
    }

    spin_unlock_irq(&closure_list_lock);
    return 0;
}

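/* Generates debug_fops around debug_show() via single_open() */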
DEFINE_SHOW_ATTRIBUTE(debug);

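/*
 * bcache_debug is bcache's debugfs root directory, created elsewhere in
 * the driver (see debug.c); if it exists, expose the closure list there.
 */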
void __init closure_debug_init(void)
{
    if (!IS_ERR_OR_NULL(bcache_debug))
        /*
         * There's no need to check the return value of
         * debugfs_create_file(): debugfs failures are
         * deliberately ignored here.
         */
        closure_debug = debugfs_create_file(
            "closures", 0400, bcache_debug, NULL, &debug_fops);
}
#endif

MODULE_AUTHOR("Kent Overstreet <koverstreet@google.com>");
MODULE_LICENSE("GPL");