// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2014 Fujitsu.  All rights reserved.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

enum {
    WORK_DONE_BIT,
    WORK_ORDER_DONE_BIT,
};

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

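/*
 * A btrfs_workqueue wraps a plain kernel workqueue and layers two features
 * on top of it: an optional ordered pass, in which each item's
 * ordered_func() runs strictly in the order the items were queued, and an
 * optional thresholding scheme that grows or shrinks the underlying
 * workqueue's max_active according to the number of pending items.
 */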
struct btrfs_workqueue {
    struct workqueue_struct *normal_wq;

    /* File system this workqueue services */
    struct btrfs_fs_info *fs_info;

    /* List head pointing to ordered work list */
    struct list_head ordered_list;

    /* Spinlock for ordered_list */
    spinlock_t list_lock;

    /* Thresholding related variables */
    atomic_t pending;

    /* Upper limit of concurrently active workers */
    int limit_active;

    /* Current number of concurrently active workers */
    int current_active;

    /* Threshold at which current_active is adjusted */
    int thresh;
    unsigned int count;
    spinlock_t thres_lock;
};

struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct btrfs_workqueue *wq)
{
    return wq->fs_info;
}

struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
{
    return work->wq->fs_info;
}

bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
{
    /*
     * We could compare wq->pending with num_online_cpus() to support
     * the "thresh == NO_THRESHOLD" case, but it requires moving up the
     * atomic_inc/dec calls in thresh_queue/exec_hook. Let's postpone it
     * until someone needs support for that case.
     */
    if (wq->thresh == NO_THRESHOLD)
        return false;

    return atomic_read(&wq->pending) > wq->thresh * 2;
}

struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
                          const char *name, unsigned int flags,
                          int limit_active, int thresh)
{
    struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

    if (!ret)
        return NULL;

    ret->fs_info = fs_info;
    ret->limit_active = limit_active;
    atomic_set(&ret->pending, 0);
    if (thresh == 0)
        thresh = DFT_THRESHOLD;
    /* For a low threshold, disabling thresholding is the better choice */
    if (thresh < DFT_THRESHOLD) {
        ret->current_active = limit_active;
        ret->thresh = NO_THRESHOLD;
    } else {
        /*
         * For a thresholded wq, let its concurrency grow on demand.
         * Use a minimal max_active at alloc time to reduce resource
         * usage.
         */
        ret->current_active = 1;
        ret->thresh = thresh;
    }

    ret->normal_wq = alloc_workqueue("btrfs-%s", flags, ret->current_active,
                     name);
    if (!ret->normal_wq) {
        kfree(ret);
        return NULL;
    }

    INIT_LIST_HEAD(&ret->ordered_list);
    spin_lock_init(&ret->list_lock);
    spin_lock_init(&ret->thres_lock);
    trace_btrfs_workqueue_alloc(ret, name);
    return ret;
}
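
/*
 * A minimal usage sketch, assuming illustrative values: a caller typically
 * allocates the workqueue once at mount time. The "worker" name, the flags
 * and the limits below are examples only:
 *
 *     wq = btrfs_alloc_workqueue(fs_info, "worker", WQ_MEM_RECLAIM, 16, 0);
 *     if (!wq)
 *         return -ENOMEM;
 *
 * Passing thresh == 0 selects DFT_THRESHOLD, so the workqueue starts with
 * max_active == 1 and grows on demand as described above.
 */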

/*
 * Hook for the threshold which will be called in btrfs_queue_work.
 * This hook WILL be called in IRQ handler context, so
 * workqueue_set_max_active() MUST NOT be called from it.
 */
static inline void thresh_queue_hook(struct btrfs_workqueue *wq)
{
    if (wq->thresh == NO_THRESHOLD)
        return;
    atomic_inc(&wq->pending);
}

/*
 * Hook for the threshold which will be called before executing the work.
 * This hook is called in kthread context, so this is where
 * workqueue_set_max_active() is called.
 */
static inline void thresh_exec_hook(struct btrfs_workqueue *wq)
{
    int new_current_active;
    long pending;
    int need_change = 0;

    if (wq->thresh == NO_THRESHOLD)
        return;

    atomic_dec(&wq->pending);
    spin_lock(&wq->thres_lock);
    /*
     * Use wq->count to limit the calling frequency of
     * workqueue_set_max_active.
     */
    wq->count++;
    wq->count %= (wq->thresh / 4);
    if (!wq->count)
        goto out;
    new_current_active = wq->current_active;

    /*
     * pending may change later, but that's OK since we don't need it
     * to be perfectly accurate to calculate new_current_active.
     */
    pending = atomic_read(&wq->pending);
    if (pending > wq->thresh)
        new_current_active++;
    if (pending < wq->thresh / 2)
        new_current_active--;
    new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
    if (new_current_active != wq->current_active) {
        need_change = 1;
        wq->current_active = new_current_active;
    }
out:
    spin_unlock(&wq->thres_lock);

    if (need_change) {
        workqueue_set_max_active(wq->normal_wq, wq->current_active);
    }
}
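
/*
 * A concrete example with the default threshold (DFT_THRESHOLD == 32):
 * whenever the adjustment above runs, more than 32 pending items bump
 * current_active by one, fewer than 16 pending items lower it by one, and
 * the result is clamped to the [1, limit_active] range before being applied
 * via workqueue_set_max_active().
 */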

static void run_ordered_work(struct btrfs_workqueue *wq,
                 struct btrfs_work *self)
{
    struct list_head *list = &wq->ordered_list;
    struct btrfs_work *work;
    spinlock_t *lock = &wq->list_lock;
    unsigned long flags;
    bool free_self = false;

    while (1) {
        spin_lock_irqsave(lock, flags);
        if (list_empty(list))
            break;
        work = list_entry(list->next, struct btrfs_work,
                  ordered_list);
        if (!test_bit(WORK_DONE_BIT, &work->flags))
            break;
        /*
         * Orders all subsequent loads after reading WORK_DONE_BIT.
         * Paired with the smp_mb__before_atomic() in btrfs_work_helper(),
         * this guarantees that the ordered function will see all
         * updates made by the ordinary work function.
         */
        smp_rmb();

        /*
         * we are going to call the ordered done function, but
         * we leave the work item on the list as a barrier so
         * that later work items that are done don't have their
         * functions called before this one returns
         */
        if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
            break;
        trace_btrfs_ordered_sched(work);
        spin_unlock_irqrestore(lock, flags);
        work->ordered_func(work);

        /* now take the lock again and drop our item from the list */
        spin_lock_irqsave(lock, flags);
        list_del(&work->ordered_list);
        spin_unlock_irqrestore(lock, flags);

        if (work == self) {
            /*
             * This is the work item that the worker is currently
             * executing.
             *
             * The kernel workqueue code guarantees non-reentrancy
             * of work items. I.e., if a work item with the same
             * address and work function is queued twice, the second
             * execution is blocked until the first one finishes. A
             * work item may be freed and recycled with the same
             * work function; the workqueue code assumes that the
             * original work item cannot depend on the recycled work
             * item in that case (see find_worker_executing_work()).
             *
             * Note that different types of Btrfs work can depend on
             * each other, and one type of work on one Btrfs
             * filesystem may even depend on the same type of work
             * on another Btrfs filesystem via, e.g., a loop device.
             * Therefore, we must not allow the current work item to
             * be recycled until we are really done, otherwise we
             * break the above assumption and can deadlock.
             */
            free_self = true;
        } else {
            /*
             * We don't want to call the ordered free functions with
             * the lock held.
             */
            work->ordered_free(work);
            /* NB: work must not be dereferenced past this point. */
            trace_btrfs_all_work_done(wq->fs_info, work);
        }
    }
    spin_unlock_irqrestore(lock, flags);

    if (free_self) {
        self->ordered_free(self);
        /* NB: self must not be dereferenced past this point. */
        trace_btrfs_all_work_done(wq->fs_info, self);
    }
}

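/*
 * Common work_struct callback for every btrfs_work (set up in
 * btrfs_init_work()). It runs the threshold bookkeeping, calls the ordinary
 * work function, and, if an ordered_func was supplied, publishes
 * WORK_DONE_BIT and then walks the ordered list via run_ordered_work() so
 * that ordered functions run in queueing order.
 */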
static void btrfs_work_helper(struct work_struct *normal_work)
{
    struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
                           normal_work);
    struct btrfs_workqueue *wq = work->wq;
    int need_order = 0;

    /*
     * We should not touch things inside work in the following cases:
     * 1) after work->func(), if it has no ordered_free, since the struct
     *    is freed in work->func();
     * 2) after setting WORK_DONE_BIT, because the work may be freed in
     *    other threads almost instantly.
     * So we save the needed things here.
     */
    if (work->ordered_func)
        need_order = 1;

    trace_btrfs_work_sched(work);
    thresh_exec_hook(wq);
    work->func(work);
    if (need_order) {
        /*
         * Ensures all memory accesses done in the work function are
         * ordered before setting the WORK_DONE_BIT, ensuring that the
         * thread which is going to execute the ordered work sees them.
         * Pairs with the smp_rmb() in run_ordered_work().
         */
        smp_mb__before_atomic();
        set_bit(WORK_DONE_BIT, &work->flags);
        run_ordered_work(wq, work);
    } else {
        /* NB: work must not be dereferenced past this point. */
        trace_btrfs_all_work_done(wq->fs_info, work);
    }
}

void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
             btrfs_func_t ordered_func, btrfs_func_t ordered_free)
{
    work->func = func;
    work->ordered_func = ordered_func;
    work->ordered_free = ordered_free;
    INIT_WORK(&work->normal_work, btrfs_work_helper);
    INIT_LIST_HEAD(&work->ordered_list);
    work->flags = 0;
}

void btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work)
{
    unsigned long flags;

    work->wq = wq;
    thresh_queue_hook(wq);
    if (work->ordered_func) {
        spin_lock_irqsave(&wq->list_lock, flags);
        list_add_tail(&work->ordered_list, &wq->ordered_list);
        spin_unlock_irqrestore(&wq->list_lock, flags);
    }
    trace_btrfs_work_queued(work);
    queue_work(wq->normal_wq, &work->normal_work);
}
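
/*
 * A minimal sketch of the submission side, assuming my_func, my_ordered and
 * my_free are hypothetical callbacks with the btrfs_func_t signature and
 * that the work item is embedded in some caller-owned structure:
 *
 *     btrfs_init_work(&item->work, my_func, my_ordered, my_free);
 *     btrfs_queue_work(wq, &item->work);
 *
 * Passing NULL for ordered_func (and ordered_free) skips the ordered pass
 * entirely; the item is then only run through my_func on the normal
 * workqueue.
 */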

void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
    if (!wq)
        return;
    destroy_workqueue(wq->normal_wq);
    trace_btrfs_workqueue_destroy(wq);
    kfree(wq);
}

void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
    if (wq)
        wq->limit_active = limit_active;
}

void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
{
    flush_workqueue(wq->normal_wq);
}
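
/*
 * A minimal sketch of the teardown side, assuming the workqueue was set up
 * as in the allocation example above: btrfs_flush_workqueue() waits for
 * already-queued work to finish, and btrfs_destroy_workqueue() tears down
 * the underlying workqueue and frees the wrapper:
 *
 *     btrfs_flush_workqueue(wq);
 *     btrfs_destroy_workqueue(wq);
 */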