// SPDX-License-Identifier: GPL-2.0

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

enum {
	WORK_DONE_BIT,
	WORK_ORDER_DONE_BIT,
};

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct btrfs_workqueue {
	struct workqueue_struct *normal_wq;

	/* File system this workqueue services */
	struct btrfs_fs_info *fs_info;

	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Number of queued but not yet executed work items, used for thresholding */
	atomic_t pending;

	/* Upper limit of concurrent workers */
	int limit_active;

	/* Current max_active of normal_wq, adjusted between 1 and limit_active */
	int current_active;

	/*
	 * Threshold used to adjust current_active.  count rate-limits the
	 * adjustment, thres_lock protects it.
	 */
	int thresh;
	unsigned int count;
	spinlock_t thres_lock;
};

struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct btrfs_workqueue *wq)
{
	return wq->fs_info;
}

struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
{
	return work->wq->fs_info;
}

bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
{
	/*
	 * Workqueues without a threshold never report congestion: pending is
	 * only maintained by the thresh_queue/exec hooks when a threshold is
	 * set, so there is nothing meaningful to compare against here.
	 */
	if (wq->thresh == NO_THRESHOLD)
		return false;

	return atomic_read(&wq->pending) > wq->thresh * 2;
}

struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
					      const char *name, unsigned int flags,
					      int limit_active, int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->fs_info = fs_info;
	ret->limit_active = limit_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For low threshold, disabling threshold is a better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_active = limit_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		/*
		 * For threshold-able wq, let its concurrency grow on demand.
		 * Use minimal max_active here, so that it can grow with load.
		 */
		ret->current_active = 1;
		ret->thresh = thresh;
	}

	ret->normal_wq = alloc_workqueue("btrfs-%s", flags, ret->current_active,
					 name);
	if (!ret->normal_wq) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	spin_lock_init(&ret->thres_lock);
	trace_btrfs_workqueue_alloc(ret, name);
	return ret;
}

/*
 * Hook for the thresholding mechanism, called from btrfs_queue_work().
 *
 * This may run in IRQ context, so it must not call
 * workqueue_set_max_active(); it only accounts the newly queued item.
 */
static inline void thresh_queue_hook(struct btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Hook for the thresholding mechanism, called from the worker before the
 * normal work function runs.  This runs in process (worker thread) context,
 * so it is safe to call workqueue_set_max_active() here.
 */
static inline void thresh_exec_hook(struct btrfs_workqueue *wq)
{
	int new_current_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the update interval of current_active.
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
	new_current_active = wq->current_active;

	/*
	 * pending may change concurrently, but an approximate value is good
	 * enough for calculating new_current_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_current_active++;
	if (pending < wq->thresh / 2)
		new_current_active--;
	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
	if (new_current_active != wq->current_active) {
		need_change = 1;
		wq->current_active = new_current_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change)
		workqueue_set_max_active(wq->normal_wq, wq->current_active);
}

static void run_ordered_work(struct btrfs_workqueue *wq,
			     struct btrfs_work *self)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;
	bool free_self = false;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;
		/*
		 * Orders all subsequent loads after reading WORK_DONE_BIT,
		 * paired with the smp_mb__before_atomic in btrfs_work_helper;
		 * this guarantees that the ordered function will see all
		 * updates made by the ordinary work function.
		 */
		smp_rmb();

		/*
		 * We are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns.
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* Now take the lock again and drop our item from the list. */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		if (work == self) {
			/*
			 * This is the work item that the worker is currently
			 * executing.
			 *
			 * The kernel workqueue code guarantees non-reentrancy
			 * of work items: if a work item with the same address
			 * and work function is queued twice, the second
			 * execution is blocked until the first one finishes.
			 * A work item may be freed and recycled with the same
			 * address, and the workqueue code assumes the original
			 * item cannot still be executing when the new one is
			 * queued.
			 *
			 * Different types of btrfs work can depend on each
			 * other, and work on one filesystem may even depend on
			 * the same type of work on another filesystem (e.g.
			 * via a loop device).  Therefore we must not allow the
			 * current work item to be recycled until we are really
			 * done with it, otherwise we break the assumption
			 * above and can deadlock.  Defer freeing it until we
			 * are about to return.
			 */
			free_self = true;
		} else {
			/*
			 * We don't want to call the ordered free functions with
			 * the lock held.
			 */
			work->ordered_free(work);
			/* NB: work must not be dereferenced past this point. */
			trace_btrfs_all_work_done(wq->fs_info, work);
		}
	}
	spin_unlock_irqrestore(lock, flags);

	if (free_self) {
		self->ordered_free(self);
		/* NB: self must not be dereferenced past this point. */
		trace_btrfs_all_work_done(wq->fs_info, self);
	}
}

static void btrfs_work_helper(struct work_struct *normal_work)
{
	struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
					       normal_work);
	struct btrfs_workqueue *wq = work->wq;
	int need_order = 0;

	/*
	 * We should not touch things inside work in the following steps:
	 * 1) after work->func() if it has no ordered_free
	 *    Since the struct is freed in work->func().
	 * 2) after setting WORK_DONE_BIT
	 *    The work may be freed in other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		/*
		 * Ensures all memory accesses done in the work function are
		 * ordered before setting the WORK_DONE_BIT, so that the thread
		 * which is going to execute the ordered work sees them.
		 * Pairs with the smp_rmb in run_ordered_work.
		 */
		smp_mb__before_atomic();
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq, work);
	} else {
		/* NB: work must not be dereferenced past this point. */
		trace_btrfs_all_work_done(wq->fs_info, work);
	}
}
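
/*
 * Illustrative usage sketch (not taken from a specific caller; my_work,
 * my_func, my_ordered_func and my_ordered_free are hypothetical names).
 * Callers typically embed struct btrfs_work in a larger heap-allocated
 * structure that is freed from the ordered_free callback:
 *
 *	struct my_work *w = kzalloc(sizeof(*w), GFP_NOFS);
 *
 *	if (!w)
 *		return -ENOMEM;
 *	btrfs_init_work(&w->work, my_func, my_ordered_func, my_ordered_free);
 *	btrfs_queue_work(fs_info->workers, &w->work);
 *
 * my_func runs on the normal workqueue; because ordered_func is set, the item
 * is also placed on ordered_list, and my_ordered_func/my_ordered_free run in
 * queueing order once the normal function has completed.
 */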

void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
		     btrfs_func_t ordered_func, btrfs_func_t ordered_free)
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, btrfs_work_helper);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}

void btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	trace_btrfs_work_queued(work);
	queue_work(wq->normal_wq, &work->normal_work);
}

void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	destroy_workqueue(wq->normal_wq);
	trace_btrfs_workqueue_destroy(wq);
	kfree(wq);
}

void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
	if (wq)
		wq->limit_active = limit_active;
}

void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
{
	flush_workqueue(wq->normal_wq);
}