0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * kernel/workqueue.c - generic async execution with shared worker pool
0004  *
0005  * Copyright (C) 2002       Ingo Molnar
0006  *
0007  *   Derived from the taskqueue/keventd code by:
0008  *     David Woodhouse <dwmw2@infradead.org>
0009  *     Andrew Morton
0010  *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
0011  *     Theodore Ts'o <tytso@mit.edu>
0012  *
0013  * Made to use alloc_percpu by Christoph Lameter.
0014  *
0015  * Copyright (C) 2010       SUSE Linux Products GmbH
0016  * Copyright (C) 2010       Tejun Heo <tj@kernel.org>
0017  *
0018  * This is the generic async execution mechanism.  Work items are
0019  * executed in process context.  The worker pool is shared and
0020  * automatically managed.  There are two worker pools for each CPU (one for
0021  * normal work items and the other for high priority ones) and some extra
0022  * pools for workqueues which are not bound to any specific CPU - the
0023  * number of these backing pools is dynamic.
0024  *
0025  * Please read Documentation/core-api/workqueue.rst for details.
0026  */
0027 
0028 #include <linux/export.h>
0029 #include <linux/kernel.h>
0030 #include <linux/sched.h>
0031 #include <linux/init.h>
0032 #include <linux/signal.h>
0033 #include <linux/completion.h>
0034 #include <linux/workqueue.h>
0035 #include <linux/slab.h>
0036 #include <linux/cpu.h>
0037 #include <linux/notifier.h>
0038 #include <linux/kthread.h>
0039 #include <linux/hardirq.h>
0040 #include <linux/mempolicy.h>
0041 #include <linux/freezer.h>
0042 #include <linux/debug_locks.h>
0043 #include <linux/lockdep.h>
0044 #include <linux/idr.h>
0045 #include <linux/jhash.h>
0046 #include <linux/hashtable.h>
0047 #include <linux/rculist.h>
0048 #include <linux/nodemask.h>
0049 #include <linux/moduleparam.h>
0050 #include <linux/uaccess.h>
0051 #include <linux/sched/isolation.h>
0052 #include <linux/nmi.h>
0053 #include <linux/kvm_para.h>
0054 
0055 #include "workqueue_internal.h"
0056 
0057 enum {
0058     /*
0059      * worker_pool flags
0060      *
0061      * A bound pool is either associated or disassociated with its CPU.
0062      * While associated (!DISASSOCIATED), all workers are bound to the
0063      * CPU and none has %WORKER_UNBOUND set and concurrency management
0064      * is in effect.
0065      *
0066      * While DISASSOCIATED, the cpu may be offline and all workers have
0067      * %WORKER_UNBOUND set and concurrency management disabled, and may
0068      * be executing on any CPU.  The pool behaves as an unbound one.
0069      *
0070      * Note that DISASSOCIATED should be flipped only while holding
0071      * wq_pool_attach_mutex to avoid changing binding state while
0072      * worker_attach_to_pool() is in progress.
0073      */
0074     POOL_MANAGER_ACTIVE = 1 << 0,   /* being managed */
0075     POOL_DISASSOCIATED  = 1 << 2,   /* cpu can't serve workers */
0076 
0077     /* worker flags */
0078     WORKER_DIE      = 1 << 1,   /* die die die */
0079     WORKER_IDLE     = 1 << 2,   /* is idle */
0080     WORKER_PREP     = 1 << 3,   /* preparing to run works */
0081     WORKER_CPU_INTENSIVE    = 1 << 6,   /* cpu intensive */
0082     WORKER_UNBOUND      = 1 << 7,   /* worker is unbound */
0083     WORKER_REBOUND      = 1 << 8,   /* worker was rebound */
0084 
0085     WORKER_NOT_RUNNING  = WORKER_PREP | WORKER_CPU_INTENSIVE |
0086                   WORKER_UNBOUND | WORKER_REBOUND,
0087 
0088     NR_STD_WORKER_POOLS = 2,        /* # standard pools per cpu */
0089 
0090     UNBOUND_POOL_HASH_ORDER = 6,        /* hashed by pool->attrs */
0091     BUSY_WORKER_HASH_ORDER  = 6,        /* 64 pointers */
0092 
0093     MAX_IDLE_WORKERS_RATIO  = 4,        /* 1/4 of busy can be idle */
0094     IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
0095 
0096     MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
0097                         /* call for help after 10ms
0098                            (min two ticks) */
0099     MAYDAY_INTERVAL     = HZ / 10,  /* and then every 100ms */
0100     CREATE_COOLDOWN     = HZ,       /* time to breathe after a failure */
0101 
0102     /*
0103      * Rescue workers are used only on emergencies and shared by
0104      * all cpus.  Give MIN_NICE.
0105      */
0106     RESCUER_NICE_LEVEL  = MIN_NICE,
0107     HIGHPRI_NICE_LEVEL  = MIN_NICE,
0108 
0109     WQ_NAME_LEN     = 24,
0110 };
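
/*
 * Worked example for the timeout arithmetic above (editor's
 * illustration, assuming the common CONFIG_HZ values):
 *
 *   HZ = 1000: MAYDAY_INITIAL_TIMEOUT = 1000 / 100 = 10 ticks = 10ms
 *   HZ = 100:  HZ / 100 = 1 tick, below the two-tick minimum, so the
 *              ?: operator clamps it to 2 ticks = 20ms
 *   IDLE_WORKER_TIMEOUT = 300 * HZ ticks = 300 seconds at any HZ
 */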
0111 
0112 /*
0113  * Structure fields follow one of the following exclusion rules.
0114  *
0115  * I: Modifiable by initialization/destruction paths and read-only for
0116  *    everyone else.
0117  *
0118  * P: Preemption protected.  Disabling preemption is enough and should
0119  *    only be modified and accessed from the local cpu.
0120  *
0121  * L: pool->lock protected.  Access with pool->lock held.
0122  *
0123  * X: During normal operation, modification requires pool->lock and should
0124  *    be done only from local cpu.  Either disabling preemption on local
0125  *    cpu or grabbing pool->lock is enough for read access.  If
0126  *    POOL_DISASSOCIATED is set, it's identical to L.
0127  *
0128  * A: wq_pool_attach_mutex protected.
0129  *
0130  * PL: wq_pool_mutex protected.
0131  *
0132  * PR: wq_pool_mutex protected for writes.  RCU protected for reads.
0133  *
0134  * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
0135  *
0136  * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
0137  *      RCU for reads.
0138  *
0139  * WQ: wq->mutex protected.
0140  *
0141  * WR: wq->mutex protected for writes.  RCU protected for reads.
0142  *
0143  * MD: wq_mayday_lock protected.
0144  */
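
/*
 * Editor's sketch of how the "L:" rule above is honored in practice;
 * nr_workers is one such field (see struct worker_pool below):
 *
 *	int nr;
 *
 *	raw_spin_lock_irq(&pool->lock);
 *	nr = pool->nr_workers;		// L: safe, pool->lock is held
 *	raw_spin_unlock_irq(&pool->lock);
 */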
0145 
0146 /* struct worker is defined in workqueue_internal.h */
0147 
0148 struct worker_pool {
0149     raw_spinlock_t      lock;       /* the pool lock */
0150     int         cpu;        /* I: the associated cpu */
0151     int         node;       /* I: the associated node ID */
0152     int         id;     /* I: pool ID */
0153     unsigned int        flags;      /* X: flags */
0154 
0155     unsigned long       watchdog_ts;    /* L: watchdog timestamp */
0156 
0157     /*
0158      * The counter is incremented in a process context on the associated CPU
0159      * w/ preemption disabled, and decremented or reset in the same context
0160      * but w/ pool->lock held.  Readers grab pool->lock and are thus
0161      * guaranteed to observe whether the counter has reached zero.
0162      */
0163     int         nr_running;
0164 
0165     struct list_head    worklist;   /* L: list of pending works */
0166 
0167     int         nr_workers; /* L: total number of workers */
0168     int         nr_idle;    /* L: currently idle workers */
0169 
0170     struct list_head    idle_list;  /* L: list of idle workers */
0171     struct timer_list   idle_timer; /* L: worker idle timeout */
0172     struct timer_list   mayday_timer;   /* L: SOS timer for workers */
0173 
0174     /* a worker is either on busy_hash or idle_list, or is the manager */
0175     DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
0176                         /* L: hash of busy workers */
0177 
0178     struct worker       *manager;   /* L: purely informational */
0179     struct list_head    workers;    /* A: attached workers */
0180     struct completion   *detach_completion; /* all workers detached */
0181 
0182     struct ida      worker_ida; /* worker IDs for task name */
0183 
0184     struct workqueue_attrs  *attrs;     /* I: worker attributes */
0185     struct hlist_node   hash_node;  /* PL: unbound_pool_hash node */
0186     int         refcnt;     /* PL: refcnt for unbound pools */
0187 
0188     /*
0189      * Destruction of pool is RCU protected to allow dereferences
0190      * from get_work_pool().
0191      */
0192     struct rcu_head     rcu;
0193 };
0194 
0195 /*
0196  * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
0197  * of work_struct->data are used for flags and the remaining high bits
0198  * point to the pwq; thus, pwqs need to be aligned on a boundary of
0199  * (1 << WORK_STRUCT_FLAG_BITS) bytes.
0200  */
0201 struct pool_workqueue {
0202     struct worker_pool  *pool;      /* I: the associated pool */
0203     struct workqueue_struct *wq;        /* I: the owning workqueue */
0204     int         work_color; /* L: current color */
0205     int         flush_color;    /* L: flushing color */
0206     int         refcnt;     /* L: reference count */
0207     int         nr_in_flight[WORK_NR_COLORS];
0208                         /* L: nr of in_flight works */
0209 
0210     /*
0211      * nr_active management and WORK_STRUCT_INACTIVE:
0212      *
0213      * When pwq->nr_active >= max_active, new work item is queued to
0214      * pwq->inactive_works instead of pool->worklist and marked with
0215      * WORK_STRUCT_INACTIVE.
0216      *
0217      * All work items marked with WORK_STRUCT_INACTIVE do not participate
0218      * in pwq->nr_active and all work items in pwq->inactive_works are
0219      * marked with WORK_STRUCT_INACTIVE.  But not all WORK_STRUCT_INACTIVE
0220      * work items are in pwq->inactive_works.  Some of them are ready to
0221      * run in pool->worklist or worker->scheduled.  Those work items are
0222      * only struct wq_barrier which is used for flush_work() and should
0223      * not participate in pwq->nr_active.  For non-barrier work item, it
0224      * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
0225      */
0226     int         nr_active;  /* L: nr of active works */
0227     int         max_active; /* L: max active works */
0228     struct list_head    inactive_works; /* L: inactive works */
0229     struct list_head    pwqs_node;  /* WR: node on wq->pwqs */
0230     struct list_head    mayday_node;    /* MD: node on wq->maydays */
0231 
0232     /*
0233      * Release of unbound pwq is punted to system_wq.  See put_pwq()
0234      * and pwq_unbound_release_workfn() for details.  pool_workqueue
0235      * itself is also RCU protected so that the first pwq can be
0236      * determined without grabbing wq->mutex.
0237      */
0238     struct work_struct  unbound_release_work;
0239     struct rcu_head     rcu;
0240 } __aligned(1 << WORK_STRUCT_FLAG_BITS);
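
/*
 * Editor's sketch of the packing enabled by the __aligned() above: the
 * pwq address has its low WORK_STRUCT_FLAG_BITS bits clear, so
 * work->data can carry flags there (this mirrors get_work_pwq() below):
 *
 *	unsigned long data = atomic_long_read(&work->data);
 *	struct pool_workqueue *pwq =
 *		(void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 *	unsigned long flag_bits = data & WORK_STRUCT_FLAG_MASK;
 */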
0241 
0242 /*
0243  * Structure used to wait for workqueue flush.
0244  */
0245 struct wq_flusher {
0246     struct list_head    list;       /* WQ: list of flushers */
0247     int         flush_color;    /* WQ: flush color waiting for */
0248     struct completion   done;       /* flush completion */
0249 };
0250 
0251 struct wq_device;
0252 
0253 /*
0254  * The externally visible workqueue.  It relays the issued work items to
0255  * the appropriate worker_pool through its pool_workqueues.
0256  */
0257 struct workqueue_struct {
0258     struct list_head    pwqs;       /* WR: all pwqs of this wq */
0259     struct list_head    list;       /* PR: list of all workqueues */
0260 
0261     struct mutex        mutex;      /* protects this wq */
0262     int         work_color; /* WQ: current work color */
0263     int         flush_color;    /* WQ: current flush color */
0264     atomic_t        nr_pwqs_to_flush; /* flush in progress */
0265     struct wq_flusher   *first_flusher; /* WQ: first flusher */
0266     struct list_head    flusher_queue;  /* WQ: flush waiters */
0267     struct list_head    flusher_overflow; /* WQ: flush overflow list */
0268 
0269     struct list_head    maydays;    /* MD: pwqs requesting rescue */
0270     struct worker       *rescuer;   /* MD: rescue worker */
0271 
0272     int         nr_drainers;    /* WQ: drain in progress */
0273     int         saved_max_active; /* WQ: saved pwq max_active */
0274 
0275     struct workqueue_attrs  *unbound_attrs; /* PW: only for unbound wqs */
0276     struct pool_workqueue   *dfl_pwq;   /* PW: only for unbound wqs */
0277 
0278 #ifdef CONFIG_SYSFS
0279     struct wq_device    *wq_dev;    /* I: for sysfs interface */
0280 #endif
0281 #ifdef CONFIG_LOCKDEP
0282     char            *lock_name;
0283     struct lock_class_key   key;
0284     struct lockdep_map  lockdep_map;
0285 #endif
0286     char            name[WQ_NAME_LEN]; /* I: workqueue name */
0287 
0288     /*
0289      * Destruction of workqueue_struct is RCU protected to allow walking
0290      * the workqueues list without grabbing wq_pool_mutex.
0291      * This is used to dump all workqueues from sysrq.
0292      */
0293     struct rcu_head     rcu;
0294 
0295     /* hot fields used during command issue, aligned to cacheline */
0296     unsigned int        flags ____cacheline_aligned; /* WQ: WQ_* flags */
0297     struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
0298     struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
0299 };
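
/*
 * Editor's usage sketch: callers obtain one of these via
 * alloc_workqueue() rather than constructing it by hand.  "my_wq" and
 * the flag choice are illustrative only:
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("my_wq", WQ_UNBOUND | WQ_FREEZABLE, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(wq);
 */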
0300 
0301 static struct kmem_cache *pwq_cache;
0302 
0303 static cpumask_var_t *wq_numa_possible_cpumask;
0304                     /* possible CPUs of each node */
0305 
0306 static bool wq_disable_numa;
0307 module_param_named(disable_numa, wq_disable_numa, bool, 0444);
0308 
0309 /* see the comment above the definition of WQ_POWER_EFFICIENT */
0310 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
0311 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
0312 
0313 static bool wq_online;          /* can kworkers be created yet? */
0314 
0315 static bool wq_numa_enabled;        /* unbound NUMA affinity enabled */
0316 
0317 /* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
0318 static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
0319 
0320 static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
0321 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
0322 static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
0323 /* wait for manager to go away */
0324 static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
0325 
0326 static LIST_HEAD(workqueues);       /* PR: list of all workqueues */
0327 static bool workqueue_freezing;     /* PL: have wqs started freezing? */
0328 
0329 /* PL: allowable cpus for unbound wqs and work items */
0330 static cpumask_var_t wq_unbound_cpumask;
0331 
0332 /* CPU to which unbound work was last round-robin scheduled from this CPU */
0333 static DEFINE_PER_CPU(int, wq_rr_cpu_last);
0334 
0335 /*
0336  * Local execution of unbound work items is no longer guaranteed.  The
0337  * following always forces round-robin CPU selection on unbound work items
0338  * to uncover usages which depend on it.
0339  */
0340 #ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
0341 static bool wq_debug_force_rr_cpu = true;
0342 #else
0343 static bool wq_debug_force_rr_cpu = false;
0344 #endif
0345 module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
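
/*
 * Editor's note: being built-in, the module_param_named() knobs above
 * are exposed under the "workqueue." prefix and can be set on the
 * kernel command line, e.g.:
 *
 *	workqueue.power_efficient=1 workqueue.debug_force_rr_cpu=1
 */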
0346 
0347 /* the per-cpu worker pools */
0348 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
0349 
0350 static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */
0351 
0352 /* PL: hash of all unbound pools keyed by pool->attrs */
0353 static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
0354 
0355 /* I: attributes used when instantiating standard unbound pools on demand */
0356 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
0357 
0358 /* I: attributes used when instantiating ordered pools on demand */
0359 static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
0360 
0361 struct workqueue_struct *system_wq __read_mostly;
0362 EXPORT_SYMBOL(system_wq);
0363 struct workqueue_struct *system_highpri_wq __read_mostly;
0364 EXPORT_SYMBOL_GPL(system_highpri_wq);
0365 struct workqueue_struct *system_long_wq __read_mostly;
0366 EXPORT_SYMBOL_GPL(system_long_wq);
0367 struct workqueue_struct *system_unbound_wq __read_mostly;
0368 EXPORT_SYMBOL_GPL(system_unbound_wq);
0369 struct workqueue_struct *system_freezable_wq __read_mostly;
0370 EXPORT_SYMBOL_GPL(system_freezable_wq);
0371 struct workqueue_struct *system_power_efficient_wq __read_mostly;
0372 EXPORT_SYMBOL_GPL(system_power_efficient_wq);
0373 struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
0374 EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
0375 
0376 static int worker_thread(void *__worker);
0377 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
0378 static void show_pwq(struct pool_workqueue *pwq);
0379 static void show_one_worker_pool(struct worker_pool *pool);
0380 
0381 #define CREATE_TRACE_POINTS
0382 #include <trace/events/workqueue.h>
0383 
0384 #define assert_rcu_or_pool_mutex()                  \
0385     RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&           \
0386              !lockdep_is_held(&wq_pool_mutex),      \
0387              "RCU or wq_pool_mutex should be held")
0388 
0389 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq)            \
0390     RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&           \
0391              !lockdep_is_held(&wq->mutex) &&        \
0392              !lockdep_is_held(&wq_pool_mutex),      \
0393              "RCU, wq->mutex or wq_pool_mutex should be held")
0394 
0395 #define for_each_cpu_worker_pool(pool, cpu)             \
0396     for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];       \
0397          (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
0398          (pool)++)
0399 
0400 /**
0401  * for_each_pool - iterate through all worker_pools in the system
0402  * @pool: iteration cursor
0403  * @pi: integer used for iteration
0404  *
0405  * This must be called either with wq_pool_mutex held or RCU read
0406  * locked.  If the pool needs to be used beyond the locking in effect, the
0407  * caller is responsible for guaranteeing that the pool stays online.
0408  *
0409  * The if/else clause exists only for the lockdep assertion and can be
0410  * ignored.
0411  */
0412 #define for_each_pool(pool, pi)                     \
0413     idr_for_each_entry(&worker_pool_idr, pool, pi)          \
0414         if (({ assert_rcu_or_pool_mutex(); false; })) { }   \
0415         else
0416 
0417 /**
0418  * for_each_pool_worker - iterate through all workers of a worker_pool
0419  * @worker: iteration cursor
0420  * @pool: worker_pool to iterate workers of
0421  *
0422  * This must be called with wq_pool_attach_mutex held.
0423  *
0424  * The if/else clause exists only for the lockdep assertion and can be
0425  * ignored.
0426  */
0427 #define for_each_pool_worker(worker, pool)              \
0428     list_for_each_entry((worker), &(pool)->workers, node)       \
0429         if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
0430         else
0431 
0432 /**
0433  * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
0434  * @pwq: iteration cursor
0435  * @wq: the target workqueue
0436  *
0437  * This must be called either with wq->mutex held or RCU read locked.
0438  * If the pwq needs to be used beyond the locking in effect, the caller is
0439  * responsible for guaranteeing that the pwq stays online.
0440  *
0441  * The if/else clause exists only for the lockdep assertion and can be
0442  * ignored.
0443  */
0444 #define for_each_pwq(pwq, wq)                       \
0445     list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,      \
0446                  lockdep_is_held(&(wq->mutex)))
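
/*
 * Editor's usage sketch for for_each_pwq(): a pure read-side walk only
 * needs the RCU read lock, per the rule above:
 *
 *	struct pool_workqueue *pwq;
 *	int n = 0;
 *
 *	rcu_read_lock();
 *	for_each_pwq(pwq, wq)
 *		n++;
 *	rcu_read_unlock();
 */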
0447 
0448 #ifdef CONFIG_DEBUG_OBJECTS_WORK
0449 
0450 static const struct debug_obj_descr work_debug_descr;
0451 
0452 static void *work_debug_hint(void *addr)
0453 {
0454     return ((struct work_struct *) addr)->func;
0455 }
0456 
0457 static bool work_is_static_object(void *addr)
0458 {
0459     struct work_struct *work = addr;
0460 
0461     return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
0462 }
0463 
0464 /*
0465  * fixup_init is called when:
0466  * - an active object is initialized
0467  */
0468 static bool work_fixup_init(void *addr, enum debug_obj_state state)
0469 {
0470     struct work_struct *work = addr;
0471 
0472     switch (state) {
0473     case ODEBUG_STATE_ACTIVE:
0474         cancel_work_sync(work);
0475         debug_object_init(work, &work_debug_descr);
0476         return true;
0477     default:
0478         return false;
0479     }
0480 }
0481 
0482 /*
0483  * fixup_free is called when:
0484  * - an active object is freed
0485  */
0486 static bool work_fixup_free(void *addr, enum debug_obj_state state)
0487 {
0488     struct work_struct *work = addr;
0489 
0490     switch (state) {
0491     case ODEBUG_STATE_ACTIVE:
0492         cancel_work_sync(work);
0493         debug_object_free(work, &work_debug_descr);
0494         return true;
0495     default:
0496         return false;
0497     }
0498 }
0499 
0500 static const struct debug_obj_descr work_debug_descr = {
0501     .name       = "work_struct",
0502     .debug_hint = work_debug_hint,
0503     .is_static_object = work_is_static_object,
0504     .fixup_init = work_fixup_init,
0505     .fixup_free = work_fixup_free,
0506 };
0507 
0508 static inline void debug_work_activate(struct work_struct *work)
0509 {
0510     debug_object_activate(work, &work_debug_descr);
0511 }
0512 
0513 static inline void debug_work_deactivate(struct work_struct *work)
0514 {
0515     debug_object_deactivate(work, &work_debug_descr);
0516 }
0517 
0518 void __init_work(struct work_struct *work, int onstack)
0519 {
0520     if (onstack)
0521         debug_object_init_on_stack(work, &work_debug_descr);
0522     else
0523         debug_object_init(work, &work_debug_descr);
0524 }
0525 EXPORT_SYMBOL_GPL(__init_work);
0526 
0527 void destroy_work_on_stack(struct work_struct *work)
0528 {
0529     debug_object_free(work, &work_debug_descr);
0530 }
0531 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
0532 
0533 void destroy_delayed_work_on_stack(struct delayed_work *work)
0534 {
0535     destroy_timer_on_stack(&work->timer);
0536     debug_object_free(&work->work, &work_debug_descr);
0537 }
0538 EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
0539 
0540 #else
0541 static inline void debug_work_activate(struct work_struct *work) { }
0542 static inline void debug_work_deactivate(struct work_struct *work) { }
0543 #endif
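
/*
 * Editor's usage sketch for the on-stack helpers above; my_fn is a
 * placeholder work function:
 *
 *	struct work_struct work;
 *
 *	INIT_WORK_ONSTACK(&work, my_fn);
 *	schedule_work(&work);
 *	flush_work(&work);
 *	destroy_work_on_stack(&work);
 */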
0544 
0545 /**
0546  * worker_pool_assign_id - allocate ID and assign it to @pool
0547  * @pool: the pool pointer of interest
0548  *
0549  * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
0550  * successfully, -errno on failure.
0551  */
0552 static int worker_pool_assign_id(struct worker_pool *pool)
0553 {
0554     int ret;
0555 
0556     lockdep_assert_held(&wq_pool_mutex);
0557 
0558     ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
0559             GFP_KERNEL);
0560     if (ret >= 0) {
0561         pool->id = ret;
0562         return 0;
0563     }
0564     return ret;
0565 }
0566 
0567 /**
0568  * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
0569  * @wq: the target workqueue
0570  * @node: the node ID
0571  *
0572  * This must be called with any of wq_pool_mutex, wq->mutex or RCU
0573  * read locked.
0574  * If the pwq needs to be used beyond the locking in effect, the caller is
0575  * responsible for guaranteeing that the pwq stays online.
0576  *
0577  * Return: The unbound pool_workqueue for @node.
0578  */
0579 static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
0580                           int node)
0581 {
0582     assert_rcu_or_wq_mutex_or_pool_mutex(wq);
0583 
0584     /*
0585      * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
0586      * delayed item is pending.  The plan is to keep CPU -> NODE
0587      * mapping valid and stable across CPU on/offlines.  Once that
0588      * happens, this workaround can be removed.
0589      */
0590     if (unlikely(node == NUMA_NO_NODE))
0591         return wq->dfl_pwq;
0592 
0593     return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
0594 }
0595 
0596 static unsigned int work_color_to_flags(int color)
0597 {
0598     return color << WORK_STRUCT_COLOR_SHIFT;
0599 }
0600 
0601 static int get_work_color(unsigned long work_data)
0602 {
0603     return (work_data >> WORK_STRUCT_COLOR_SHIFT) &
0604         ((1 << WORK_STRUCT_COLOR_BITS) - 1);
0605 }
0606 
0607 static int work_next_color(int color)
0608 {
0609     return (color + 1) % WORK_NR_COLORS;
0610 }
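
/*
 * Editor's illustration: a color encoded by work_color_to_flags()
 * round-trips through get_work_color():
 *
 *	int color = pwq->work_color;
 *	unsigned long data = work_color_to_flags(color);
 *
 *	WARN_ON(get_work_color(data) != color);
 */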
0611 
0612 /*
0613  * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
0614  * contain the pointer to the queued pwq.  Once execution starts, the flag
0615  * is cleared and the high bits contain OFFQ flags and pool ID.
0616  *
0617  * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
0618  * and clear_work_data() can be used to set the pwq, pool or clear
0619  * work->data.  These functions should only be called while the work is
0620  * owned - ie. while the PENDING bit is set.
0621  *
0622  * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
0623  * corresponding to a work.  Pool is available once the work has been
0624  * queued anywhere after initialization until it is sync canceled.  pwq is
0625  * available only while the work item is queued.
0626  *
0627  * %WORK_OFFQ_CANCELING is used to mark a work item which is being
0628  * canceled.  While being canceled, a work item may have its PENDING set
0629  * but stay off timer and worklist for arbitrarily long and nobody should
0630  * try to steal the PENDING bit.
0631  */
0632 static inline void set_work_data(struct work_struct *work, unsigned long data,
0633                  unsigned long flags)
0634 {
0635     WARN_ON_ONCE(!work_pending(work));
0636     atomic_long_set(&work->data, data | flags | work_static(work));
0637 }
0638 
0639 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
0640              unsigned long extra_flags)
0641 {
0642     set_work_data(work, (unsigned long)pwq,
0643               WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
0644 }
0645 
0646 static void set_work_pool_and_keep_pending(struct work_struct *work,
0647                        int pool_id)
0648 {
0649     set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
0650               WORK_STRUCT_PENDING);
0651 }
0652 
0653 static void set_work_pool_and_clear_pending(struct work_struct *work,
0654                         int pool_id)
0655 {
0656     /*
0657      * The following wmb is paired with the implied mb in
0658      * test_and_set_bit(PENDING) and ensures all updates to @work made
0659      * here are visible to and precede any updates by the next PENDING
0660      * owner.
0661      */
0662     smp_wmb();
0663     set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
0664     /*
0665      * The following mb guarantees that previous clear of a PENDING bit
0666      * will not be reordered with any speculative LOADS or STORES from
0667      * work->current_func, which is executed afterwards.  This possible
0668      * reordering can lead to a missed execution on attempt to queue
0669      * the same @work.  E.g. consider this case:
0670      *
0671      *   CPU#0                         CPU#1
0672      *   ----------------------------  --------------------------------
0673      *
0674      * 1  STORE event_indicated
0675      * 2  queue_work_on() {
0676      * 3    test_and_set_bit(PENDING)
0677      * 4 }                             set_..._and_clear_pending() {
0678      * 5                                 set_work_data() # clear bit
0679      * 6                                 smp_mb()
0680      * 7                               work->current_func() {
0681      * 8                      LOAD event_indicated
0682      *                 }
0683      *
0684      * Without an explicit full barrier speculative LOAD on line 8 can
0685      * be executed before CPU#0 does STORE on line 1.  If that happens,
0686      * CPU#0 observes the PENDING bit is still set and new execution of
0687      * a @work is not queued, in the hope that CPU#1 will eventually
0688      * finish the queued @work.  Meanwhile CPU#1 does not see
0689      * event_indicated is set, because speculative LOAD was executed
0690      * before actual STORE.
0691      */
0692     smp_mb();
0693 }
0694 
0695 static void clear_work_data(struct work_struct *work)
0696 {
0697     smp_wmb();  /* see set_work_pool_and_clear_pending() */
0698     set_work_data(work, WORK_STRUCT_NO_POOL, 0);
0699 }
0700 
0701 static struct pool_workqueue *get_work_pwq(struct work_struct *work)
0702 {
0703     unsigned long data = atomic_long_read(&work->data);
0704 
0705     if (data & WORK_STRUCT_PWQ)
0706         return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
0707     else
0708         return NULL;
0709 }
0710 
0711 /**
0712  * get_work_pool - return the worker_pool a given work was associated with
0713  * @work: the work item of interest
0714  *
0715  * Pools are created and destroyed under wq_pool_mutex, and they allow
0716  * read access under RCU read lock.  As such, this function should be
0717  * called under wq_pool_mutex or inside of a rcu_read_lock() region.
0718  *
0719  * All fields of the returned pool are accessible as long as the above
0720  * mentioned locking is in effect.  If the returned pool needs to be used
0721  * beyond the critical section, the caller is responsible for ensuring the
0722  * returned pool is and stays online.
0723  *
0724  * Return: The worker_pool @work was last associated with.  %NULL if none.
0725  */
0726 static struct worker_pool *get_work_pool(struct work_struct *work)
0727 {
0728     unsigned long data = atomic_long_read(&work->data);
0729     int pool_id;
0730 
0731     assert_rcu_or_pool_mutex();
0732 
0733     if (data & WORK_STRUCT_PWQ)
0734         return ((struct pool_workqueue *)
0735             (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
0736 
0737     pool_id = data >> WORK_OFFQ_POOL_SHIFT;
0738     if (pool_id == WORK_OFFQ_POOL_NONE)
0739         return NULL;
0740 
0741     return idr_find(&worker_pool_idr, pool_id);
0742 }
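
/*
 * Editor's usage sketch for get_work_pool(), following the locking
 * rules documented above (cf. try_to_grab_pending() below):
 *
 *	struct worker_pool *pool;
 *
 *	rcu_read_lock();
 *	pool = get_work_pool(work);
 *	if (pool) {
 *		raw_spin_lock(&pool->lock);
 *		// pool fields may be inspected here
 *		raw_spin_unlock(&pool->lock);
 *	}
 *	rcu_read_unlock();
 */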
0743 
0744 /**
0745  * get_work_pool_id - return the worker pool ID a given work is associated with
0746  * @work: the work item of interest
0747  *
0748  * Return: The worker_pool ID @work was last associated with.
0749  * %WORK_OFFQ_POOL_NONE if none.
0750  */
0751 static int get_work_pool_id(struct work_struct *work)
0752 {
0753     unsigned long data = atomic_long_read(&work->data);
0754 
0755     if (data & WORK_STRUCT_PWQ)
0756         return ((struct pool_workqueue *)
0757             (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
0758 
0759     return data >> WORK_OFFQ_POOL_SHIFT;
0760 }
0761 
0762 static void mark_work_canceling(struct work_struct *work)
0763 {
0764     unsigned long pool_id = get_work_pool_id(work);
0765 
0766     pool_id <<= WORK_OFFQ_POOL_SHIFT;
0767     set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
0768 }
0769 
0770 static bool work_is_canceling(struct work_struct *work)
0771 {
0772     unsigned long data = atomic_long_read(&work->data);
0773 
0774     return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
0775 }
0776 
0777 /*
0778  * Policy functions.  These define the policies on how the global worker
0779  * pools are managed.  Unless noted otherwise, these functions assume that
0780  * they're being called with pool->lock held.
0781  */
0782 
0783 static bool __need_more_worker(struct worker_pool *pool)
0784 {
0785     return !pool->nr_running;
0786 }
0787 
0788 /*
0789  * Need to wake up a worker?  Called from anything but currently
0790  * running workers.
0791  *
0792  * Note that, because unbound workers never contribute to nr_running, this
0793  * function will always return %true for unbound pools as long as the
0794  * worklist isn't empty.
0795  */
0796 static bool need_more_worker(struct worker_pool *pool)
0797 {
0798     return !list_empty(&pool->worklist) && __need_more_worker(pool);
0799 }
0800 
0801 /* Can I start working?  Called from busy but !running workers. */
0802 static bool may_start_working(struct worker_pool *pool)
0803 {
0804     return pool->nr_idle;
0805 }
0806 
0807 /* Do I need to keep working?  Called from currently running workers. */
0808 static bool keep_working(struct worker_pool *pool)
0809 {
0810     return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
0811 }
0812 
0813 /* Do we need a new worker?  Called from manager. */
0814 static bool need_to_create_worker(struct worker_pool *pool)
0815 {
0816     return need_more_worker(pool) && !may_start_working(pool);
0817 }
0818 
0819 /* Do we have too many workers and should some go away? */
0820 static bool too_many_workers(struct worker_pool *pool)
0821 {
0822     bool managing = pool->flags & POOL_MANAGER_ACTIVE;
0823     int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
0824     int nr_busy = pool->nr_workers - nr_idle;
0825 
0826     return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
0827 }
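
/*
 * Editor's worked example for too_many_workers(): with
 * MAX_IDLE_WORKERS_RATIO = 4, nr_idle = 3 and nr_busy = 4 give
 * (3 - 2) * 4 = 4 >= 4, so the pool reports excess idle workers; with
 * nr_idle = 2 the nr_idle > 2 test already fails and nothing is culled.
 */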
0828 
0829 /*
0830  * Wake up functions.
0831  */
0832 
0833 /* Return the first idle worker.  Called with pool->lock held. */
0834 static struct worker *first_idle_worker(struct worker_pool *pool)
0835 {
0836     if (unlikely(list_empty(&pool->idle_list)))
0837         return NULL;
0838 
0839     return list_first_entry(&pool->idle_list, struct worker, entry);
0840 }
0841 
0842 /**
0843  * wake_up_worker - wake up an idle worker
0844  * @pool: worker pool to wake worker from
0845  *
0846  * Wake up the first idle worker of @pool.
0847  *
0848  * CONTEXT:
0849  * raw_spin_lock_irq(pool->lock).
0850  */
0851 static void wake_up_worker(struct worker_pool *pool)
0852 {
0853     struct worker *worker = first_idle_worker(pool);
0854 
0855     if (likely(worker))
0856         wake_up_process(worker->task);
0857 }
0858 
0859 /**
0860  * wq_worker_running - a worker is running again
0861  * @task: task waking up
0862  *
0863  * This function is called when a worker returns from schedule()
0864  */
0865 void wq_worker_running(struct task_struct *task)
0866 {
0867     struct worker *worker = kthread_data(task);
0868 
0869     if (!worker->sleeping)
0870         return;
0871 
0872     /*
0873      * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
0874      * and the nr_running increment below, we may ruin the nr_running reset
0875      * and leave with an unexpected pool->nr_running == 1 on the newly unbound
0876      * pool. Protect against such race.
0877      */
0878     preempt_disable();
0879     if (!(worker->flags & WORKER_NOT_RUNNING))
0880         worker->pool->nr_running++;
0881     preempt_enable();
0882     worker->sleeping = 0;
0883 }
0884 
0885 /**
0886  * wq_worker_sleeping - a worker is going to sleep
0887  * @task: task going to sleep
0888  *
0889  * This function is called from schedule() when a busy worker is
0890  * going to sleep.
0891  */
0892 void wq_worker_sleeping(struct task_struct *task)
0893 {
0894     struct worker *worker = kthread_data(task);
0895     struct worker_pool *pool;
0896 
0897     /*
0898      * Rescuers, which may not have all the fields set up like normal
0899      * workers, also reach here; let's not access anything before
0900      * checking NOT_RUNNING.
0901      */
0902     if (worker->flags & WORKER_NOT_RUNNING)
0903         return;
0904 
0905     pool = worker->pool;
0906 
0907     /* Return if preempted before wq_worker_running() was reached */
0908     if (worker->sleeping)
0909         return;
0910 
0911     worker->sleeping = 1;
0912     raw_spin_lock_irq(&pool->lock);
0913 
0914     /*
0915      * Recheck in case unbind_workers() preempted us. We don't
0916      * want to decrement nr_running after the worker is unbound
0917      * and nr_running has been reset.
0918      */
0919     if (worker->flags & WORKER_NOT_RUNNING) {
0920         raw_spin_unlock_irq(&pool->lock);
0921         return;
0922     }
0923 
0924     pool->nr_running--;
0925     if (need_more_worker(pool))
0926         wake_up_worker(pool);
0927     raw_spin_unlock_irq(&pool->lock);
0928 }
0929 
0930 /**
0931  * wq_worker_last_func - retrieve worker's last work function
0932  * @task: Task to retrieve last work function of.
0933  *
0934  * Determine the last function a worker executed. This is called from
0935  * the scheduler to get a worker's last known identity.
0936  *
0937  * CONTEXT:
0938  * raw_spin_lock_irq(rq->lock)
0939  *
0940  * This function is called during schedule() when a kworker is going
0941  * to sleep. It's used by psi to identify aggregation workers during
0942  * dequeuing, to allow periodic aggregation to shut off when that
0943  * worker is the last task in the system or cgroup to go to sleep.
0944  *
0945  * As this function doesn't involve any workqueue-related locking, it
0946  * only returns stable values when called from inside the scheduler's
0947  * queuing and dequeuing paths, when @task, which must be a kworker,
0948  * is guaranteed to not be processing any works.
0949  *
0950  * Return:
0951  * The last work function %current executed as a worker, NULL if it
0952  * hasn't executed any work yet.
0953  */
0954 work_func_t wq_worker_last_func(struct task_struct *task)
0955 {
0956     struct worker *worker = kthread_data(task);
0957 
0958     return worker->last_func;
0959 }
0960 
0961 /**
0962  * worker_set_flags - set worker flags and adjust nr_running accordingly
0963  * @worker: self
0964  * @flags: flags to set
0965  *
0966  * Set @flags in @worker->flags and adjust nr_running accordingly.
0967  *
0968  * CONTEXT:
0969  * raw_spin_lock_irq(pool->lock)
0970  */
0971 static inline void worker_set_flags(struct worker *worker, unsigned int flags)
0972 {
0973     struct worker_pool *pool = worker->pool;
0974 
0975     WARN_ON_ONCE(worker->task != current);
0976 
0977     /* If transitioning into NOT_RUNNING, adjust nr_running. */
0978     if ((flags & WORKER_NOT_RUNNING) &&
0979         !(worker->flags & WORKER_NOT_RUNNING)) {
0980         pool->nr_running--;
0981     }
0982 
0983     worker->flags |= flags;
0984 }
0985 
0986 /**
0987  * worker_clr_flags - clear worker flags and adjust nr_running accordingly
0988  * @worker: self
0989  * @flags: flags to clear
0990  *
0991  * Clear @flags in @worker->flags and adjust nr_running accordingly.
0992  *
0993  * CONTEXT:
0994  * raw_spin_lock_irq(pool->lock)
0995  */
0996 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
0997 {
0998     struct worker_pool *pool = worker->pool;
0999     unsigned int oflags = worker->flags;
1000 
1001     WARN_ON_ONCE(worker->task != current);
1002 
1003     worker->flags &= ~flags;
1004 
1005     /*
1006      * If transitioning out of NOT_RUNNING, increment nr_running.  Note
1007      * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is a mask
1008      * of multiple flags, not a single flag.
1009      */
1010     if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
1011         if (!(worker->flags & WORKER_NOT_RUNNING))
1012             pool->nr_running++;
1013 }
1014 
1015 /**
1016  * find_worker_executing_work - find worker which is executing a work
1017  * @pool: pool of interest
1018  * @work: work to find worker for
1019  *
1020  * Find a worker which is executing @work on @pool by searching
1021  * @pool->busy_hash which is keyed by the address of @work.  For a worker
1022  * to match, its current execution should match the address of @work and
1023  * its work function.  This is to avoid unwanted dependency between
1024  * unrelated work executions through a work item being recycled while still
1025  * being executed.
1026  *
1027  * This is a bit tricky.  A work item may be freed once its execution
1028  * starts and nothing prevents the freed area from being recycled for
1029  * another work item.  If the same work item address ends up being reused
1030  * before the original execution finishes, workqueue will identify the
1031  * recycled work item as currently executing and make it wait until the
1032  * current execution finishes, introducing an unwanted dependency.
1033  *
1034  * This function checks the work item address and work function to avoid
1035  * false positives.  Note that this isn't complete as one may construct a
1036  * work function which can introduce dependency onto itself through a
1037  * recycled work item.  Well, if somebody wants to shoot oneself in the
1038  * foot that badly, there's only so much we can do, and if such deadlock
1039  * actually occurs, it should be easy to locate the culprit work function.
1040  *
1041  * CONTEXT:
1042  * raw_spin_lock_irq(pool->lock).
1043  *
1044  * Return:
1045  * Pointer to worker which is executing @work if found, %NULL
1046  * otherwise.
1047  */
1048 static struct worker *find_worker_executing_work(struct worker_pool *pool,
1049                          struct work_struct *work)
1050 {
1051     struct worker *worker;
1052 
1053     hash_for_each_possible(pool->busy_hash, worker, hentry,
1054                    (unsigned long)work)
1055         if (worker->current_work == work &&
1056             worker->current_func == work->func)
1057             return worker;
1058 
1059     return NULL;
1060 }
1061 
1062 /**
1063  * move_linked_works - move linked works to a list
1064  * @work: start of series of works to be scheduled
1065  * @head: target list to append @work to
1066  * @nextp: out parameter for nested worklist walking
1067  *
1068  * Schedule linked works starting from @work to @head.  Work series to
1069  * be scheduled starts at @work and includes any consecutive work with
1070  * WORK_STRUCT_LINKED set in its predecessor.
1071  *
1072  * If @nextp is not NULL, it's updated to point to the next work of
1073  * the last scheduled work.  This allows move_linked_works() to be
1074  * nested inside outer list_for_each_entry_safe().
1075  *
1076  * CONTEXT:
1077  * raw_spin_lock_irq(pool->lock).
1078  */
1079 static void move_linked_works(struct work_struct *work, struct list_head *head,
1080                   struct work_struct **nextp)
1081 {
1082     struct work_struct *n;
1083 
1084     /*
1085      * A linked worklist always ends before the end of the list, so
1086      * NULL can be used for the list head.
1087      */
1088     list_for_each_entry_safe_from(work, n, NULL, entry) {
1089         list_move_tail(&work->entry, head);
1090         if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1091             break;
1092     }
1093 
1094     /*
1095      * If we're already inside safe list traversal and have moved
1096      * multiple works to the scheduled queue, the next position
1097      * needs to be updated.
1098      */
1099     if (nextp)
1100         *nextp = n;
1101 }
1102 
1103 /**
1104  * get_pwq - get an extra reference on the specified pool_workqueue
1105  * @pwq: pool_workqueue to get
1106  *
1107  * Obtain an extra reference on @pwq.  The caller should guarantee that
1108  * @pwq has positive refcnt and be holding the matching pool->lock.
1109  */
1110 static void get_pwq(struct pool_workqueue *pwq)
1111 {
1112     lockdep_assert_held(&pwq->pool->lock);
1113     WARN_ON_ONCE(pwq->refcnt <= 0);
1114     pwq->refcnt++;
1115 }
1116 
1117 /**
1118  * put_pwq - put a pool_workqueue reference
1119  * @pwq: pool_workqueue to put
1120  *
1121  * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
1122  * destruction.  The caller should be holding the matching pool->lock.
1123  */
1124 static void put_pwq(struct pool_workqueue *pwq)
1125 {
1126     lockdep_assert_held(&pwq->pool->lock);
1127     if (likely(--pwq->refcnt))
1128         return;
1129     if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
1130         return;
1131     /*
1132      * @pwq can't be released under pool->lock, bounce to
1133      * pwq_unbound_release_workfn().  This never recurses on the same
1134      * pool->lock as this path is taken only for unbound workqueues and
1135      * the release work item is scheduled on a per-cpu workqueue.  To
1136      * avoid lockdep warning, unbound pool->locks are given lockdep
1137      * subclass of 1 in get_unbound_pool().
1138      */
1139     schedule_work(&pwq->unbound_release_work);
1140 }
1141 
1142 /**
1143  * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1144  * @pwq: pool_workqueue to put (can be %NULL)
1145  *
1146  * put_pwq() with locking.  This function also allows %NULL @pwq.
1147  */
1148 static void put_pwq_unlocked(struct pool_workqueue *pwq)
1149 {
1150     if (pwq) {
1151         /*
1152          * As both pwqs and pools are RCU protected, the
1153          * following lock operations are safe.
1154          */
1155         raw_spin_lock_irq(&pwq->pool->lock);
1156         put_pwq(pwq);
1157         raw_spin_unlock_irq(&pwq->pool->lock);
1158     }
1159 }
1160 
1161 static void pwq_activate_inactive_work(struct work_struct *work)
1162 {
1163     struct pool_workqueue *pwq = get_work_pwq(work);
1164 
1165     trace_workqueue_activate_work(work);
1166     if (list_empty(&pwq->pool->worklist))
1167         pwq->pool->watchdog_ts = jiffies;
1168     move_linked_works(work, &pwq->pool->worklist, NULL);
1169     __clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work));
1170     pwq->nr_active++;
1171 }
1172 
1173 static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
1174 {
1175     struct work_struct *work = list_first_entry(&pwq->inactive_works,
1176                             struct work_struct, entry);
1177 
1178     pwq_activate_inactive_work(work);
1179 }
1180 
1181 /**
1182  * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1183  * @pwq: pwq of interest
1184  * @work_data: work_data of work which left the queue
1185  *
1186  * A work either has completed or is removed from pending queue,
1187  * decrement nr_in_flight of its pwq and handle workqueue flushing.
1188  *
1189  * CONTEXT:
1190  * raw_spin_lock_irq(pool->lock).
1191  */
1192 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data)
1193 {
1194     int color = get_work_color(work_data);
1195 
1196     if (!(work_data & WORK_STRUCT_INACTIVE)) {
1197         pwq->nr_active--;
1198         if (!list_empty(&pwq->inactive_works)) {
1199             /* one down, submit an inactive one */
1200             if (pwq->nr_active < pwq->max_active)
1201                 pwq_activate_first_inactive(pwq);
1202         }
1203     }
1204 
1205     pwq->nr_in_flight[color]--;
1206 
1207     /* is flush in progress and are we at the flushing tip? */
1208     if (likely(pwq->flush_color != color))
1209         goto out_put;
1210 
1211     /* are there still in-flight works? */
1212     if (pwq->nr_in_flight[color])
1213         goto out_put;
1214 
1215     /* this pwq is done, clear flush_color */
1216     pwq->flush_color = -1;
1217 
1218     /*
1219      * If this was the last pwq, wake up the first flusher.  It
1220      * will handle the rest.
1221      */
1222     if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1223         complete(&pwq->wq->first_flusher->done);
1224 out_put:
1225     put_pwq(pwq);
1226 }
1227 
1228 /**
1229  * try_to_grab_pending - steal work item from worklist and disable irq
1230  * @work: work item to steal
1231  * @is_dwork: @work is a delayed_work
1232  * @flags: place to store irq state
1233  *
1234  * Try to grab PENDING bit of @work.  This function can handle @work in any
1235  * stable state - idle, on timer or on worklist.
1236  *
1237  * Return:
1238  *
1239  *  ========    ================================================================
1240  *  1       if @work was pending and we successfully stole PENDING
1241  *  0       if @work was idle and we claimed PENDING
1242  *  -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry
1243  *  -ENOENT if someone else is canceling @work, this state may persist
1244  *      for arbitrarily long
1245  *  ========    ================================================================
1246  *
1247  * Note:
1248  * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
1249  * interrupted while holding PENDING and @work off queue, irq must be
1250  * disabled on entry.  This, combined with delayed_work->timer being
1251  * irqsafe, ensures that we return -EAGAIN for finite short period of time.
1252  *
1253  * On successful return, >= 0, irq is disabled and the caller is
1254  * responsible for releasing it using local_irq_restore(*@flags).
1255  *
1256  * This function is safe to call from any context including IRQ handler.
1257  */
1258 static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1259                    unsigned long *flags)
1260 {
1261     struct worker_pool *pool;
1262     struct pool_workqueue *pwq;
1263 
1264     local_irq_save(*flags);
1265 
1266     /* try to steal the timer if it exists */
1267     if (is_dwork) {
1268         struct delayed_work *dwork = to_delayed_work(work);
1269 
1270         /*
1271          * dwork->timer is irqsafe.  If del_timer() fails, it's
1272          * guaranteed that the timer is not queued anywhere and not
1273          * running on the local CPU.
1274          */
1275         if (likely(del_timer(&dwork->timer)))
1276             return 1;
1277     }
1278 
1279     /* try to claim PENDING the normal way */
1280     if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1281         return 0;
1282 
1283     rcu_read_lock();
1284     /*
1285      * The queueing is in progress, or it is already queued. Try to
1286      * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1287      */
1288     pool = get_work_pool(work);
1289     if (!pool)
1290         goto fail;
1291 
1292     raw_spin_lock(&pool->lock);
1293     /*
1294      * work->data is guaranteed to point to pwq only while the work
1295      * item is queued on pwq->wq, and both updating work->data to point
1296      * to pwq on queueing and to pool on dequeueing are done under
1297      * pwq->pool->lock.  This in turn guarantees that, if work->data
1298      * points to pwq which is associated with a locked pool, the work
1299      * item is currently queued on that pool.
1300      */
1301     pwq = get_work_pwq(work);
1302     if (pwq && pwq->pool == pool) {
1303         debug_work_deactivate(work);
1304 
1305         /*
1306          * A cancelable inactive work item must be in the
1307          * pwq->inactive_works since a queued barrier can't be
1308          * canceled (see the comments in insert_wq_barrier()).
1309          *
1310          * An inactive work item cannot be grabbed directly because
1311          * it might have linked barrier work items which, if left
1312          * on the inactive_works list, will confuse pwq->nr_active
1313          * management later on and cause stall.  Make sure the work
1314          * item is activated before grabbing.
1315          */
1316         if (*work_data_bits(work) & WORK_STRUCT_INACTIVE)
1317             pwq_activate_inactive_work(work);
1318 
1319         list_del_init(&work->entry);
1320         pwq_dec_nr_in_flight(pwq, *work_data_bits(work));
1321 
1322         /* work->data points to pwq iff queued, point to pool */
1323         set_work_pool_and_keep_pending(work, pool->id);
1324 
1325         raw_spin_unlock(&pool->lock);
1326         rcu_read_unlock();
1327         return 1;
1328     }
1329     raw_spin_unlock(&pool->lock);
1330 fail:
1331     rcu_read_unlock();
1332     local_irq_restore(*flags);
1333     if (work_is_canceling(work))
1334         return -ENOENT;
1335     cpu_relax();
1336     return -EAGAIN;
1337 }
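
/*
 * Editor's sketch of the canonical caller pattern for
 * try_to_grab_pending(): busy-retry on -EAGAIN, as the cancel paths
 * later in this file do:
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	do {
 *		ret = try_to_grab_pending(work, is_dwork, &flags);
 *	} while (unlikely(ret == -EAGAIN));
 *
 *	if (ret >= 0) {
 *		// we own PENDING here with irqs disabled
 *		local_irq_restore(flags);
 *	}
 */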
1338 
1339 /**
1340  * insert_work - insert a work into a pool
1341  * @pwq: pwq @work belongs to
1342  * @work: work to insert
1343  * @head: insertion point
1344  * @extra_flags: extra WORK_STRUCT_* flags to set
1345  *
1346  * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
1347  * work_struct flags.
1348  *
1349  * CONTEXT:
1350  * raw_spin_lock_irq(pool->lock).
1351  */
1352 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1353             struct list_head *head, unsigned int extra_flags)
1354 {
1355     struct worker_pool *pool = pwq->pool;
1356 
1357     /* record the work call stack in order to print it in KASAN reports */
1358     kasan_record_aux_stack_noalloc(work);
1359 
1360     /* we own @work, set data and link */
1361     set_work_pwq(work, pwq, extra_flags);
1362     list_add_tail(&work->entry, head);
1363     get_pwq(pwq);
1364 
1365     if (__need_more_worker(pool))
1366         wake_up_worker(pool);
1367 }
1368 
1369 /*
1370  * Test whether @work is being queued from another work executing on the
1371  * same workqueue.
1372  */
1373 static bool is_chained_work(struct workqueue_struct *wq)
1374 {
1375     struct worker *worker;
1376 
1377     worker = current_wq_worker();
1378     /*
1379      * Return %true iff I'm a worker executing a work item on @wq.  If
1380      * I'm @worker, it's safe to dereference it without locking.
1381      */
1382     return worker && worker->current_pwq->wq == wq;
1383 }
1384 
1385 /*
1386  * When queueing an unbound work item to a wq, prefer local CPU if allowed
1387  * by wq_unbound_cpumask.  Otherwise, round robin among the allowed ones to
1388  * avoid perturbing sensitive tasks.
1389  */
1390 static int wq_select_unbound_cpu(int cpu)
1391 {
1392     static bool printed_dbg_warning;
1393     int new_cpu;
1394 
1395     if (likely(!wq_debug_force_rr_cpu)) {
1396         if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
1397             return cpu;
1398     } else if (!printed_dbg_warning) {
1399         pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
1400         printed_dbg_warning = true;
1401     }
1402 
1403     if (cpumask_empty(wq_unbound_cpumask))
1404         return cpu;
1405 
1406     new_cpu = __this_cpu_read(wq_rr_cpu_last);
1407     new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
1408     if (unlikely(new_cpu >= nr_cpu_ids)) {
1409         new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
1410         if (unlikely(new_cpu >= nr_cpu_ids))
1411             return cpu;
1412     }
1413     __this_cpu_write(wq_rr_cpu_last, new_cpu);
1414 
1415     return new_cpu;
1416 }
1417 
1418 static void __queue_work(int cpu, struct workqueue_struct *wq,
1419              struct work_struct *work)
1420 {
1421     struct pool_workqueue *pwq;
1422     struct worker_pool *last_pool;
1423     struct list_head *worklist;
1424     unsigned int work_flags;
1425     unsigned int req_cpu = cpu;
1426 
1427     /*
1428      * While a work item is PENDING && off queue, a task trying to
1429      * steal the PENDING will busy-loop waiting for it to either get
1430      * queued or lose PENDING.  Grabbing PENDING and queueing should
1431      * happen with IRQ disabled.
1432      */
1433     lockdep_assert_irqs_disabled();
1434 
1435 
1436     /* if draining, only works from the same workqueue are allowed */
1437     if (unlikely(wq->flags & __WQ_DRAINING) &&
1438         WARN_ON_ONCE(!is_chained_work(wq)))
1439         return;
1440     rcu_read_lock();
1441 retry:
1442     /* pwq which will be used unless @work is executing elsewhere */
1443     if (wq->flags & WQ_UNBOUND) {
1444         if (req_cpu == WORK_CPU_UNBOUND)
1445             cpu = wq_select_unbound_cpu(raw_smp_processor_id());
1446         pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
1447     } else {
1448         if (req_cpu == WORK_CPU_UNBOUND)
1449             cpu = raw_smp_processor_id();
1450         pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
1451     }
1452 
1453     /*
1454      * If @work was previously on a different pool, it might still be
1455      * running there, in which case the work needs to be queued on that
1456      * pool to guarantee non-reentrancy.
1457      */
1458     last_pool = get_work_pool(work);
1459     if (last_pool && last_pool != pwq->pool) {
1460         struct worker *worker;
1461 
1462         raw_spin_lock(&last_pool->lock);
1463 
1464         worker = find_worker_executing_work(last_pool, work);
1465 
1466         if (worker && worker->current_pwq->wq == wq) {
1467             pwq = worker->current_pwq;
1468         } else {
1469             /* meh... not running there, queue here */
1470             raw_spin_unlock(&last_pool->lock);
1471             raw_spin_lock(&pwq->pool->lock);
1472         }
1473     } else {
1474         raw_spin_lock(&pwq->pool->lock);
1475     }
1476 
1477     /*
1478      * pwq is determined and locked.  For unbound pools, we could have
1479      * raced with pwq release and it could already be dead.  If its
1480      * refcnt is zero, repeat pwq selection.  Note that pwqs never die
1481      * without another pwq replacing it in the numa_pwq_tbl or while
1482      * work items are executing on it, so the retrying is guaranteed to
1483      * make forward-progress.
1484      */
1485     if (unlikely(!pwq->refcnt)) {
1486         if (wq->flags & WQ_UNBOUND) {
1487             raw_spin_unlock(&pwq->pool->lock);
1488             cpu_relax();
1489             goto retry;
1490         }
1491         /* oops */
1492         WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1493               wq->name, cpu);
1494     }
1495 
1496     /* pwq determined, queue */
1497     trace_workqueue_queue_work(req_cpu, pwq, work);
1498 
1499     if (WARN_ON(!list_empty(&work->entry)))
1500         goto out;
1501 
1502     pwq->nr_in_flight[pwq->work_color]++;
1503     work_flags = work_color_to_flags(pwq->work_color);
1504 
1505     if (likely(pwq->nr_active < pwq->max_active)) {
1506         trace_workqueue_activate_work(work);
1507         pwq->nr_active++;
1508         worklist = &pwq->pool->worklist;
1509         if (list_empty(worklist))
1510             pwq->pool->watchdog_ts = jiffies;
1511     } else {
1512         work_flags |= WORK_STRUCT_INACTIVE;
1513         worklist = &pwq->inactive_works;
1514     }
1515 
1516     debug_work_activate(work);
1517     insert_work(pwq, work, worklist, work_flags);
1518 
1519 out:
1520     raw_spin_unlock(&pwq->pool->lock);
1521     rcu_read_unlock();
1522 }
1523 
1524 /**
1525  * queue_work_on - queue work on specific cpu
1526  * @cpu: CPU number to execute work on
1527  * @wq: workqueue to use
1528  * @work: work to queue
1529  *
1530  * We queue the work to a specific CPU; the caller must ensure it
1531  * can't go away.  If the specified CPU does go away, the work will
1532  * execute on a randomly chosen CPU.
1533  *
1534  * Return: %false if @work was already on a queue, %true otherwise.
1535  */
1536 bool queue_work_on(int cpu, struct workqueue_struct *wq,
1537            struct work_struct *work)
1538 {
1539     bool ret = false;
1540     unsigned long flags;
1541 
1542     local_irq_save(flags);
1543 
1544     if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1545         __queue_work(cpu, wq, work);
1546         ret = true;
1547     }
1548 
1549     local_irq_restore(flags);
1550     return ret;
1551 }
1552 EXPORT_SYMBOL(queue_work_on);
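
For illustration, a minimal caller sketch (my_work_fn and queue_on_cpu are hypothetical); cpus_read_lock() is one way to satisfy the "CPU can't go away" requirement across the queueing call:

#include <linux/cpu.h>
#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
        pr_info("ran on CPU %d\n", raw_smp_processor_id());
}
static DECLARE_WORK(my_work, my_work_fn);

static void queue_on_cpu(int cpu)
{
        cpus_read_lock();               /* pin CPU hotplug state */
        if (cpu_online(cpu))
                queue_work_on(cpu, system_wq, &my_work);
        cpus_read_unlock();
}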
1553 
1554 /**
1555  * workqueue_select_cpu_near - Select a CPU based on NUMA node
1556  * @node: NUMA node ID that we want to select a CPU from
1557  *
1558  * This function will attempt to find a "random" cpu available on a given
1559  * node. If there are no CPUs available on the given node, it will return
1560  * WORK_CPU_UNBOUND, indicating that the work should simply be scheduled
1561  * on any available CPU.
1562  */
1563 static int workqueue_select_cpu_near(int node)
1564 {
1565     int cpu;
1566 
1567     /* No point in doing this if NUMA isn't enabled for workqueues */
1568     if (!wq_numa_enabled)
1569         return WORK_CPU_UNBOUND;
1570 
1571     /* Delay binding to CPU if node is not valid or not online */
1572     if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
1573         return WORK_CPU_UNBOUND;
1574 
1575     /* Use local node/cpu if we are already there */
1576     cpu = raw_smp_processor_id();
1577     if (node == cpu_to_node(cpu))
1578         return cpu;
1579 
1580     /* Use "random", otherwise known as "first", online CPU of node */
1581     cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
1582 
1583     /* If CPU is valid return that, otherwise just defer */
1584     return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
1585 }
1586 
1587 /**
1588  * queue_work_node - queue work on a "random" cpu for a given NUMA node
1589  * @node: NUMA node that we are targeting the work for
1590  * @wq: workqueue to use
1591  * @work: work to queue
1592  *
1593  * We queue the work to a "random" CPU within a given NUMA node. The basic
1594  * idea here is to provide a way to loosely associate work with a given
1595  * NUMA node.
1596  *
1597  * This function will only make a best effort attempt at getting this onto
1598  * the right NUMA node. If no node is requested or the requested node is
1599  * offline then we just fall back to standard queue_work behavior.
1600  *
1601  * Currently the "random" CPU ends up being the first available CPU in the
1602  * intersection of cpu_online_mask and the cpumask of the node, unless we
1603  * are running on the node. In that case we just use the current CPU.
1604  *
1605  * Return: %false if @work was already on a queue, %true otherwise.
1606  */
1607 bool queue_work_node(int node, struct workqueue_struct *wq,
1608              struct work_struct *work)
1609 {
1610     unsigned long flags;
1611     bool ret = false;
1612 
1613     /*
1614      * This current implementation is specific to unbound workqueues.
1615      * Specifically we only return the first available CPU for a given
1616      * node instead of cycling through individual CPUs within the node.
1617      *
1618      * If this is used with a per-cpu workqueue then the logic in
1619      * workqueue_select_cpu_near would need to be updated to allow for
1620      * some round robin type logic.
1621      */
1622     WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
1623 
1624     local_irq_save(flags);
1625 
1626     if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1627         int cpu = workqueue_select_cpu_near(node);
1628 
1629         __queue_work(cpu, wq, work);
1630         ret = true;
1631     }
1632 
1633     local_irq_restore(flags);
1634     return ret;
1635 }
1636 EXPORT_SYMBOL_GPL(queue_work_node);
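
A usage sketch (queue_near_device is hypothetical; @dev and @work belong to the caller): target the node a device sits on, with graceful fallback. Note queue_work_node() warns unless the workqueue is unbound, hence system_unbound_wq:

#include <linux/device.h>
#include <linux/workqueue.h>

/* Best effort: run @work near @dev's NUMA node.  dev_to_node() may
 * return NUMA_NO_NODE, which simply falls back to any CPU. */
static bool queue_near_device(struct device *dev, struct work_struct *work)
{
        return queue_work_node(dev_to_node(dev), system_unbound_wq, work);
}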
1637 
1638 void delayed_work_timer_fn(struct timer_list *t)
1639 {
1640     struct delayed_work *dwork = from_timer(dwork, t, timer);
1641 
1642     /* should have been called from irqsafe timer with irq already off */
1643     __queue_work(dwork->cpu, dwork->wq, &dwork->work);
1644 }
1645 EXPORT_SYMBOL(delayed_work_timer_fn);
1646 
1647 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1648                 struct delayed_work *dwork, unsigned long delay)
1649 {
1650     struct timer_list *timer = &dwork->timer;
1651     struct work_struct *work = &dwork->work;
1652 
1653     WARN_ON_ONCE(!wq);
1654     WARN_ON_FUNCTION_MISMATCH(timer->function, delayed_work_timer_fn);
1655     WARN_ON_ONCE(timer_pending(timer));
1656     WARN_ON_ONCE(!list_empty(&work->entry));
1657 
1658     /*
1659      * If @delay is 0, queue @dwork->work immediately.  This is for
1660      * both optimization and correctness.  The earliest @timer can
1661      * expire is on the closest next tick; delayed_work users depend
1662      * on there being no such delay when @delay is 0.
1663      */
1664     if (!delay) {
1665         __queue_work(cpu, wq, &dwork->work);
1666         return;
1667     }
1668 
1669     dwork->wq = wq;
1670     dwork->cpu = cpu;
1671     timer->expires = jiffies + delay;
1672 
1673     if (unlikely(cpu != WORK_CPU_UNBOUND))
1674         add_timer_on(timer, cpu);
1675     else
1676         add_timer(timer);
1677 }
1678 
1679 /**
1680  * queue_delayed_work_on - queue work on specific CPU after delay
1681  * @cpu: CPU number to execute work on
1682  * @wq: workqueue to use
1683  * @dwork: work to queue
1684  * @delay: number of jiffies to wait before queueing
1685  *
1686  * Return: %false if @work was already on a queue, %true otherwise.  If
1687  * @delay is zero and @dwork is idle, it will be scheduled for immediate
1688  * execution.
1689  */
1690 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1691                struct delayed_work *dwork, unsigned long delay)
1692 {
1693     struct work_struct *work = &dwork->work;
1694     bool ret = false;
1695     unsigned long flags;
1696 
1697     /* read the comment in __queue_work() */
1698     local_irq_save(flags);
1699 
1700     if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1701         __queue_delayed_work(cpu, wq, dwork, delay);
1702         ret = true;
1703     }
1704 
1705     local_irq_restore(flags);
1706     return ret;
1707 }
1708 EXPORT_SYMBOL(queue_delayed_work_on);
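
A minimal self-rearming poller sketch (all names hypothetical). DECLARE_DELAYED_WORK() wires the embedded timer to delayed_work_timer_fn() above, so timer expiry requeues the work:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(poll_work, poll_fn);

static void poll_fn(struct work_struct *work)
{
        /* ... sample state ... */
        queue_delayed_work(system_wq, &poll_work, HZ);  /* re-arm in 1s */
}

static void start_polling(void)
{
        /* first shot pinned to CPU 0, purely for illustration */
        queue_delayed_work_on(0, system_wq, &poll_work, HZ);
}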
1709 
1710 /**
1711  * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1712  * @cpu: CPU number to execute work on
1713  * @wq: workqueue to use
1714  * @dwork: work to queue
1715  * @delay: number of jiffies to wait before queueing
1716  *
1717  * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
1718  * modify @dwork's timer so that it expires after @delay.  If @delay is
1719  * zero, @work is guaranteed to be scheduled immediately regardless of its
1720  * current state.
1721  *
1722  * Return: %false if @dwork was idle and queued, %true if @dwork was
1723  * pending and its timer was modified.
1724  *
1725  * This function is safe to call from any context including IRQ handler.
1726  * See try_to_grab_pending() for details.
1727  */
1728 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
1729              struct delayed_work *dwork, unsigned long delay)
1730 {
1731     unsigned long flags;
1732     int ret;
1733 
1734     do {
1735         ret = try_to_grab_pending(&dwork->work, true, &flags);
1736     } while (unlikely(ret == -EAGAIN));
1737 
1738     if (likely(ret >= 0)) {
1739         __queue_delayed_work(cpu, wq, dwork, delay);
1740         local_irq_restore(flags);
1741     }
1742 
1743     /* -ENOENT from try_to_grab_pending() becomes %true */
1744     return ret;
1745 }
1746 EXPORT_SYMBOL_GPL(mod_delayed_work_on);
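
Because a pending timer is simply pushed out, mod_delayed_work() works as a debounce primitive: only the last event of a burst actually executes. A sketch, with hypothetical names:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void flush_events_fn(struct work_struct *work)
{
        /* ... process the accumulated burst ... */
}
static DECLARE_DELAYED_WORK(flush_events, flush_events_fn);

/* Safe from IRQ context; each call restarts the 100ms quiet window. */
static void on_event(void)
{
        mod_delayed_work(system_wq, &flush_events, msecs_to_jiffies(100));
}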
1747 
1748 static void rcu_work_rcufn(struct rcu_head *rcu)
1749 {
1750     struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
1751 
1752     /* read the comment in __queue_work() */
1753     local_irq_disable();
1754     __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
1755     local_irq_enable();
1756 }
1757 
1758 /**
1759  * queue_rcu_work - queue work after a RCU grace period
1760  * @wq: workqueue to use
1761  * @rwork: work to queue
1762  *
1763  * Return: %false if @rwork was already pending, %true otherwise.  Note
1764  * that a full RCU grace period is guaranteed only after a %true return.
1765  * While @rwork is guaranteed to be executed after a %false return, the
1766  * execution may happen before a full RCU grace period has passed.
1767  */
1768 bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
1769 {
1770     struct work_struct *work = &rwork->work;
1771 
1772     if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1773         rwork->wq = wq;
1774         call_rcu(&rwork->rcu, rcu_work_rcufn);
1775         return true;
1776     }
1777 
1778     return false;
1779 }
1780 EXPORT_SYMBOL(queue_rcu_work);
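
One pattern this enables (sketch; struct foo and its helpers are hypothetical): free an object in process context only after an RCU grace period, without hand-rolling a call_rcu() callback that queues work itself:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo {
        struct rcu_work rwork;
        /* ... RCU-protected payload ... */
};

static void foo_free_fn(struct work_struct *work)
{
        struct foo *f = container_of(to_rcu_work(work), struct foo, rwork);

        kfree(f);       /* process context, grace period has elapsed */
}

static void foo_release(struct foo *f)
{
        INIT_RCU_WORK(&f->rwork, foo_free_fn);
        queue_rcu_work(system_wq, &f->rwork);
}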
1781 
1782 /**
1783  * worker_enter_idle - enter idle state
1784  * @worker: worker which is entering idle state
1785  *
1786  * @worker is entering idle state.  Update stats and idle timer if
1787  * necessary.
1788  *
1789  * LOCKING:
1790  * raw_spin_lock_irq(pool->lock).
1791  */
1792 static void worker_enter_idle(struct worker *worker)
1793 {
1794     struct worker_pool *pool = worker->pool;
1795 
1796     if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
1797         WARN_ON_ONCE(!list_empty(&worker->entry) &&
1798              (worker->hentry.next || worker->hentry.pprev)))
1799         return;
1800 
1801     /* can't use worker_set_flags(), also called from create_worker() */
1802     worker->flags |= WORKER_IDLE;
1803     pool->nr_idle++;
1804     worker->last_active = jiffies;
1805 
1806     /* idle_list is LIFO */
1807     list_add(&worker->entry, &pool->idle_list);
1808 
1809     if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1810         mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1811 
1812     /* Sanity check nr_running. */
1813     WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
1814 }
1815 
1816 /**
1817  * worker_leave_idle - leave idle state
1818  * @worker: worker which is leaving idle state
1819  *
1820  * @worker is leaving idle state.  Update stats.
1821  *
1822  * LOCKING:
1823  * raw_spin_lock_irq(pool->lock).
1824  */
1825 static void worker_leave_idle(struct worker *worker)
1826 {
1827     struct worker_pool *pool = worker->pool;
1828 
1829     if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
1830         return;
1831     worker_clr_flags(worker, WORKER_IDLE);
1832     pool->nr_idle--;
1833     list_del_init(&worker->entry);
1834 }
1835 
1836 static struct worker *alloc_worker(int node)
1837 {
1838     struct worker *worker;
1839 
1840     worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
1841     if (worker) {
1842         INIT_LIST_HEAD(&worker->entry);
1843         INIT_LIST_HEAD(&worker->scheduled);
1844         INIT_LIST_HEAD(&worker->node);
1845         /* on creation a worker is in !idle && prep state */
1846         worker->flags = WORKER_PREP;
1847     }
1848     return worker;
1849 }
1850 
1851 /**
1852  * worker_attach_to_pool() - attach a worker to a pool
1853  * @worker: worker to be attached
1854  * @pool: the target pool
1855  *
1856  * Attach @worker to @pool.  Once attached, the %WORKER_UNBOUND flag and
1857  * cpu-binding of @worker are kept coordinated with the pool across
1858  * cpu-[un]hotplugs.
1859  */
1860 static void worker_attach_to_pool(struct worker *worker,
1861                    struct worker_pool *pool)
1862 {
1863     mutex_lock(&wq_pool_attach_mutex);
1864 
1865     /*
1866      * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
1867      * stable across this function.  See the comments above the flag
1868      * definition for details.
1869      */
1870     if (pool->flags & POOL_DISASSOCIATED)
1871         worker->flags |= WORKER_UNBOUND;
1872     else
1873         kthread_set_per_cpu(worker->task, pool->cpu);
1874 
1875     if (worker->rescue_wq)
1876         set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1877 
1878     list_add_tail(&worker->node, &pool->workers);
1879     worker->pool = pool;
1880 
1881     mutex_unlock(&wq_pool_attach_mutex);
1882 }
1883 
1884 /**
1885  * worker_detach_from_pool() - detach a worker from its pool
1886  * @worker: worker which is attached to its pool
1887  *
1888  * Undo the attaching which had been done in worker_attach_to_pool().  The
1889  * caller worker shouldn't access the pool after detaching unless it
1890  * holds another reference to the pool.
1891  */
1892 static void worker_detach_from_pool(struct worker *worker)
1893 {
1894     struct worker_pool *pool = worker->pool;
1895     struct completion *detach_completion = NULL;
1896 
1897     mutex_lock(&wq_pool_attach_mutex);
1898 
1899     kthread_set_per_cpu(worker->task, -1);
1900     list_del(&worker->node);
1901     worker->pool = NULL;
1902 
1903     if (list_empty(&pool->workers))
1904         detach_completion = pool->detach_completion;
1905     mutex_unlock(&wq_pool_attach_mutex);
1906 
1907     /* clear leftover flags without pool->lock after it is detached */
1908     worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
1909 
1910     if (detach_completion)
1911         complete(detach_completion);
1912 }
1913 
1914 /**
1915  * create_worker - create a new workqueue worker
1916  * @pool: pool the new worker will belong to
1917  *
1918  * Create and start a new worker which is attached to @pool.
1919  *
1920  * CONTEXT:
1921  * Might sleep.  Does GFP_KERNEL allocations.
1922  *
1923  * Return:
1924  * Pointer to the newly created worker.
1925  */
1926 static struct worker *create_worker(struct worker_pool *pool)
1927 {
1928     struct worker *worker;
1929     int id;
1930     char id_buf[16];
1931 
1932     /* ID is needed to determine kthread name */
1933     id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
1934     if (id < 0)
1935         return NULL;
1936 
1937     worker = alloc_worker(pool->node);
1938     if (!worker)
1939         goto fail;
1940 
1941     worker->id = id;
1942 
1943     if (pool->cpu >= 0)
1944         snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
1945              pool->attrs->nice < 0  ? "H" : "");
1946     else
1947         snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
1948 
1949     worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
1950                           "kworker/%s", id_buf);
1951     if (IS_ERR(worker->task))
1952         goto fail;
1953 
1954     set_user_nice(worker->task, pool->attrs->nice);
1955     kthread_bind_mask(worker->task, pool->attrs->cpumask);
1956 
1957     /* successful, attach the worker to the pool */
1958     worker_attach_to_pool(worker, pool);
1959 
1960     /* start the newly created worker */
1961     raw_spin_lock_irq(&pool->lock);
1962     worker->pool->nr_workers++;
1963     worker_enter_idle(worker);
1964     wake_up_process(worker->task);
1965     raw_spin_unlock_irq(&pool->lock);
1966 
1967     return worker;
1968 
1969 fail:
1970     ida_free(&pool->worker_ida, id);
1971     kfree(worker);
1972     return NULL;
1973 }
1974 
1975 /**
1976  * destroy_worker - destroy a workqueue worker
1977  * @worker: worker to be destroyed
1978  *
1979  * Destroy @worker and adjust @pool stats accordingly.  The worker should
1980  * be idle.
1981  *
1982  * CONTEXT:
1983  * raw_spin_lock_irq(pool->lock).
1984  */
1985 static void destroy_worker(struct worker *worker)
1986 {
1987     struct worker_pool *pool = worker->pool;
1988 
1989     lockdep_assert_held(&pool->lock);
1990 
1991     /* sanity check frenzy */
1992     if (WARN_ON(worker->current_work) ||
1993         WARN_ON(!list_empty(&worker->scheduled)) ||
1994         WARN_ON(!(worker->flags & WORKER_IDLE)))
1995         return;
1996 
1997     pool->nr_workers--;
1998     pool->nr_idle--;
1999 
2000     list_del_init(&worker->entry);
2001     worker->flags |= WORKER_DIE;
2002     wake_up_process(worker->task);
2003 }
2004 
2005 static void idle_worker_timeout(struct timer_list *t)
2006 {
2007     struct worker_pool *pool = from_timer(pool, t, idle_timer);
2008 
2009     raw_spin_lock_irq(&pool->lock);
2010 
2011     while (too_many_workers(pool)) {
2012         struct worker *worker;
2013         unsigned long expires;
2014 
2015         /* idle_list is kept in LIFO order, check the last one */
2016         worker = list_entry(pool->idle_list.prev, struct worker, entry);
2017         expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2018 
2019         if (time_before(jiffies, expires)) {
2020             mod_timer(&pool->idle_timer, expires);
2021             break;
2022         }
2023 
2024         destroy_worker(worker);
2025     }
2026 
2027     raw_spin_unlock_irq(&pool->lock);
2028 }
2029 
2030 static void send_mayday(struct work_struct *work)
2031 {
2032     struct pool_workqueue *pwq = get_work_pwq(work);
2033     struct workqueue_struct *wq = pwq->wq;
2034 
2035     lockdep_assert_held(&wq_mayday_lock);
2036 
2037     if (!wq->rescuer)
2038         return;
2039 
2040     /* mayday mayday mayday */
2041     if (list_empty(&pwq->mayday_node)) {
2042         /*
2043          * If @pwq is for an unbound wq, its base ref may be put at
2044          * any time due to an attribute change.  Pin @pwq until the
2045          * rescuer is done with it.
2046          */
2047         get_pwq(pwq);
2048         list_add_tail(&pwq->mayday_node, &wq->maydays);
2049         wake_up_process(wq->rescuer->task);
2050     }
2051 }
2052 
2053 static void pool_mayday_timeout(struct timer_list *t)
2054 {
2055     struct worker_pool *pool = from_timer(pool, t, mayday_timer);
2056     struct work_struct *work;
2057 
2058     raw_spin_lock_irq(&pool->lock);
2059     raw_spin_lock(&wq_mayday_lock);     /* for wq->maydays */
2060 
2061     if (need_to_create_worker(pool)) {
2062         /*
2063          * We've been trying to create a new worker but
2064          * haven't been successful.  We might be hitting an
2065          * allocation deadlock.  Send distress signals to
2066          * rescuers.
2067          */
2068         list_for_each_entry(work, &pool->worklist, entry)
2069             send_mayday(work);
2070     }
2071 
2072     raw_spin_unlock(&wq_mayday_lock);
2073     raw_spin_unlock_irq(&pool->lock);
2074 
2075     mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
2076 }
2077 
2078 /**
2079  * maybe_create_worker - create a new worker if necessary
2080  * @pool: pool to create a new worker for
2081  *
2082  * Create a new worker for @pool if necessary.  @pool is guaranteed to
2083  * have at least one idle worker on return from this function.  If
2084  * creating a new worker takes longer than MAYDAY_INITIAL_TIMEOUT, mayday is
2085  * sent to all rescuers with works scheduled on @pool to resolve
2086  * possible allocation deadlock.
2087  *
2088  * On return, need_to_create_worker() is guaranteed to be %false and
2089  * may_start_working() %true.
2090  *
2091  * LOCKING:
2092  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2093  * multiple times.  Does GFP_KERNEL allocations.  Called only from
2094  * manager.
2095  */
2096 static void maybe_create_worker(struct worker_pool *pool)
2097 __releases(&pool->lock)
2098 __acquires(&pool->lock)
2099 {
2100 restart:
2101     raw_spin_unlock_irq(&pool->lock);
2102 
2103     /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
2104     mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
2105 
2106     while (true) {
2107         if (create_worker(pool) || !need_to_create_worker(pool))
2108             break;
2109 
2110         schedule_timeout_interruptible(CREATE_COOLDOWN);
2111 
2112         if (!need_to_create_worker(pool))
2113             break;
2114     }
2115 
2116     del_timer_sync(&pool->mayday_timer);
2117     raw_spin_lock_irq(&pool->lock);
2118     /*
2119      * This is necessary even after a new worker was just successfully
2120      * created as @pool->lock was dropped and the new worker might have
2121      * already become busy.
2122      */
2123     if (need_to_create_worker(pool))
2124         goto restart;
2125 }
2126 
2127 /**
2128  * manage_workers - manage worker pool
2129  * @worker: self
2130  *
2131  * Assume the manager role and manage the worker pool @worker belongs
2132  * to.  At any given time, there can be at most one manager per
2133  * pool.  The exclusion is handled automatically by this function.
2134  *
2135  * The caller can safely start processing works on false return.  On
2136  * true return, it's guaranteed that need_to_create_worker() is false
2137  * and may_start_working() is true.
2138  *
2139  * CONTEXT:
2140  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2141  * multiple times.  Does GFP_KERNEL allocations.
2142  *
2143  * Return:
2144  * %false if the pool doesn't need management and the caller can safely
2145  * start processing works, %true if management function was performed and
2146  * the conditions that the caller verified before calling the function may
2147  * no longer be true.
2148  */
2149 static bool manage_workers(struct worker *worker)
2150 {
2151     struct worker_pool *pool = worker->pool;
2152 
2153     if (pool->flags & POOL_MANAGER_ACTIVE)
2154         return false;
2155 
2156     pool->flags |= POOL_MANAGER_ACTIVE;
2157     pool->manager = worker;
2158 
2159     maybe_create_worker(pool);
2160 
2161     pool->manager = NULL;
2162     pool->flags &= ~POOL_MANAGER_ACTIVE;
2163     rcuwait_wake_up(&manager_wait);
2164     return true;
2165 }
2166 
2167 /**
2168  * process_one_work - process single work
2169  * @worker: self
2170  * @work: work to process
2171  *
2172  * Process @work.  This function contains all the logic necessary to
2173  * process a single work including synchronization against and
2174  * interaction with other workers on the same cpu, queueing and
2175  * flushing.  As long as the context requirement is met, any worker can
2176  * call this function to process a work.
2177  *
2178  * CONTEXT:
2179  * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
2180  */
2181 static void process_one_work(struct worker *worker, struct work_struct *work)
2182 __releases(&pool->lock)
2183 __acquires(&pool->lock)
2184 {
2185     struct pool_workqueue *pwq = get_work_pwq(work);
2186     struct worker_pool *pool = worker->pool;
2187     bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
2188     unsigned long work_data;
2189     struct worker *collision;
2190 #ifdef CONFIG_LOCKDEP
2191     /*
2192      * It is permissible to free the struct work_struct from
2193      * inside the function that is called from it, this we need to
2194      * take into account for lockdep too.  To avoid bogus "held
2195      * lock freed" warnings as well as problems when looking into
2196      * work->lockdep_map, make a copy and use that here.
2197      */
2198     struct lockdep_map lockdep_map;
2199 
2200     lockdep_copy_map(&lockdep_map, &work->lockdep_map);
2201 #endif
2202     /* ensure we're on the correct CPU */
2203     WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
2204              raw_smp_processor_id() != pool->cpu);
2205 
2206     /*
2207      * A single work shouldn't be executed concurrently by
2208      * multiple workers on a single cpu.  Check whether anyone is
2209      * already processing the work.  If so, defer the work to the
2210      * currently executing one.
2211      */
2212     collision = find_worker_executing_work(pool, work);
2213     if (unlikely(collision)) {
2214         move_linked_works(work, &collision->scheduled, NULL);
2215         return;
2216     }
2217 
2218     /* claim and dequeue */
2219     debug_work_deactivate(work);
2220     hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2221     worker->current_work = work;
2222     worker->current_func = work->func;
2223     worker->current_pwq = pwq;
2224     work_data = *work_data_bits(work);
2225     worker->current_color = get_work_color(work_data);
2226 
2227     /*
2228      * Record wq name for cmdline and debug reporting, may get
2229      * overridden through set_worker_desc().
2230      */
2231     strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
2232 
2233     list_del_init(&work->entry);
2234 
2235     /*
2236      * CPU intensive works don't participate in concurrency management.
2237      * They're the scheduler's responsibility.  This takes @worker out
2238      * of concurrency management and the next code block will chain
2239      * execution of the pending work items.
2240      */
2241     if (unlikely(cpu_intensive))
2242         worker_set_flags(worker, WORKER_CPU_INTENSIVE);
2243 
2244     /*
2245      * Wake up another worker if necessary.  The condition is always
2246      * false for normal per-cpu workers since nr_running would always
2247      * be >= 1 at this point.  This is used to chain execution of the
2248      * pending work items for WORKER_NOT_RUNNING workers such as the
2249      * UNBOUND and CPU_INTENSIVE ones.
2250      */
2251     if (need_more_worker(pool))
2252         wake_up_worker(pool);
2253 
2254     /*
2255      * Record the last pool and clear PENDING which should be the last
2256      * update to @work.  Also, do this inside @pool->lock so that
2257      * PENDING and queued state changes happen together while IRQ is
2258      * disabled.
2259      */
2260     set_work_pool_and_clear_pending(work, pool->id);
2261 
2262     raw_spin_unlock_irq(&pool->lock);
2263 
2264     lock_map_acquire(&pwq->wq->lockdep_map);
2265     lock_map_acquire(&lockdep_map);
2266     /*
2267      * Strictly speaking we should mark the invariant state without holding
2268      * any locks, that is, before these two lock_map_acquire()'s.
2269      *
2270      * However, that would result in:
2271      *
2272      *   A(W1)
2273      *   WFC(C)
2274      *      A(W1)
2275      *      C(C)
2276      *
2277      * Which would create W1->C->W1 dependencies, even though there is no
2278      * actual deadlock possible. There are two solutions, using a
2279      * read-recursive acquire on the work(queue) 'locks', but this will then
2280      * hit the lockdep limitation on recursive locks, or simply discard
2281      * these locks.
2282      *
2283      * AFAICT there is no possible deadlock scenario between the
2284      * flush_work() and complete() primitives (except for single-threaded
2285      * workqueues), so hiding them isn't a problem.
2286      */
2287     lockdep_invariant_state(true);
2288     trace_workqueue_execute_start(work);
2289     worker->current_func(work);
2290     /*
2291      * While we must be careful to not use "work" after this, the trace
2292      * point will only record its address.
2293      */
2294     trace_workqueue_execute_end(work, worker->current_func);
2295     lock_map_release(&lockdep_map);
2296     lock_map_release(&pwq->wq->lockdep_map);
2297 
2298     if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2299         pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2300                "     last function: %ps\n",
2301                current->comm, preempt_count(), task_pid_nr(current),
2302                worker->current_func);
2303         debug_show_held_locks(current);
2304         dump_stack();
2305     }
2306 
2307     /*
2308      * The following prevents a kworker from hogging CPU on !PREEMPTION
2309      * kernels, where a requeueing work item waiting for something to
2310      * happen could deadlock with stop_machine as such work item could
2311      * indefinitely requeue itself while all other CPUs are trapped in
2312      * stop_machine. At the same time, report a quiescent RCU state so
2313      * the same condition doesn't freeze RCU.
2314      */
2315     cond_resched();
2316 
2317     raw_spin_lock_irq(&pool->lock);
2318 
2319     /* clear cpu intensive status */
2320     if (unlikely(cpu_intensive))
2321         worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2322 
2323     /* tag the worker for identification in schedule() */
2324     worker->last_func = worker->current_func;
2325 
2326     /* we're done with it, release */
2327     hash_del(&worker->hentry);
2328     worker->current_work = NULL;
2329     worker->current_func = NULL;
2330     worker->current_pwq = NULL;
2331     worker->current_color = INT_MAX;
2332     pwq_dec_nr_in_flight(pwq, work_data);
2333 }
2334 
2335 /**
2336  * process_scheduled_works - process scheduled works
2337  * @worker: self
2338  *
2339  * Process all scheduled works.  Please note that the scheduled list
2340  * may change while processing a work, so this function repeatedly
2341  * fetches a work from the top and executes it.
2342  *
2343  * CONTEXT:
2344  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2345  * multiple times.
2346  */
2347 static void process_scheduled_works(struct worker *worker)
2348 {
2349     while (!list_empty(&worker->scheduled)) {
2350         struct work_struct *work = list_first_entry(&worker->scheduled,
2351                         struct work_struct, entry);
2352         process_one_work(worker, work);
2353     }
2354 }
2355 
2356 static void set_pf_worker(bool val)
2357 {
2358     mutex_lock(&wq_pool_attach_mutex);
2359     if (val)
2360         current->flags |= PF_WQ_WORKER;
2361     else
2362         current->flags &= ~PF_WQ_WORKER;
2363     mutex_unlock(&wq_pool_attach_mutex);
2364 }
2365 
2366 /**
2367  * worker_thread - the worker thread function
2368  * @__worker: self
2369  *
2370  * The worker thread function.  All workers belong to a worker_pool -
2371  * either a per-cpu one or dynamic unbound one.  These workers process all
2372  * work items regardless of their specific target workqueue.  The only
2373  * exception is work items which belong to workqueues with a rescuer which
2374  * will be explained in rescuer_thread().
2375  *
2376  * Return: 0
2377  */
2378 static int worker_thread(void *__worker)
2379 {
2380     struct worker *worker = __worker;
2381     struct worker_pool *pool = worker->pool;
2382 
2383     /* tell the scheduler that this is a workqueue worker */
2384     set_pf_worker(true);
2385 woke_up:
2386     raw_spin_lock_irq(&pool->lock);
2387 
2388     /* am I supposed to die? */
2389     if (unlikely(worker->flags & WORKER_DIE)) {
2390         raw_spin_unlock_irq(&pool->lock);
2391         WARN_ON_ONCE(!list_empty(&worker->entry));
2392         set_pf_worker(false);
2393 
2394         set_task_comm(worker->task, "kworker/dying");
2395         ida_free(&pool->worker_ida, worker->id);
2396         worker_detach_from_pool(worker);
2397         kfree(worker);
2398         return 0;
2399     }
2400 
2401     worker_leave_idle(worker);
2402 recheck:
2403     /* no more worker necessary? */
2404     if (!need_more_worker(pool))
2405         goto sleep;
2406 
2407     /* do we need to manage? */
2408     if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2409         goto recheck;
2410 
2411     /*
2412      * ->scheduled list can only be filled while a worker is
2413      * preparing to process a work or actually processing it.
2414      * Make sure nobody diddled with it while I was sleeping.
2415      */
2416     WARN_ON_ONCE(!list_empty(&worker->scheduled));
2417 
2418     /*
2419      * Finish PREP stage.  We're guaranteed to have at least one idle
2420      * worker or that someone else has already assumed the manager
2421      * role.  This is where @worker starts participating in concurrency
2422      * management if applicable and concurrency management is restored
2423      * after being rebound.  See rebind_workers() for details.
2424      */
2425     worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2426 
2427     do {
2428         struct work_struct *work =
2429             list_first_entry(&pool->worklist,
2430                      struct work_struct, entry);
2431 
2432         pool->watchdog_ts = jiffies;
2433 
2434         if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2435             /* optimization path, not strictly necessary */
2436             process_one_work(worker, work);
2437             if (unlikely(!list_empty(&worker->scheduled)))
2438                 process_scheduled_works(worker);
2439         } else {
2440             move_linked_works(work, &worker->scheduled, NULL);
2441             process_scheduled_works(worker);
2442         }
2443     } while (keep_working(pool));
2444 
2445     worker_set_flags(worker, WORKER_PREP);
2446 sleep:
2447     /*
2448      * pool->lock is held and there's no work to process and no need to
2449      * manage, sleep.  Workers are woken up only while holding
2450      * pool->lock or from local cpu, so setting the current state
2451      * before releasing pool->lock is enough to prevent losing any
2452      * event.
2453      */
2454     worker_enter_idle(worker);
2455     __set_current_state(TASK_IDLE);
2456     raw_spin_unlock_irq(&pool->lock);
2457     schedule();
2458     goto woke_up;
2459 }
2460 
2461 /**
2462  * rescuer_thread - the rescuer thread function
2463  * @__rescuer: self
2464  *
2465  * Workqueue rescuer thread function.  There's one rescuer for each
2466  * workqueue which has WQ_MEM_RECLAIM set.
2467  *
2468  * Regular work processing on a pool may block trying to create a new
2469  * worker, which uses a GFP_KERNEL allocation that has a slight chance
2470  * of developing into a deadlock if some works currently on the same
2471  * queue need to be processed to satisfy that allocation.  This is
2472  * the problem the rescuer solves.
2473  *
2474  * When such a condition is possible, the pool summons rescuers of all
2475  * workqueues which have works queued on the pool and lets them process
2476  * those works so that forward progress can be guaranteed.
2477  *
2478  * This should happen rarely.
2479  *
2480  * Return: 0
2481  */
2482 static int rescuer_thread(void *__rescuer)
2483 {
2484     struct worker *rescuer = __rescuer;
2485     struct workqueue_struct *wq = rescuer->rescue_wq;
2486     struct list_head *scheduled = &rescuer->scheduled;
2487     bool should_stop;
2488 
2489     set_user_nice(current, RESCUER_NICE_LEVEL);
2490 
2491     /*
2492      * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
2493      * doesn't participate in concurrency management.
2494      */
2495     set_pf_worker(true);
2496 repeat:
2497     set_current_state(TASK_IDLE);
2498 
2499     /*
2500      * By the time the rescuer is requested to stop, the workqueue
2501      * shouldn't have any work pending, but @wq->maydays may still have
2502      * pwq(s) queued.  This can happen when non-rescuer workers consume
2503      * all the work items before the rescuer gets to them.  Go through
2504      * @wq->maydays processing before acting on should_stop so that the
2505      * list is always empty on exit.
2506      */
2507     should_stop = kthread_should_stop();
2508 
2509     /* see whether any pwq is asking for help */
2510     raw_spin_lock_irq(&wq_mayday_lock);
2511 
2512     while (!list_empty(&wq->maydays)) {
2513         struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2514                     struct pool_workqueue, mayday_node);
2515         struct worker_pool *pool = pwq->pool;
2516         struct work_struct *work, *n;
2517         bool first = true;
2518 
2519         __set_current_state(TASK_RUNNING);
2520         list_del_init(&pwq->mayday_node);
2521 
2522         raw_spin_unlock_irq(&wq_mayday_lock);
2523 
2524         worker_attach_to_pool(rescuer, pool);
2525 
2526         raw_spin_lock_irq(&pool->lock);
2527 
2528         /*
2529          * Slurp in all works issued via this workqueue and
2530          * process'em.
2531          */
2532         WARN_ON_ONCE(!list_empty(scheduled));
2533         list_for_each_entry_safe(work, n, &pool->worklist, entry) {
2534             if (get_work_pwq(work) == pwq) {
2535                 if (first)
2536                     pool->watchdog_ts = jiffies;
2537                 move_linked_works(work, scheduled, &n);
2538             }
2539             first = false;
2540         }
2541 
2542         if (!list_empty(scheduled)) {
2543             process_scheduled_works(rescuer);
2544 
2545             /*
2546              * The above execution of rescued work items could
2547              * have created more to rescue through
2548              * pwq_activate_first_inactive() or chained
2549              * queueing.  Let's put @pwq back on mayday list so
2550              * that such back-to-back work items, which may be
2551              * being used to relieve memory pressure, don't
2552              * incur MAYDAY_INTERVAL delay in between.
2553              */
2554             if (pwq->nr_active && need_to_create_worker(pool)) {
2555                 raw_spin_lock(&wq_mayday_lock);
2556                 /*
2557                  * Queue iff we aren't racing destruction
2558                  * and somebody else hasn't queued it already.
2559                  */
2560                 if (wq->rescuer && list_empty(&pwq->mayday_node)) {
2561                     get_pwq(pwq);
2562                     list_add_tail(&pwq->mayday_node, &wq->maydays);
2563                 }
2564                 raw_spin_unlock(&wq_mayday_lock);
2565             }
2566         }
2567 
2568         /*
2569          * Put the reference grabbed by send_mayday().  @pool won't
2570          * go away while we're still attached to it.
2571          */
2572         put_pwq(pwq);
2573 
2574         /*
2575          * Leave this pool.  If need_more_worker() is %true, notify a
2576          * regular worker; otherwise, we end up with 0 concurrency
2577          * and stalling the execution.
2578          */
2579         if (need_more_worker(pool))
2580             wake_up_worker(pool);
2581 
2582         raw_spin_unlock_irq(&pool->lock);
2583 
2584         worker_detach_from_pool(rescuer);
2585 
2586         raw_spin_lock_irq(&wq_mayday_lock);
2587     }
2588 
2589     raw_spin_unlock_irq(&wq_mayday_lock);
2590 
2591     if (should_stop) {
2592         __set_current_state(TASK_RUNNING);
2593         set_pf_worker(false);
2594         return 0;
2595     }
2596 
2597     /* rescuers should never participate in concurrency management */
2598     WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
2599     schedule();
2600     goto repeat;
2601 }
2602 
2603 /**
2604  * check_flush_dependency - check for flush dependency sanity
2605  * @target_wq: workqueue being flushed
2606  * @target_work: work item being flushed (NULL for workqueue flushes)
2607  *
2608  * %current is trying to flush the whole @target_wq or @target_work on it.
2609  * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
2610  * reclaiming memory or running on a workqueue which doesn't have
2611  * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
2612  * a deadlock.
2613  */
2614 static void check_flush_dependency(struct workqueue_struct *target_wq,
2615                    struct work_struct *target_work)
2616 {
2617     work_func_t target_func = target_work ? target_work->func : NULL;
2618     struct worker *worker;
2619 
2620     if (target_wq->flags & WQ_MEM_RECLAIM)
2621         return;
2622 
2623     worker = current_wq_worker();
2624 
2625     WARN_ONCE(current->flags & PF_MEMALLOC,
2626           "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
2627           current->pid, current->comm, target_wq->name, target_func);
2628     WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
2629                   (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
2630           "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
2631           worker->current_pwq->wq->name, worker->current_func,
2632           target_wq->name, target_func);
2633 }
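
The rule being enforced: work running on a WQ_MEM_RECLAIM workqueue must only flush other WQ_MEM_RECLAIM workqueues, or memory reclaim can stall behind a queue with no forward-progress guarantee. A creation sketch (queue name and helper hypothetical):

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *io_wq;

static int init_io_wq(void)
{
        /* WQ_MEM_RECLAIM guarantees a rescuer thread, so this queue
         * keeps making progress even when worker creation is blocked
         * on memory reclaim. */
        io_wq = alloc_workqueue("my_io", WQ_MEM_RECLAIM, 0);
        return io_wq ? 0 : -ENOMEM;
}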
2634 
2635 struct wq_barrier {
2636     struct work_struct  work;
2637     struct completion   done;
2638     struct task_struct  *task;  /* purely informational */
2639 };
2640 
2641 static void wq_barrier_func(struct work_struct *work)
2642 {
2643     struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2644     complete(&barr->done);
2645 }
2646 
2647 /**
2648  * insert_wq_barrier - insert a barrier work
2649  * @pwq: pwq to insert barrier into
2650  * @barr: wq_barrier to insert
2651  * @target: target work to attach @barr to
2652  * @worker: worker currently executing @target, NULL if @target is not executing
2653  *
2654  * @barr is linked to @target such that @barr is completed only after
2655  * @target finishes execution.  Please note that the ordering
2656  * guarantee is observed only with respect to @target and on the local
2657  * cpu.
2658  *
2659  * Currently, a queued barrier can't be canceled.  This is because
2660  * try_to_grab_pending() can't determine whether the work to be
2661  * grabbed is at the head of the queue and thus can't clear LINKED
2662  * flag of the previous work while there must be a valid next work
2663  * after a work with LINKED flag set.
2664  *
2665  * Note that when @worker is non-NULL, @target may be modified
2666  * underneath us, so we can't reliably determine pwq from @target.
2667  *
2668  * CONTEXT:
2669  * raw_spin_lock_irq(pool->lock).
2670  */
2671 static void insert_wq_barrier(struct pool_workqueue *pwq,
2672                   struct wq_barrier *barr,
2673                   struct work_struct *target, struct worker *worker)
2674 {
2675     unsigned int work_flags = 0;
2676     unsigned int work_color;
2677     struct list_head *head;
2678 
2679     /*
2680      * debugobject calls are safe here even with pool->lock locked
2681      * as we know for sure that this will not trigger any of the
2682      * checks and call back into the fixup functions where we
2683      * might deadlock.
2684      */
2685     INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2686     __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2687 
2688     init_completion_map(&barr->done, &target->lockdep_map);
2689 
2690     barr->task = current;
2691 
2692     /* The barrier work item does not participate in pwq->nr_active. */
2693     work_flags |= WORK_STRUCT_INACTIVE;
2694 
2695     /*
2696      * If @target is currently being executed, schedule the
2697      * barrier to the worker; otherwise, put it after @target.
2698      */
2699     if (worker) {
2700         head = worker->scheduled.next;
2701         work_color = worker->current_color;
2702     } else {
2703         unsigned long *bits = work_data_bits(target);
2704 
2705         head = target->entry.next;
2706         /* there can already be other linked works, inherit and set */
2707         work_flags |= *bits & WORK_STRUCT_LINKED;
2708         work_color = get_work_color(*bits);
2709         __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2710     }
2711 
2712     pwq->nr_in_flight[work_color]++;
2713     work_flags |= work_color_to_flags(work_color);
2714 
2715     debug_work_activate(&barr->work);
2716     insert_work(pwq, &barr->work, head, work_flags);
2717 }
2718 
2719 /**
2720  * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
2721  * @wq: workqueue being flushed
2722  * @flush_color: new flush color, < 0 for no-op
2723  * @work_color: new work color, < 0 for no-op
2724  *
2725  * Prepare pwqs for workqueue flushing.
2726  *
2727  * If @flush_color is non-negative, flush_color on all pwqs should be
2728  * -1.  If no pwq has in-flight work items at the specified color, all
2729  * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
2730  * has in-flight work items, its pwq->flush_color is set to
2731  * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
2732  * wakeup logic is armed and %true is returned.
2733  *
2734  * The caller should have initialized @wq->first_flusher prior to
2735  * calling this function with non-negative @flush_color.  If
2736  * @flush_color is negative, no flush color update is done and %false
2737  * is returned.
2738  *
2739  * If @work_color is non-negative, all pwqs should have the same
2740  * work_color which is previous to @work_color and all will be
2741  * advanced to @work_color.
2742  *
2743  * CONTEXT:
2744  * mutex_lock(wq->mutex).
2745  *
2746  * Return:
2747  * %true if @flush_color >= 0 and there's something to flush.  %false
2748  * otherwise.
2749  */
2750 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
2751                       int flush_color, int work_color)
2752 {
2753     bool wait = false;
2754     struct pool_workqueue *pwq;
2755 
2756     if (flush_color >= 0) {
2757         WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
2758         atomic_set(&wq->nr_pwqs_to_flush, 1);
2759     }
2760 
2761     for_each_pwq(pwq, wq) {
2762         struct worker_pool *pool = pwq->pool;
2763 
2764         raw_spin_lock_irq(&pool->lock);
2765 
2766         if (flush_color >= 0) {
2767             WARN_ON_ONCE(pwq->flush_color != -1);
2768 
2769             if (pwq->nr_in_flight[flush_color]) {
2770                 pwq->flush_color = flush_color;
2771                 atomic_inc(&wq->nr_pwqs_to_flush);
2772                 wait = true;
2773             }
2774         }
2775 
2776         if (work_color >= 0) {
2777             WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
2778             pwq->work_color = work_color;
2779         }
2780 
2781         raw_spin_unlock_irq(&pool->lock);
2782     }
2783 
2784     if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
2785         complete(&wq->first_flusher->done);
2786 
2787     return wait;
2788 }
2789 
2790 /**
2791  * __flush_workqueue - ensure that any scheduled work has run to completion.
2792  * @wq: workqueue to flush
2793  *
2794  * This function sleeps until all work items which were queued on entry
2795  * have finished execution, but it is not livelocked by new incoming ones.
2796  */
2797 void __flush_workqueue(struct workqueue_struct *wq)
2798 {
2799     struct wq_flusher this_flusher = {
2800         .list = LIST_HEAD_INIT(this_flusher.list),
2801         .flush_color = -1,
2802         .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
2803     };
2804     int next_color;
2805 
2806     if (WARN_ON(!wq_online))
2807         return;
2808 
2809     lock_map_acquire(&wq->lockdep_map);
2810     lock_map_release(&wq->lockdep_map);
2811 
2812     mutex_lock(&wq->mutex);
2813 
2814     /*
2815      * Start-to-wait phase
2816      */
2817     next_color = work_next_color(wq->work_color);
2818 
2819     if (next_color != wq->flush_color) {
2820         /*
2821          * Color space is not full.  The current work_color
2822          * becomes our flush_color and work_color is advanced
2823          * by one.
2824          */
2825         WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
2826         this_flusher.flush_color = wq->work_color;
2827         wq->work_color = next_color;
2828 
2829         if (!wq->first_flusher) {
2830             /* no flush in progress, become the first flusher */
2831             WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2832 
2833             wq->first_flusher = &this_flusher;
2834 
2835             if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
2836                                wq->work_color)) {
2837                 /* nothing to flush, done */
2838                 wq->flush_color = next_color;
2839                 wq->first_flusher = NULL;
2840                 goto out_unlock;
2841             }
2842         } else {
2843             /* wait in queue */
2844             WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
2845             list_add_tail(&this_flusher.list, &wq->flusher_queue);
2846             flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2847         }
2848     } else {
2849         /*
2850          * Oops, color space is full, wait on overflow queue.
2851          * The next flush completion will assign us
2852          * flush_color and transfer to flusher_queue.
2853          */
2854         list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2855     }
2856 
2857     check_flush_dependency(wq, NULL);
2858 
2859     mutex_unlock(&wq->mutex);
2860 
2861     wait_for_completion(&this_flusher.done);
2862 
2863     /*
2864      * Wake-up-and-cascade phase
2865      *
2866      * First flushers are responsible for cascading flushes and
2867      * handling overflow.  Non-first flushers can simply return.
2868      */
2869     if (READ_ONCE(wq->first_flusher) != &this_flusher)
2870         return;
2871 
2872     mutex_lock(&wq->mutex);
2873 
2874     /* we might have raced, check again with mutex held */
2875     if (wq->first_flusher != &this_flusher)
2876         goto out_unlock;
2877 
2878     WRITE_ONCE(wq->first_flusher, NULL);
2879 
2880     WARN_ON_ONCE(!list_empty(&this_flusher.list));
2881     WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2882 
2883     while (true) {
2884         struct wq_flusher *next, *tmp;
2885 
2886         /* complete all the flushers sharing the current flush color */
2887         list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2888             if (next->flush_color != wq->flush_color)
2889                 break;
2890             list_del_init(&next->list);
2891             complete(&next->done);
2892         }
2893 
2894         WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
2895                  wq->flush_color != work_next_color(wq->work_color));
2896 
2897         /* this flush_color is finished, advance by one */
2898         wq->flush_color = work_next_color(wq->flush_color);
2899 
2900         /* one color has been freed, handle overflow queue */
2901         if (!list_empty(&wq->flusher_overflow)) {
2902             /*
2903              * Assign the same color to all overflowed
2904              * flushers, advance work_color and append to
2905              * flusher_queue.  This is the start-to-wait
2906              * phase for these overflowed flushers.
2907              */
2908             list_for_each_entry(tmp, &wq->flusher_overflow, list)
2909                 tmp->flush_color = wq->work_color;
2910 
2911             wq->work_color = work_next_color(wq->work_color);
2912 
2913             list_splice_tail_init(&wq->flusher_overflow,
2914                           &wq->flusher_queue);
2915             flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2916         }
2917 
2918         if (list_empty(&wq->flusher_queue)) {
2919             WARN_ON_ONCE(wq->flush_color != wq->work_color);
2920             break;
2921         }
2922 
2923         /*
2924          * Need to flush more colors.  Make the next flusher
2925          * the new first flusher and arm pwqs.
2926          */
2927         WARN_ON_ONCE(wq->flush_color == wq->work_color);
2928         WARN_ON_ONCE(wq->flush_color != next->flush_color);
2929 
2930         list_del_init(&next->list);
2931         wq->first_flusher = next;
2932 
2933         if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
2934             break;
2935 
2936         /*
2937          * Meh... this color is already done, clear first
2938          * flusher and repeat cascading.
2939          */
2940         wq->first_flusher = NULL;
2941     }
2942 
2943 out_unlock:
2944     mutex_unlock(&wq->mutex);
2945 }
2946 EXPORT_SYMBOL(__flush_workqueue);
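
Callers normally reach this through the flush_workqueue() wrapper. A sketch of the typical use (helper hypothetical): quiesce everything queued so far, e.g. before changing state the work items read. It must not be called from a work item running on the same workqueue:

static void reconfigure(struct workqueue_struct *wq)
{
        flush_workqueue(wq);    /* sleeps until prior items finish */
        /* ... safely update state the work items consume ... */
}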
2947 
2948 /**
2949  * drain_workqueue - drain a workqueue
2950  * @wq: workqueue to drain
2951  *
2952  * Wait until the workqueue becomes empty.  While draining is in progress,
2953  * only chain queueing is allowed.  IOW, only currently pending or running
2954  * work items on @wq can queue further work items on it.  @wq is flushed
2955  * repeatedly until it becomes empty.  The number of flushes is determined
2956  * by the depth of chaining and should be relatively small.  Whine if it
2957  * takes too long.
2958  */
2959 void drain_workqueue(struct workqueue_struct *wq)
2960 {
2961     unsigned int flush_cnt = 0;
2962     struct pool_workqueue *pwq;
2963 
2964     /*
2965      * __queue_work() needs to test whether there are drainers; it is much
2966      * hotter than drain_workqueue() and already looks at @wq->flags.
2967      * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
2968      */
2969     mutex_lock(&wq->mutex);
2970     if (!wq->nr_drainers++)
2971         wq->flags |= __WQ_DRAINING;
2972     mutex_unlock(&wq->mutex);
2973 reflush:
2974     __flush_workqueue(wq);
2975 
2976     mutex_lock(&wq->mutex);
2977 
2978     for_each_pwq(pwq, wq) {
2979         bool drained;
2980 
2981         raw_spin_lock_irq(&pwq->pool->lock);
2982         drained = !pwq->nr_active && list_empty(&pwq->inactive_works);
2983         raw_spin_unlock_irq(&pwq->pool->lock);
2984 
2985         if (drained)
2986             continue;
2987 
2988         if (++flush_cnt == 10 ||
2989             (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2990             pr_warn("workqueue %s: %s() isn't complete after %u tries\n",
2991                 wq->name, __func__, flush_cnt);
2992 
2993         mutex_unlock(&wq->mutex);
2994         goto reflush;
2995     }
2996 
2997     if (!--wq->nr_drainers)
2998         wq->flags &= ~__WQ_DRAINING;
2999     mutex_unlock(&wq->mutex);
3000 }
3001 EXPORT_SYMBOL_GPL(drain_workqueue);
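
A typical teardown sketch (helper hypothetical). destroy_workqueue() drains internally as well, so the explicit drain_workqueue() matters only when the queue must be empty before intermediate teardown steps:

static void teardown(struct workqueue_struct *wq)
{
        drain_workqueue(wq);    /* waits out chained re-queues */
        /* ... release resources the work items were using ... */
        destroy_workqueue(wq);
}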
3002 
3003 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
3004                  bool from_cancel)
3005 {
3006     struct worker *worker = NULL;
3007     struct worker_pool *pool;
3008     struct pool_workqueue *pwq;
3009 
3010     might_sleep();
3011 
3012     rcu_read_lock();
3013     pool = get_work_pool(work);
3014     if (!pool) {
3015         rcu_read_unlock();
3016         return false;
3017     }
3018 
3019     raw_spin_lock_irq(&pool->lock);
3020     /* see the comment in try_to_grab_pending() with the same code */
3021     pwq = get_work_pwq(work);
3022     if (pwq) {
3023         if (unlikely(pwq->pool != pool))
3024             goto already_gone;
3025     } else {
3026         worker = find_worker_executing_work(pool, work);
3027         if (!worker)
3028             goto already_gone;
3029         pwq = worker->current_pwq;
3030     }
3031 
3032     check_flush_dependency(pwq->wq, work);
3033 
3034     insert_wq_barrier(pwq, barr, work, worker);
3035     raw_spin_unlock_irq(&pool->lock);
3036 
3037     /*
3038      * Force a lock recursion deadlock when using flush_work() inside a
3039      * single-threaded or rescuer equipped workqueue.
3040      *
3041      * For single threaded workqueues the deadlock happens when the flushed
3042      * work is queued after the work issuing the flush_work(). For rescuer equipped
3043      * workqueues the deadlock happens when the rescuer stalls, blocking
3044      * forward progress.
3045      */
3046     if (!from_cancel &&
3047         (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
3048         lock_map_acquire(&pwq->wq->lockdep_map);
3049         lock_map_release(&pwq->wq->lockdep_map);
3050     }
3051     rcu_read_unlock();
3052     return true;
3053 already_gone:
3054     raw_spin_unlock_irq(&pool->lock);
3055     rcu_read_unlock();
3056     return false;
3057 }
3058 
3059 static bool __flush_work(struct work_struct *work, bool from_cancel)
3060 {
3061     struct wq_barrier barr;
3062 
3063     if (WARN_ON(!wq_online))
3064         return false;
3065 
3066     if (WARN_ON(!work->func))
3067         return false;
3068 
3069     lock_map_acquire(&work->lockdep_map);
3070     lock_map_release(&work->lockdep_map);
3071 
3072     if (start_flush_work(work, &barr, from_cancel)) {
3073         wait_for_completion(&barr.done);
3074         destroy_work_on_stack(&barr.work);
3075         return true;
3076     } else {
3077         return false;
3078     }
3079 }
3080 
3081 /**
3082  * flush_work - wait for a work to finish executing the last queueing instance
3083  * @work: the work to flush
3084  *
3085  * Wait until @work has finished execution.  @work is guaranteed to be idle
3086  * on return if it hasn't been requeued since flush started.
3087  *
3088  * Return:
3089  * %true if flush_work() waited for the work to finish execution,
3090  * %false if it was already idle.
3091  */
3092 bool flush_work(struct work_struct *work)
3093 {
3094     return __flush_work(work, false);
3095 }
3096 EXPORT_SYMBOL_GPL(flush_work);
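
/*
 * Example (illustrative sketch): flushing the last queueing instance of a
 * work item before consuming its result.  update_work, update_fn and
 * cached_result are hypothetical.
 */
#include <linux/workqueue.h>

static int cached_result;

static void update_fn(struct work_struct *work)
{
    cached_result = 42;    /* stand-in for the real computation */
}
static DECLARE_WORK(update_work, update_fn);

static int read_result(void)
{
    schedule_work(&update_work);
    flush_work(&update_work);    /* update_fn() has finished on return */
    return cached_result;
}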
3097 
3098 struct cwt_wait {
3099     wait_queue_entry_t      wait;
3100     struct work_struct  *work;
3101 };
3102 
3103 static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
3104 {
3105     struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
3106 
3107     if (cwait->work != key)
3108         return 0;
3109     return autoremove_wake_function(wait, mode, sync, key);
3110 }
3111 
3112 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
3113 {
3114     static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
3115     unsigned long flags;
3116     int ret;
3117 
3118     do {
3119         ret = try_to_grab_pending(work, is_dwork, &flags);
3120         /*
3121          * If someone else is already canceling, wait for it to
3122          * finish.  flush_work() doesn't work for PREEMPT_NONE
3123          * because we may get scheduled between @work's completion
3124          * and the other canceling task resuming and clearing
3125          * CANCELING - flush_work() will return false immediately
3126          * as @work is no longer busy, try_to_grab_pending() will
3127          * return -ENOENT as @work is still being canceled and the
3128          * other canceling task won't be able to clear CANCELING as
3129          * we're hogging the CPU.
3130          *
3131          * Let's wait for completion using a waitqueue.  As this
3132          * may lead to the thundering herd problem, use a custom
3133          * wake function which matches @work along with exclusive
3134          * wait and wakeup.
3135          */
3136         if (unlikely(ret == -ENOENT)) {
3137             struct cwt_wait cwait;
3138 
3139             init_wait(&cwait.wait);
3140             cwait.wait.func = cwt_wakefn;
3141             cwait.work = work;
3142 
3143             prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
3144                           TASK_UNINTERRUPTIBLE);
3145             if (work_is_canceling(work))
3146                 schedule();
3147             finish_wait(&cancel_waitq, &cwait.wait);
3148         }
3149     } while (unlikely(ret < 0));
3150 
3151     /* tell other tasks trying to grab @work to back off */
3152     mark_work_canceling(work);
3153     local_irq_restore(flags);
3154 
3155     /*
3156      * This allows canceling during early boot.  We know that @work
3157      * isn't executing.
3158      */
3159     if (wq_online)
3160         __flush_work(work, true);
3161 
3162     clear_work_data(work);
3163 
3164     /*
3165      * Paired with prepare_to_wait() above so that either
3166      * waitqueue_active() is visible here or !work_is_canceling() is
3167      * visible there.
3168      */
3169     smp_mb();
3170     if (waitqueue_active(&cancel_waitq))
3171         __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
3172 
3173     return ret;
3174 }
3175 
3176 /**
3177  * cancel_work_sync - cancel a work and wait for it to finish
3178  * @work: the work to cancel
3179  *
3180  * Cancel @work and wait for its execution to finish.  This function
3181  * can be used even if the work re-queues itself or migrates to
3182  * another workqueue.  On return from this function, @work is
3183  * guaranteed to be not pending or executing on any CPU.
3184  *
3185  * cancel_work_sync(&delayed_work->work) must not be used for
3186  * delayed_work's.  Use cancel_delayed_work_sync() instead.
3187  *
3188  * The caller must ensure that the workqueue on which @work was last
3189  * queued can't be destroyed before this function returns.
3190  *
3191  * Return:
3192  * %true if @work was pending, %false otherwise.
3193  */
3194 bool cancel_work_sync(struct work_struct *work)
3195 {
3196     return __cancel_work_timer(work, false);
3197 }
3198 EXPORT_SYMBOL_GPL(cancel_work_sync);
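
/*
 * Example (illustrative sketch): typical teardown ordering.  Because
 * cancel_work_sync() also copes with self-requeueing items, the object
 * backing the work may be freed once it returns.  struct my_dev is
 * hypothetical.
 */
#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_dev {
    struct work_struct irq_work;
};

static void my_dev_release(struct my_dev *dev)
{
    cancel_work_sync(&dev->irq_work);    /* idle on all CPUs after this */
    kfree(dev);                          /* now safe to free */
}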
3199 
3200 /**
3201  * flush_delayed_work - wait for a dwork to finish executing the last queueing
3202  * @dwork: the delayed work to flush
3203  *
3204  * Delayed timer is cancelled and the pending work is queued for
3205  * immediate execution.  Like flush_work(), this function only
3206  * considers the last queueing instance of @dwork.
3207  *
3208  * Return:
3209  * %true if flush_work() waited for the work to finish execution,
3210  * %false if it was already idle.
3211  */
3212 bool flush_delayed_work(struct delayed_work *dwork)
3213 {
3214     local_irq_disable();
3215     if (del_timer_sync(&dwork->timer))
3216         __queue_work(dwork->cpu, dwork->wq, &dwork->work);
3217     local_irq_enable();
3218     return flush_work(&dwork->work);
3219 }
3220 EXPORT_SYMBOL(flush_delayed_work);
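
/*
 * Example (illustrative sketch): forcing a debounced save to happen
 * immediately, e.g. on suspend.  save_dwork and save_fn are hypothetical.
 * The pending timer is cancelled and the work runs now rather than after
 * the remaining delay.
 */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void save_fn(struct work_struct *work)
{
    /* write the dirty state out */
}
static DECLARE_DELAYED_WORK(save_dwork, save_fn);

static void on_suspend(void)
{
    schedule_delayed_work(&save_dwork, msecs_to_jiffies(5000));
    flush_delayed_work(&save_dwork);    /* runs now, not in five seconds */
}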
3221 
3222 /**
3223  * flush_rcu_work - wait for a rwork to finish executing the last queueing
3224  * @rwork: the rcu work to flush
3225  *
3226  * Return:
3227  * %true if flush_rcu_work() waited for the work to finish execution,
3228  * %false if it was already idle.
3229  */
3230 bool flush_rcu_work(struct rcu_work *rwork)
3231 {
3232     if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
3233         rcu_barrier();
3234         flush_work(&rwork->work);
3235         return true;
3236     } else {
3237         return flush_work(&rwork->work);
3238     }
3239 }
3240 EXPORT_SYMBOL(flush_rcu_work);
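
/*
 * Example (illustrative sketch): queueing work to run after an RCU grace
 * period and flushing it.  free_rwork and free_fn are hypothetical.
 * queue_rcu_work() runs free_fn() on system_wq once a grace period has
 * elapsed; flush_rcu_work() waits for both the grace period and the
 * execution.
 */
#include <linux/workqueue.h>

static void free_fn(struct work_struct *work)
{
    /* reclaim resources that readers may still have been using */
}
static struct rcu_work free_rwork;

static void deferred_free(void)
{
    INIT_RCU_WORK(&free_rwork, free_fn);
    queue_rcu_work(system_wq, &free_rwork);
    flush_rcu_work(&free_rwork);    /* grace period and execution done */
}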
3241 
3242 static bool __cancel_work(struct work_struct *work, bool is_dwork)
3243 {
3244     unsigned long flags;
3245     int ret;
3246 
3247     do {
3248         ret = try_to_grab_pending(work, is_dwork, &flags);
3249     } while (unlikely(ret == -EAGAIN));
3250 
3251     if (unlikely(ret < 0))
3252         return false;
3253 
3254     set_work_pool_and_clear_pending(work, get_work_pool_id(work));
3255     local_irq_restore(flags);
3256     return ret;
3257 }
3258 
3259 /*
3260  * See cancel_delayed_work()
3261  */
3262 bool cancel_work(struct work_struct *work)
3263 {
3264     return __cancel_work(work, false);
3265 }
3266 EXPORT_SYMBOL(cancel_work);
3267 
3268 /**
3269  * cancel_delayed_work - cancel a delayed work
3270  * @dwork: delayed_work to cancel
3271  *
3272  * Kill off a pending delayed_work.
3273  *
3274  * Return: %true if @dwork was pending and canceled; %false if it wasn't
3275  * pending.
3276  *
3277  * Note:
3278  * The work callback function may still be running on return, unless
3279  * it returns %true and the work doesn't re-arm itself.  Explicitly flush or
3280  * use cancel_delayed_work_sync() to wait on it.
3281  *
3282  * This function is safe to call from any context, including IRQ handlers.
3283  */
3284 bool cancel_delayed_work(struct delayed_work *dwork)
3285 {
3286     return __cancel_work(&dwork->work, true);
3287 }
3288 EXPORT_SYMBOL(cancel_delayed_work);
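
/*
 * Example (illustrative sketch): cancelling a pending timeout from an
 * interrupt handler, which is fine because cancel_delayed_work() never
 * sleeps.  Use cancel_delayed_work_sync() from process context when the
 * callback must not be running on return.  timeout_dwork and my_irq are
 * hypothetical.
 */
#include <linux/workqueue.h>
#include <linux/interrupt.h>

static void timeout_fn(struct work_struct *work)
{
    /* the device did not answer in time */
}
static DECLARE_DELAYED_WORK(timeout_dwork, timeout_fn);

static irqreturn_t my_irq(int irq, void *data)
{
    cancel_delayed_work(&timeout_dwork);    /* safe in IRQ context */
    return IRQ_HANDLED;
}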
3289 
3290 /**
3291  * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
3292  * @dwork: the delayed work to cancel
3293  *
3294  * This is cancel_work_sync() for delayed works.
3295  *
3296  * Return:
3297  * %true if @dwork was pending, %false otherwise.
3298  */
3299 bool cancel_delayed_work_sync(struct delayed_work *dwork)
3300 {
3301     return __cancel_work_timer(&dwork->work, true);
3302 }
3303 EXPORT_SYMBOL(cancel_delayed_work_sync);
3304 
3305 /**
3306  * schedule_on_each_cpu - execute a function synchronously on each online CPU
3307  * @func: the function to call
3308  *
3309  * schedule_on_each_cpu() executes @func on each online CPU using the
3310  * system workqueue and blocks until all CPUs have completed.
3311  * schedule_on_each_cpu() is very slow.
3312  *
3313  * Return:
3314  * 0 on success, -errno on failure.
3315  */
3316 int schedule_on_each_cpu(work_func_t func)
3317 {
3318     int cpu;
3319     struct work_struct __percpu *works;
3320 
3321     works = alloc_percpu(struct work_struct);
3322     if (!works)
3323         return -ENOMEM;
3324 
3325     cpus_read_lock();
3326 
3327     for_each_online_cpu(cpu) {
3328         struct work_struct *work = per_cpu_ptr(works, cpu);
3329 
3330         INIT_WORK(work, func);
3331         schedule_work_on(cpu, work);
3332     }
3333 
3334     for_each_online_cpu(cpu)
3335         flush_work(per_cpu_ptr(works, cpu));
3336 
3337     cpus_read_unlock();
3338     free_percpu(works);
3339     return 0;
3340 }
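
/*
 * Example (illustrative sketch): running a function on every online CPU
 * and waiting for completion.  flush_local_fn is hypothetical.  This is
 * heavyweight - one work item is queued and flushed per CPU - so it
 * belongs on slow paths only.
 */
#include <linux/workqueue.h>

static void flush_local_fn(struct work_struct *work)
{
    /* runs in worker context on each online CPU in turn */
}

static int flush_all_cpus(void)
{
    return schedule_on_each_cpu(flush_local_fn);    /* 0 or -ENOMEM */
}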
3341 
3342 /**
3343  * execute_in_process_context - reliably execute the routine with user context
3344  * @fn:     the function to execute
3345  * @ew:     guaranteed storage for the execute work structure (must
3346  *      be available when the work executes)
3347  *
3348  * Executes the function immediately if process context is available,
3349  * otherwise schedules the function for delayed execution.
3350  *
3351  * Return:  0 - function was executed
3352  *      1 - function was scheduled for execution
3353  */
3354 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
3355 {
3356     if (!in_interrupt()) {
3357         fn(&ew->work);
3358         return 0;
3359     }
3360 
3361     INIT_WORK(&ew->work, fn);
3362     schedule_work(&ew->work);
3363 
3364     return 1;
3365 }
3366 EXPORT_SYMBOL_GPL(execute_in_process_context);
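
/*
 * Example (illustrative sketch): a release path that may be entered from
 * interrupt context.  @ew must stay valid until the callback runs, so it
 * is embedded in the hypothetical object being released.
 */
#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_obj {
    struct execute_work ew;
};

static void my_obj_free(struct work_struct *work)
{
    kfree(container_of(work, struct my_obj, ew.work));
}

static void my_obj_put(struct my_obj *obj)
{
    /* frees inline unless called from interrupt context */
    execute_in_process_context(my_obj_free, &obj->ew);
}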
3367 
3368 /**
3369  * free_workqueue_attrs - free a workqueue_attrs
3370  * @attrs: workqueue_attrs to free
3371  *
3372  * Undo alloc_workqueue_attrs().
3373  */
3374 void free_workqueue_attrs(struct workqueue_attrs *attrs)
3375 {
3376     if (attrs) {
3377         free_cpumask_var(attrs->cpumask);
3378         kfree(attrs);
3379     }
3380 }
3381 
3382 /**
3383  * alloc_workqueue_attrs - allocate a workqueue_attrs
3384  *
3385  * Allocate a new workqueue_attrs, initialize with default settings and
3386  * return it.
3387  *
3388  * Return: The newly allocated workqueue_attrs on success. %NULL on failure.
3389  */
3390 struct workqueue_attrs *alloc_workqueue_attrs(void)
3391 {
3392     struct workqueue_attrs *attrs;
3393 
3394     attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
3395     if (!attrs)
3396         goto fail;
3397     if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
3398         goto fail;
3399 
3400     cpumask_copy(attrs->cpumask, cpu_possible_mask);
3401     return attrs;
3402 fail:
3403     free_workqueue_attrs(attrs);
3404     return NULL;
3405 }
3406 
3407 static void copy_workqueue_attrs(struct workqueue_attrs *to,
3408                  const struct workqueue_attrs *from)
3409 {
3410     to->nice = from->nice;
3411     cpumask_copy(to->cpumask, from->cpumask);
3412     /*
3413      * Unlike hash and equality test, this function doesn't ignore
3414      * ->no_numa as it is used for both pool and wq attrs.  Instead,
3415      * get_unbound_pool() explicitly clears ->no_numa after copying.
3416      */
3417     to->no_numa = from->no_numa;
3418 }
3419 
3420 /* hash value of the content of @attr */
3421 static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
3422 {
3423     u32 hash = 0;
3424 
3425     hash = jhash_1word(attrs->nice, hash);
3426     hash = jhash(cpumask_bits(attrs->cpumask),
3427              BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3428     return hash;
3429 }
3430 
3431 /* content equality test */
3432 static bool wqattrs_equal(const struct workqueue_attrs *a,
3433               const struct workqueue_attrs *b)
3434 {
3435     if (a->nice != b->nice)
3436         return false;
3437     if (!cpumask_equal(a->cpumask, b->cpumask))
3438         return false;
3439     return true;
3440 }
3441 
3442 /**
3443  * init_worker_pool - initialize a newly zalloc'd worker_pool
3444  * @pool: worker_pool to initialize
3445  *
3446  * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
3447  *
3448  * Return: 0 on success, -errno on failure.  Even on failure, all fields
3449  * inside @pool proper are initialized and put_unbound_pool() can be called
3450  * on @pool safely to release it.
3451  */
3452 static int init_worker_pool(struct worker_pool *pool)
3453 {
3454     raw_spin_lock_init(&pool->lock);
3455     pool->id = -1;
3456     pool->cpu = -1;
3457     pool->node = NUMA_NO_NODE;
3458     pool->flags |= POOL_DISASSOCIATED;
3459     pool->watchdog_ts = jiffies;
3460     INIT_LIST_HEAD(&pool->worklist);
3461     INIT_LIST_HEAD(&pool->idle_list);
3462     hash_init(pool->busy_hash);
3463 
3464     timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
3465 
3466     timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
3467 
3468     INIT_LIST_HEAD(&pool->workers);
3469 
3470     ida_init(&pool->worker_ida);
3471     INIT_HLIST_NODE(&pool->hash_node);
3472     pool->refcnt = 1;
3473 
3474     /* shouldn't fail above this point */
3475     pool->attrs = alloc_workqueue_attrs();
3476     if (!pool->attrs)
3477         return -ENOMEM;
3478     return 0;
3479 }
3480 
3481 #ifdef CONFIG_LOCKDEP
3482 static void wq_init_lockdep(struct workqueue_struct *wq)
3483 {
3484     char *lock_name;
3485 
3486     lockdep_register_key(&wq->key);
3487     lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
3488     if (!lock_name)
3489         lock_name = wq->name;
3490 
3491     wq->lock_name = lock_name;
3492     lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
3493 }
3494 
3495 static void wq_unregister_lockdep(struct workqueue_struct *wq)
3496 {
3497     lockdep_unregister_key(&wq->key);
3498 }
3499 
3500 static void wq_free_lockdep(struct workqueue_struct *wq)
3501 {
3502     if (wq->lock_name != wq->name)
3503         kfree(wq->lock_name);
3504 }
3505 #else
3506 static void wq_init_lockdep(struct workqueue_struct *wq)
3507 {
3508 }
3509 
3510 static void wq_unregister_lockdep(struct workqueue_struct *wq)
3511 {
3512 }
3513 
3514 static void wq_free_lockdep(struct workqueue_struct *wq)
3515 {
3516 }
3517 #endif
3518 
3519 static void rcu_free_wq(struct rcu_head *rcu)
3520 {
3521     struct workqueue_struct *wq =
3522         container_of(rcu, struct workqueue_struct, rcu);
3523 
3524     wq_free_lockdep(wq);
3525 
3526     if (!(wq->flags & WQ_UNBOUND))
3527         free_percpu(wq->cpu_pwqs);
3528     else
3529         free_workqueue_attrs(wq->unbound_attrs);
3530 
3531     kfree(wq);
3532 }
3533 
3534 static void rcu_free_pool(struct rcu_head *rcu)
3535 {
3536     struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
3537 
3538     ida_destroy(&pool->worker_ida);
3539     free_workqueue_attrs(pool->attrs);
3540     kfree(pool);
3541 }
3542 
3543 /* This returns with the lock held on success (pool manager is inactive). */
3544 static bool wq_manager_inactive(struct worker_pool *pool)
3545 {
3546     raw_spin_lock_irq(&pool->lock);
3547 
3548     if (pool->flags & POOL_MANAGER_ACTIVE) {
3549         raw_spin_unlock_irq(&pool->lock);
3550         return false;
3551     }
3552     return true;
3553 }
3554 
3555 /**
3556  * put_unbound_pool - put a worker_pool
3557  * @pool: worker_pool to put
3558  *
3559  * Put @pool.  If its refcnt reaches zero, it gets destroyed in an
3560  * RCU-safe manner.  get_unbound_pool() calls this function on its failure path
3561  * and this function should be able to release pools which went through,
3562  * successfully or not, init_worker_pool().
3563  *
3564  * Should be called with wq_pool_mutex held.
3565  */
3566 static void put_unbound_pool(struct worker_pool *pool)
3567 {
3568     DECLARE_COMPLETION_ONSTACK(detach_completion);
3569     struct worker *worker;
3570 
3571     lockdep_assert_held(&wq_pool_mutex);
3572 
3573     if (--pool->refcnt)
3574         return;
3575 
3576     /* sanity checks */
3577     if (WARN_ON(!(pool->cpu < 0)) ||
3578         WARN_ON(!list_empty(&pool->worklist)))
3579         return;
3580 
3581     /* release id and unhash */
3582     if (pool->id >= 0)
3583         idr_remove(&worker_pool_idr, pool->id);
3584     hash_del(&pool->hash_node);
3585 
3586     /*
3587      * Become the manager and destroy all workers.  This prevents
3588      * @pool's workers from blocking on attach_mutex.  We're the last
3589      * manager and @pool gets freed with the flag set.
3590      * Because of how wq_manager_inactive() works, we will hold the
3591      * spinlock after a successful wait.
3592      */
3593     rcuwait_wait_event(&manager_wait, wq_manager_inactive(pool),
3594                TASK_UNINTERRUPTIBLE);
3595     pool->flags |= POOL_MANAGER_ACTIVE;
3596 
3597     while ((worker = first_idle_worker(pool)))
3598         destroy_worker(worker);
3599     WARN_ON(pool->nr_workers || pool->nr_idle);
3600     raw_spin_unlock_irq(&pool->lock);
3601 
3602     mutex_lock(&wq_pool_attach_mutex);
3603     if (!list_empty(&pool->workers))
3604         pool->detach_completion = &detach_completion;
3605     mutex_unlock(&wq_pool_attach_mutex);
3606 
3607     if (pool->detach_completion)
3608         wait_for_completion(pool->detach_completion);
3609 
3610     /* shut down the timers */
3611     del_timer_sync(&pool->idle_timer);
3612     del_timer_sync(&pool->mayday_timer);
3613 
3614     /* RCU protected to allow dereferences from get_work_pool() */
3615     call_rcu(&pool->rcu, rcu_free_pool);
3616 }
3617 
3618 /**
3619  * get_unbound_pool - get a worker_pool with the specified attributes
3620  * @attrs: the attributes of the worker_pool to get
3621  *
3622  * Obtain a worker_pool which has the same attributes as @attrs, bump the
3623  * reference count and return it.  If there already is a matching
3624  * worker_pool, it will be used; otherwise, this function attempts to
3625  * create a new one.
3626  *
3627  * Should be called with wq_pool_mutex held.
3628  *
3629  * Return: On success, a worker_pool with the same attributes as @attrs.
3630  * On failure, %NULL.
3631  */
3632 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
3633 {
3634     u32 hash = wqattrs_hash(attrs);
3635     struct worker_pool *pool;
3636     int node;
3637     int target_node = NUMA_NO_NODE;
3638 
3639     lockdep_assert_held(&wq_pool_mutex);
3640 
3641     /* do we already have a matching pool? */
3642     hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
3643         if (wqattrs_equal(pool->attrs, attrs)) {
3644             pool->refcnt++;
3645             return pool;
3646         }
3647     }
3648 
3649     /* if cpumask is contained inside a NUMA node, we belong to that node */
3650     if (wq_numa_enabled) {
3651         for_each_node(node) {
3652             if (cpumask_subset(attrs->cpumask,
3653                        wq_numa_possible_cpumask[node])) {
3654                 target_node = node;
3655                 break;
3656             }
3657         }
3658     }
3659 
3660     /* nope, create a new one */
3661     pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
3662     if (!pool || init_worker_pool(pool) < 0)
3663         goto fail;
3664 
3665     lockdep_set_subclass(&pool->lock, 1);   /* see put_pwq() */
3666     copy_workqueue_attrs(pool->attrs, attrs);
3667     pool->node = target_node;
3668 
3669     /*
3670      * no_numa isn't a worker_pool attribute, always clear it.  See
3671      * 'struct workqueue_attrs' comments for detail.
3672      */
3673     pool->attrs->no_numa = false;
3674 
3675     if (worker_pool_assign_id(pool) < 0)
3676         goto fail;
3677 
3678     /* create and start the initial worker */
3679     if (wq_online && !create_worker(pool))
3680         goto fail;
3681 
3682     /* install */
3683     hash_add(unbound_pool_hash, &pool->hash_node, hash);
3684 
3685     return pool;
3686 fail:
3687     if (pool)
3688         put_unbound_pool(pool);
3689     return NULL;
3690 }
3691 
3692 static void rcu_free_pwq(struct rcu_head *rcu)
3693 {
3694     kmem_cache_free(pwq_cache,
3695             container_of(rcu, struct pool_workqueue, rcu));
3696 }
3697 
3698 /*
3699  * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
3700  * and needs to be destroyed.
3701  */
3702 static void pwq_unbound_release_workfn(struct work_struct *work)
3703 {
3704     struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
3705                           unbound_release_work);
3706     struct workqueue_struct *wq = pwq->wq;
3707     struct worker_pool *pool = pwq->pool;
3708     bool is_last = false;
3709 
3710     /*
3711      * When @pwq is not linked, it doesn't hold any reference to
3712      * @wq, and accessing @wq is invalid.
3713      */
3714     if (!list_empty(&pwq->pwqs_node)) {
3715         if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
3716             return;
3717 
3718         mutex_lock(&wq->mutex);
3719         list_del_rcu(&pwq->pwqs_node);
3720         is_last = list_empty(&wq->pwqs);
3721         mutex_unlock(&wq->mutex);
3722     }
3723 
3724     mutex_lock(&wq_pool_mutex);
3725     put_unbound_pool(pool);
3726     mutex_unlock(&wq_pool_mutex);
3727 
3728     call_rcu(&pwq->rcu, rcu_free_pwq);
3729 
3730     /*
3731      * If we're the last pwq going away, @wq is already dead and no one
3732      * is gonna access it anymore.  Schedule RCU free.
3733      */
3734     if (is_last) {
3735         wq_unregister_lockdep(wq);
3736         call_rcu(&wq->rcu, rcu_free_wq);
3737     }
3738 }
3739 
3740 /**
3741  * pwq_adjust_max_active - update a pwq's max_active to the current setting
3742  * @pwq: target pool_workqueue
3743  *
3744  * If @pwq isn't freezing, set @pwq->max_active to the associated
3745  * workqueue's saved_max_active and activate inactive work items
3746  * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
3747  */
3748 static void pwq_adjust_max_active(struct pool_workqueue *pwq)
3749 {
3750     struct workqueue_struct *wq = pwq->wq;
3751     bool freezable = wq->flags & WQ_FREEZABLE;
3752     unsigned long flags;
3753 
3754     /* for @wq->saved_max_active */
3755     lockdep_assert_held(&wq->mutex);
3756 
3757     /* fast exit for non-freezable wqs */
3758     if (!freezable && pwq->max_active == wq->saved_max_active)
3759         return;
3760 
3761     /* this function can be called during early boot w/ irq disabled */
3762     raw_spin_lock_irqsave(&pwq->pool->lock, flags);
3763 
3764     /*
3765      * During [un]freezing, the caller is responsible for ensuring that
3766      * this function is called at least once after @workqueue_freezing
3767      * is updated and visible.
3768      */
3769     if (!freezable || !workqueue_freezing) {
3770         bool kick = false;
3771 
3772         pwq->max_active = wq->saved_max_active;
3773 
3774         while (!list_empty(&pwq->inactive_works) &&
3775                pwq->nr_active < pwq->max_active) {
3776             pwq_activate_first_inactive(pwq);
3777             kick = true;
3778         }
3779 
3780         /*
3781          * Need to kick a worker after the wq is thawed or an unbound wq's
3782          * max_active is bumped. In realtime scenarios, always kicking a
3783          * worker will cause interference on the isolated cpu cores, so
3784          * let's kick iff work items were activated.
3785          */
3786         if (kick)
3787             wake_up_worker(pwq->pool);
3788     } else {
3789         pwq->max_active = 0;
3790     }
3791 
3792     raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
3793 }
3794 
3795 /* initialize newly allocated @pwq which is associated with @wq and @pool */
3796 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
3797              struct worker_pool *pool)
3798 {
3799     BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
3800 
3801     memset(pwq, 0, sizeof(*pwq));
3802 
3803     pwq->pool = pool;
3804     pwq->wq = wq;
3805     pwq->flush_color = -1;
3806     pwq->refcnt = 1;
3807     INIT_LIST_HEAD(&pwq->inactive_works);
3808     INIT_LIST_HEAD(&pwq->pwqs_node);
3809     INIT_LIST_HEAD(&pwq->mayday_node);
3810     INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
3811 }
3812 
3813 /* sync @pwq with the current state of its associated wq and link it */
3814 static void link_pwq(struct pool_workqueue *pwq)
3815 {
3816     struct workqueue_struct *wq = pwq->wq;
3817 
3818     lockdep_assert_held(&wq->mutex);
3819 
3820     /* may be called multiple times, ignore if already linked */
3821     if (!list_empty(&pwq->pwqs_node))
3822         return;
3823 
3824     /* set the matching work_color */
3825     pwq->work_color = wq->work_color;
3826 
3827     /* sync max_active to the current setting */
3828     pwq_adjust_max_active(pwq);
3829 
3830     /* link in @pwq */
3831     list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
3832 }
3833 
3834 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */
3835 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
3836                     const struct workqueue_attrs *attrs)
3837 {
3838     struct worker_pool *pool;
3839     struct pool_workqueue *pwq;
3840 
3841     lockdep_assert_held(&wq_pool_mutex);
3842 
3843     pool = get_unbound_pool(attrs);
3844     if (!pool)
3845         return NULL;
3846 
3847     pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3848     if (!pwq) {
3849         put_unbound_pool(pool);
3850         return NULL;
3851     }
3852 
3853     init_pwq(pwq, wq, pool);
3854     return pwq;
3855 }
3856 
3857 /**
3858  * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
3859  * @attrs: the wq_attrs of the default pwq of the target workqueue
3860  * @node: the target NUMA node
3861  * @cpu_going_down: if >= 0, the CPU to consider as offline
3862  * @cpumask: outarg, the resulting cpumask
3863  *
3864  * Calculate the cpumask a workqueue with @attrs should use on @node.  If
3865  * @cpu_going_down is >= 0, that cpu is considered offline during
3866  * calculation.  The result is stored in @cpumask.
3867  *
3868  * If NUMA affinity is not enabled, @attrs->cpumask is always used.  If
3869  * enabled and @node has online CPUs requested by @attrs, the returned
3870  * cpumask is the intersection of the possible CPUs of @node and
3871  * @attrs->cpumask.
3872  *
3873  * The caller is responsible for ensuring that the cpumask of @node stays
3874  * stable.
3875  *
3876  * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
3877  * %false if equal.
3878  */
3879 static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
3880                  int cpu_going_down, cpumask_t *cpumask)
3881 {
3882     if (!wq_numa_enabled || attrs->no_numa)
3883         goto use_dfl;
3884 
3885     /* does @node have any online CPUs @attrs wants? */
3886     cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
3887     if (cpu_going_down >= 0)
3888         cpumask_clear_cpu(cpu_going_down, cpumask);
3889 
3890     if (cpumask_empty(cpumask))
3891         goto use_dfl;
3892 
3893     /* yep, return possible CPUs in @node that @attrs wants */
3894     cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
3895 
3896     if (cpumask_empty(cpumask)) {
3897         pr_warn_once("WARNING: workqueue cpumask: online intersect > "
3898                 "possible intersect\n");
3899         return false;
3900     }
3901 
3902     return !cpumask_equal(cpumask, attrs->cpumask);
3903 
3904 use_dfl:
3905     cpumask_copy(cpumask, attrs->cpumask);
3906     return false;
3907 }
3908 
3909 /* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
3910 static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
3911                            int node,
3912                            struct pool_workqueue *pwq)
3913 {
3914     struct pool_workqueue *old_pwq;
3915 
3916     lockdep_assert_held(&wq_pool_mutex);
3917     lockdep_assert_held(&wq->mutex);
3918 
3919     /* link_pwq() can handle duplicate calls */
3920     link_pwq(pwq);
3921 
3922     old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
3923     rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
3924     return old_pwq;
3925 }
3926 
3927 /* context to store the prepared attrs & pwqs before applying */
3928 struct apply_wqattrs_ctx {
3929     struct workqueue_struct *wq;        /* target workqueue */
3930     struct workqueue_attrs  *attrs;     /* attrs to apply */
3931     struct list_head    list;       /* queued for batching commit */
3932     struct pool_workqueue   *dfl_pwq;
3933     struct pool_workqueue   *pwq_tbl[];
3934 };
3935 
3936 /* free the resources after success or abort */
3937 static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
3938 {
3939     if (ctx) {
3940         int node;
3941 
3942         for_each_node(node)
3943             put_pwq_unlocked(ctx->pwq_tbl[node]);
3944         put_pwq_unlocked(ctx->dfl_pwq);
3945 
3946         free_workqueue_attrs(ctx->attrs);
3947 
3948         kfree(ctx);
3949     }
3950 }
3951 
3952 /* allocate the attrs and pwqs for later installation */
3953 static struct apply_wqattrs_ctx *
3954 apply_wqattrs_prepare(struct workqueue_struct *wq,
3955               const struct workqueue_attrs *attrs)
3956 {
3957     struct apply_wqattrs_ctx *ctx;
3958     struct workqueue_attrs *new_attrs, *tmp_attrs;
3959     int node;
3960 
3961     lockdep_assert_held(&wq_pool_mutex);
3962 
3963     ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
3964 
3965     new_attrs = alloc_workqueue_attrs();
3966     tmp_attrs = alloc_workqueue_attrs();
3967     if (!ctx || !new_attrs || !tmp_attrs)
3968         goto out_free;
3969 
3970     /*
3971      * Calculate the attrs of the default pwq.
3972      * If the user configured cpumask doesn't overlap with the
3973      * wq_unbound_cpumask, we fall back to the wq_unbound_cpumask.
3974      */
3975     copy_workqueue_attrs(new_attrs, attrs);
3976     cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
3977     if (unlikely(cpumask_empty(new_attrs->cpumask)))
3978         cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask);
3979 
3980     /*
3981      * We may create multiple pwqs with differing cpumasks.  Make a
3982      * copy of @new_attrs which will be modified and used to obtain
3983      * pools.
3984      */
3985     copy_workqueue_attrs(tmp_attrs, new_attrs);
3986 
3987     /*
3988      * If something goes wrong during CPU up/down, we'll fall back to
3989      * the default pwq covering whole @attrs->cpumask.  Always create
3990      * it even if we don't use it immediately.
3991      */
3992     ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
3993     if (!ctx->dfl_pwq)
3994         goto out_free;
3995 
3996     for_each_node(node) {
3997         if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
3998             ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
3999             if (!ctx->pwq_tbl[node])
4000                 goto out_free;
4001         } else {
4002             ctx->dfl_pwq->refcnt++;
4003             ctx->pwq_tbl[node] = ctx->dfl_pwq;
4004         }
4005     }
4006 
4007     /* save the user-configured attrs and sanitize them. */
4008     copy_workqueue_attrs(new_attrs, attrs);
4009     cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
4010     ctx->attrs = new_attrs;
4011 
4012     ctx->wq = wq;
4013     free_workqueue_attrs(tmp_attrs);
4014     return ctx;
4015 
4016 out_free:
4017     free_workqueue_attrs(tmp_attrs);
4018     free_workqueue_attrs(new_attrs);
4019     apply_wqattrs_cleanup(ctx);
4020     return NULL;
4021 }
4022 
4023 /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
4024 static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
4025 {
4026     int node;
4027 
4028     /* all pwqs have been created successfully, let's install'em */
4029     mutex_lock(&ctx->wq->mutex);
4030 
4031     copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
4032 
4033     /* save the previous pwq and install the new one */
4034     for_each_node(node)
4035         ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
4036                               ctx->pwq_tbl[node]);
4037 
4038     /* @dfl_pwq might not have been used, ensure it's linked */
4039     link_pwq(ctx->dfl_pwq);
4040     swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
4041 
4042     mutex_unlock(&ctx->wq->mutex);
4043 }
4044 
4045 static void apply_wqattrs_lock(void)
4046 {
4047     /* CPUs should stay stable across pwq creations and installations */
4048     cpus_read_lock();
4049     mutex_lock(&wq_pool_mutex);
4050 }
4051 
4052 static void apply_wqattrs_unlock(void)
4053 {
4054     mutex_unlock(&wq_pool_mutex);
4055     cpus_read_unlock();
4056 }
4057 
4058 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
4059                     const struct workqueue_attrs *attrs)
4060 {
4061     struct apply_wqattrs_ctx *ctx;
4062 
4063     /* only unbound workqueues can change attributes */
4064     if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
4065         return -EINVAL;
4066 
4067     /* creating multiple pwqs breaks ordering guarantee */
4068     if (!list_empty(&wq->pwqs)) {
4069         if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4070             return -EINVAL;
4071 
4072         wq->flags &= ~__WQ_ORDERED;
4073     }
4074 
4075     ctx = apply_wqattrs_prepare(wq, attrs);
4076     if (!ctx)
4077         return -ENOMEM;
4078 
4079     /* the ctx has been prepared successfully, let's commit it */
4080     apply_wqattrs_commit(ctx);
4081     apply_wqattrs_cleanup(ctx);
4082 
4083     return 0;
4084 }
4085 
4086 /**
4087  * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
4088  * @wq: the target workqueue
4089  * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
4090  *
4091  * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
4092  * machines, this function maps a separate pwq to each NUMA node with
4093  * possible CPUs in @attrs->cpumask so that work items are affine to the
4094  * NUMA node it was issued on.  Older pwqs are released as in-flight work
4095  * items finish.  Note that a work item which repeatedly requeues itself
4096  * back-to-back will stay on its current pwq.
4097  *
4098  * Performs GFP_KERNEL allocations.
4099  *
4100  * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock().
4101  *
4102  * Return: 0 on success and -errno on failure.
4103  */
4104 int apply_workqueue_attrs(struct workqueue_struct *wq,
4105               const struct workqueue_attrs *attrs)
4106 {
4107     int ret;
4108 
4109     lockdep_assert_cpus_held();
4110 
4111     mutex_lock(&wq_pool_mutex);
4112     ret = apply_workqueue_attrs_locked(wq, attrs);
4113     mutex_unlock(&wq_pool_mutex);
4114 
4115     return ret;
4116 }
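
/*
 * Example (illustrative sketch): confining a hypothetical unbound
 * workqueue to CPUs 0-1 at nice -5 using the alloc/modify/apply pattern
 * above.  cpus_read_lock() must be held across apply_workqueue_attrs().
 */
#include <linux/workqueue.h>
#include <linux/cpu.h>

static int confine_wq(struct workqueue_struct *wq)
{
    struct workqueue_attrs *attrs;
    int ret;

    attrs = alloc_workqueue_attrs();
    if (!attrs)
        return -ENOMEM;

    attrs->nice = -5;
    cpumask_clear(attrs->cpumask);
    cpumask_set_cpu(0, attrs->cpumask);
    cpumask_set_cpu(1, attrs->cpumask);

    cpus_read_lock();
    ret = apply_workqueue_attrs(wq, attrs);
    cpus_read_unlock();

    free_workqueue_attrs(attrs);
    return ret;
}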
4117 
4118 /**
4119  * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
4120  * @wq: the target workqueue
4121  * @cpu: the CPU coming up or going down
4122  * @online: whether @cpu is coming up or going down
4123  *
4124  * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
4125  * %CPU_DOWN_FAILED.  @cpu is being hot[un]plugged, update NUMA affinity of
4126  * @wq accordingly.
4127  *
4128  * If NUMA affinity can't be adjusted due to memory allocation failure, it
4129  * falls back to @wq->dfl_pwq which may not be optimal but is always
4130  * correct.
4131  *
4132  * Note that when the last allowed CPU of a NUMA node goes offline for a
4133  * workqueue with a cpumask spanning multiple nodes, the workers which were
4134  * already executing the work items for the workqueue will lose their CPU
4135  * affinity and may execute on any CPU.  This is similar to how per-cpu
4136  * workqueues behave on CPU_DOWN.  If a workqueue user wants strict
4137  * affinity, it's the user's responsibility to flush the work item from
4138  * CPU_DOWN_PREPARE.
4139  */
4140 static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
4141                    bool online)
4142 {
4143     int node = cpu_to_node(cpu);
4144     int cpu_off = online ? -1 : cpu;
4145     struct pool_workqueue *old_pwq = NULL, *pwq;
4146     struct workqueue_attrs *target_attrs;
4147     cpumask_t *cpumask;
4148 
4149     lockdep_assert_held(&wq_pool_mutex);
4150 
4151     if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
4152         wq->unbound_attrs->no_numa)
4153         return;
4154 
4155     /*
4156      * We don't wanna alloc/free wq_attrs for each wq for each CPU.
4157      * Let's use a preallocated one.  The following buf is protected by
4158      * CPU hotplug exclusion.
4159      */
4160     target_attrs = wq_update_unbound_numa_attrs_buf;
4161     cpumask = target_attrs->cpumask;
4162 
4163     copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
4164     pwq = unbound_pwq_by_node(wq, node);
4165 
4166     /*
4167      * Let's determine what needs to be done.  If the target cpumask is
4168      * different from the default pwq's, we need to compare it to @pwq's
4169      * and create a new one if they don't match.  If the target cpumask
4170      * equals the default pwq's, the default pwq should be used.
4171      */
4172     if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
4173         if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
4174             return;
4175     } else {
4176         goto use_dfl_pwq;
4177     }
4178 
4179     /* create a new pwq */
4180     pwq = alloc_unbound_pwq(wq, target_attrs);
4181     if (!pwq) {
4182         pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
4183             wq->name);
4184         goto use_dfl_pwq;
4185     }
4186 
4187     /* Install the new pwq. */
4188     mutex_lock(&wq->mutex);
4189     old_pwq = numa_pwq_tbl_install(wq, node, pwq);
4190     goto out_unlock;
4191 
4192 use_dfl_pwq:
4193     mutex_lock(&wq->mutex);
4194     raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
4195     get_pwq(wq->dfl_pwq);
4196     raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
4197     old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
4198 out_unlock:
4199     mutex_unlock(&wq->mutex);
4200     put_pwq_unlocked(old_pwq);
4201 }
4202 
4203 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
4204 {
4205     bool highpri = wq->flags & WQ_HIGHPRI;
4206     int cpu, ret;
4207 
4208     if (!(wq->flags & WQ_UNBOUND)) {
4209         wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
4210         if (!wq->cpu_pwqs)
4211             return -ENOMEM;
4212 
4213         for_each_possible_cpu(cpu) {
4214             struct pool_workqueue *pwq =
4215                 per_cpu_ptr(wq->cpu_pwqs, cpu);
4216             struct worker_pool *cpu_pools =
4217                 per_cpu(cpu_worker_pools, cpu);
4218 
4219             init_pwq(pwq, wq, &cpu_pools[highpri]);
4220 
4221             mutex_lock(&wq->mutex);
4222             link_pwq(pwq);
4223             mutex_unlock(&wq->mutex);
4224         }
4225         return 0;
4226     }
4227 
4228     cpus_read_lock();
4229     if (wq->flags & __WQ_ORDERED) {
4230         ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
4231         /* there should only be single pwq for ordering guarantee */
4232         WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
4233                   wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
4234              "ordering guarantee broken for workqueue %s\n", wq->name);
4235     } else {
4236         ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
4237     }
4238     cpus_read_unlock();
4239 
4240     return ret;
4241 }
4242 
4243 static int wq_clamp_max_active(int max_active, unsigned int flags,
4244                    const char *name)
4245 {
4246     int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
4247 
4248     if (max_active < 1 || max_active > lim)
4249         pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
4250             max_active, name, 1, lim);
4251 
4252     return clamp_val(max_active, 1, lim);
4253 }
4254 
4255 /*
4256  * Workqueues which may be used during memory reclaim should have a rescuer
4257  * to guarantee forward progress.
4258  */
4259 static int init_rescuer(struct workqueue_struct *wq)
4260 {
4261     struct worker *rescuer;
4262     int ret;
4263 
4264     if (!(wq->flags & WQ_MEM_RECLAIM))
4265         return 0;
4266 
4267     rescuer = alloc_worker(NUMA_NO_NODE);
4268     if (!rescuer)
4269         return -ENOMEM;
4270 
4271     rescuer->rescue_wq = wq;
4272     rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
4273     if (IS_ERR(rescuer->task)) {
4274         ret = PTR_ERR(rescuer->task);
4275         kfree(rescuer);
4276         return ret;
4277     }
4278 
4279     wq->rescuer = rescuer;
4280     kthread_bind_mask(rescuer->task, cpu_possible_mask);
4281     wake_up_process(rescuer->task);
4282 
4283     return 0;
4284 }
4285 
4286 __printf(1, 4)
4287 struct workqueue_struct *alloc_workqueue(const char *fmt,
4288                      unsigned int flags,
4289                      int max_active, ...)
4290 {
4291     size_t tbl_size = 0;
4292     va_list args;
4293     struct workqueue_struct *wq;
4294     struct pool_workqueue *pwq;
4295 
4296     /*
4297      * Unbound && max_active == 1 used to imply ordered, which is no
4298      * longer the case on NUMA machines due to per-node pools.  While
4299      * alloc_ordered_workqueue() is the right way to create an ordered
4300      * workqueue, keep the previous behavior to avoid subtle breakages
4301      * on NUMA.
4302      */
4303     if ((flags & WQ_UNBOUND) && max_active == 1)
4304         flags |= __WQ_ORDERED;
4305 
4306     /* see the comment above the definition of WQ_POWER_EFFICIENT */
4307     if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
4308         flags |= WQ_UNBOUND;
4309 
4310     /* allocate wq and format name */
4311     if (flags & WQ_UNBOUND)
4312         tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
4313 
4314     wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
4315     if (!wq)
4316         return NULL;
4317 
4318     if (flags & WQ_UNBOUND) {
4319         wq->unbound_attrs = alloc_workqueue_attrs();
4320         if (!wq->unbound_attrs)
4321             goto err_free_wq;
4322     }
4323 
4324     va_start(args, max_active);
4325     vsnprintf(wq->name, sizeof(wq->name), fmt, args);
4326     va_end(args);
4327 
4328     max_active = max_active ?: WQ_DFL_ACTIVE;
4329     max_active = wq_clamp_max_active(max_active, flags, wq->name);
4330 
4331     /* init wq */
4332     wq->flags = flags;
4333     wq->saved_max_active = max_active;
4334     mutex_init(&wq->mutex);
4335     atomic_set(&wq->nr_pwqs_to_flush, 0);
4336     INIT_LIST_HEAD(&wq->pwqs);
4337     INIT_LIST_HEAD(&wq->flusher_queue);
4338     INIT_LIST_HEAD(&wq->flusher_overflow);
4339     INIT_LIST_HEAD(&wq->maydays);
4340 
4341     wq_init_lockdep(wq);
4342     INIT_LIST_HEAD(&wq->list);
4343 
4344     if (alloc_and_link_pwqs(wq) < 0)
4345         goto err_unreg_lockdep;
4346 
4347     if (wq_online && init_rescuer(wq) < 0)
4348         goto err_destroy;
4349 
4350     if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
4351         goto err_destroy;
4352 
4353     /*
4354      * wq_pool_mutex protects global freeze state and workqueues list.
4355      * Grab it, adjust max_active and add the new @wq to workqueues
4356      * list.
4357      */
4358     mutex_lock(&wq_pool_mutex);
4359 
4360     mutex_lock(&wq->mutex);
4361     for_each_pwq(pwq, wq)
4362         pwq_adjust_max_active(pwq);
4363     mutex_unlock(&wq->mutex);
4364 
4365     list_add_tail_rcu(&wq->list, &workqueues);
4366 
4367     mutex_unlock(&wq_pool_mutex);
4368 
4369     return wq;
4370 
4371 err_unreg_lockdep:
4372     wq_unregister_lockdep(wq);
4373     wq_free_lockdep(wq);
4374 err_free_wq:
4375     free_workqueue_attrs(wq->unbound_attrs);
4376     kfree(wq);
4377     return NULL;
4378 err_destroy:
4379     destroy_workqueue(wq);
4380     return NULL;
4381 }
4382 EXPORT_SYMBOL_GPL(alloc_workqueue);
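
/*
 * Example (illustrative sketch): creating workqueues with the flags
 * handled above.  The names and error-handling shape are hypothetical.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *io_wq, *seq_wq;

static int my_init(void)
{
    /* unbound, usable during memory reclaim, up to 16 items in flight */
    io_wq = alloc_workqueue("my_io", WQ_UNBOUND | WQ_MEM_RECLAIM, 16);
    if (!io_wq)
        return -ENOMEM;

    /* strictly ordered: at most one item executing, FIFO order */
    seq_wq = alloc_ordered_workqueue("my_seq", 0);
    if (!seq_wq) {
        destroy_workqueue(io_wq);
        return -ENOMEM;
    }
    return 0;
}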
4383 
4384 static bool pwq_busy(struct pool_workqueue *pwq)
4385 {
4386     int i;
4387 
4388     for (i = 0; i < WORK_NR_COLORS; i++)
4389         if (pwq->nr_in_flight[i])
4390             return true;
4391 
4392     if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
4393         return true;
4394     if (pwq->nr_active || !list_empty(&pwq->inactive_works))
4395         return true;
4396 
4397     return false;
4398 }
4399 
4400 /**
4401  * destroy_workqueue - safely terminate a workqueue
4402  * @wq: target workqueue
4403  *
4404  * Safely destroy a workqueue. All work currently pending will be done first.
4405  */
4406 void destroy_workqueue(struct workqueue_struct *wq)
4407 {
4408     struct pool_workqueue *pwq;
4409     int node;
4410 
4411     /*
4412      * Remove it from sysfs first so that sanity check failure doesn't
4413      * lead to sysfs name conflicts.
4414      */
4415     workqueue_sysfs_unregister(wq);
4416 
4417     /* drain it before proceeding with destruction */
4418     drain_workqueue(wq);
4419 
4420     /* kill the rescuer; if sanity checks fail, @wq is left w/o a rescuer */
4421     if (wq->rescuer) {
4422         struct worker *rescuer = wq->rescuer;
4423 
4424         /* this prevents new queueing */
4425         raw_spin_lock_irq(&wq_mayday_lock);
4426         wq->rescuer = NULL;
4427         raw_spin_unlock_irq(&wq_mayday_lock);
4428 
4429         /* rescuer will empty maydays list before exiting */
4430         kthread_stop(rescuer->task);
4431         kfree(rescuer);
4432     }
4433 
4434     /*
4435      * Sanity checks - grab all the locks so that we wait for all
4436      * in-flight operations which may do put_pwq().
4437      */
4438     mutex_lock(&wq_pool_mutex);
4439     mutex_lock(&wq->mutex);
4440     for_each_pwq(pwq, wq) {
4441         raw_spin_lock_irq(&pwq->pool->lock);
4442         if (WARN_ON(pwq_busy(pwq))) {
4443             pr_warn("%s: %s has the following busy pwq\n",
4444                 __func__, wq->name);
4445             show_pwq(pwq);
4446             raw_spin_unlock_irq(&pwq->pool->lock);
4447             mutex_unlock(&wq->mutex);
4448             mutex_unlock(&wq_pool_mutex);
4449             show_one_workqueue(wq);
4450             return;
4451         }
4452         raw_spin_unlock_irq(&pwq->pool->lock);
4453     }
4454     mutex_unlock(&wq->mutex);
4455 
4456     /*
4457      * The wq list is used to freeze wqs; remove @wq from the list
4458      * only after flushing is complete in case a freeze races us.
4459      */
4460     list_del_rcu(&wq->list);
4461     mutex_unlock(&wq_pool_mutex);
4462 
4463     if (!(wq->flags & WQ_UNBOUND)) {
4464         wq_unregister_lockdep(wq);
4465         /*
4466          * The base ref is never dropped on per-cpu pwqs.  Directly
4467          * schedule RCU free.
4468          */
4469         call_rcu(&wq->rcu, rcu_free_wq);
4470     } else {
4471         /*
4472          * We're the sole accessor of @wq at this point.  Directly
4473          * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
4474          * @wq will be freed when the last pwq is released.
4475          */
4476         for_each_node(node) {
4477             pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
4478             RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
4479             put_pwq_unlocked(pwq);
4480         }
4481 
4482         /*
4483          * Put dfl_pwq.  @wq may be freed any time after dfl_pwq is
4484          * put.  Don't access it afterwards.
4485          */
4486         pwq = wq->dfl_pwq;
4487         wq->dfl_pwq = NULL;
4488         put_pwq_unlocked(pwq);
4489     }
4490 }
4491 EXPORT_SYMBOL_GPL(destroy_workqueue);
4492 
4493 /**
4494  * workqueue_set_max_active - adjust max_active of a workqueue
4495  * @wq: target workqueue
4496  * @max_active: new max_active value.
4497  *
4498  * Set max_active of @wq to @max_active.
4499  *
4500  * CONTEXT:
4501  * Don't call from IRQ context.
4502  */
4503 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4504 {
4505     struct pool_workqueue *pwq;
4506 
4507     /* disallow meddling with max_active for ordered workqueues */
4508     if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4509         return;
4510 
4511     max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
4512 
4513     mutex_lock(&wq->mutex);
4514 
4515     wq->flags &= ~__WQ_ORDERED;
4516     wq->saved_max_active = max_active;
4517 
4518     for_each_pwq(pwq, wq)
4519         pwq_adjust_max_active(pwq);
4520 
4521     mutex_unlock(&wq->mutex);
4522 }
4523 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
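
/*
 * Example (illustrative sketch): widening concurrency at run time from a
 * hypothetical load signal.  Not applicable to workqueues created with
 * alloc_ordered_workqueue(), as checked above.
 */
#include <linux/workqueue.h>

static void tune_wq(struct workqueue_struct *wq, bool busy)
{
    /* the value is clamped to [1, WQ_MAX_ACTIVE] by wq_clamp_max_active() */
    workqueue_set_max_active(wq, busy ? 64 : 8);
}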
4524 
4525 /**
4526  * current_work - retrieve %current task's work struct
4527  *
4528  * Determine if %current task is a workqueue worker and what it's working on.
4529  * Useful to find out the context that the %current task is running in.
4530  *
4531  * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
4532  */
4533 struct work_struct *current_work(void)
4534 {
4535     struct worker *worker = current_wq_worker();
4536 
4537     return worker ? worker->current_work : NULL;
4538 }
4539 EXPORT_SYMBOL(current_work);
4540 
4541 /**
4542  * current_is_workqueue_rescuer - is %current workqueue rescuer?
4543  *
4544  * Determine whether %current is a workqueue rescuer.  Can be used from
4545  * work functions to determine whether it's being run off the rescuer task.
4546  *
4547  * Return: %true if %current is a workqueue rescuer. %false otherwise.
4548  */
4549 bool current_is_workqueue_rescuer(void)
4550 {
4551     struct worker *worker = current_wq_worker();
4552 
4553     return worker && worker->rescue_wq;
4554 }
4555 
4556 /**
4557  * workqueue_congested - test whether a workqueue is congested
4558  * @cpu: CPU in question
4559  * @wq: target workqueue
4560  *
4561  * Test whether @wq's cpu workqueue for @cpu is congested.  There is
4562  * no synchronization around this function and the test result is
4563  * unreliable and only useful as advisory hints or for debugging.
4564  *
4565  * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
4566  * Note that both per-cpu and unbound workqueues may be associated with
4567  * multiple pool_workqueues which have separate congested states.  A
4568  * workqueue being congested on one CPU doesn't mean the workqueue is also
4569  * congested on other CPUs / NUMA nodes.
4570  *
4571  * Return:
4572  * %true if congested, %false otherwise.
4573  */
4574 bool workqueue_congested(int cpu, struct workqueue_struct *wq)
4575 {
4576     struct pool_workqueue *pwq;
4577     bool ret;
4578 
4579     rcu_read_lock();
4580     preempt_disable();
4581 
4582     if (cpu == WORK_CPU_UNBOUND)
4583         cpu = smp_processor_id();
4584 
4585     if (!(wq->flags & WQ_UNBOUND))
4586         pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
4587     else
4588         pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
4589 
4590     ret = !list_empty(&pwq->inactive_works);
4591     preempt_enable();
4592     rcu_read_unlock();
4593 
4594     return ret;
4595 }
4596 EXPORT_SYMBOL_GPL(workqueue_congested);
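
/*
 * Example (illustrative sketch): using the advisory congestion hint to
 * shed optional work.  The result is unsynchronized, so it may only bias
 * a decision, never gate correctness.  queue_optional is hypothetical.
 */
#include <linux/workqueue.h>

static bool queue_optional(struct workqueue_struct *wq,
               struct work_struct *work)
{
    if (workqueue_congested(WORK_CPU_UNBOUND, wq))
        return false;        /* drop the nice-to-have item */
    return queue_work(wq, work);
}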
4597 
4598 /**
4599  * work_busy - test whether a work is currently pending or running
4600  * @work: the work to be tested
4601  *
4602  * Test whether @work is currently pending or running.  There is no
4603  * synchronization around this function and the test result is
4604  * unreliable and only useful as advisory hints or for debugging.
4605  *
4606  * Return:
4607  * OR'd bitmask of WORK_BUSY_* bits.
4608  */
4609 unsigned int work_busy(struct work_struct *work)
4610 {
4611     struct worker_pool *pool;
4612     unsigned long flags;
4613     unsigned int ret = 0;
4614 
4615     if (work_pending(work))
4616         ret |= WORK_BUSY_PENDING;
4617 
4618     rcu_read_lock();
4619     pool = get_work_pool(work);
4620     if (pool) {
4621         raw_spin_lock_irqsave(&pool->lock, flags);
4622         if (find_worker_executing_work(pool, work))
4623             ret |= WORK_BUSY_RUNNING;
4624         raw_spin_unlock_irqrestore(&pool->lock, flags);
4625     }
4626     rcu_read_unlock();
4627 
4628     return ret;
4629 }
4630 EXPORT_SYMBOL_GPL(work_busy);
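
/*
 * Example (illustrative sketch): emitting a debug hint about a possibly
 * stuck work item.  The snapshot is advisory and may be stale by the
 * time it is printed.
 */
#include <linux/workqueue.h>
#include <linux/printk.h>

static void report_work_state(struct work_struct *work)
{
    unsigned int busy = work_busy(work);

    pr_info("work %ps:%s%s\n", work->func,
        busy & WORK_BUSY_PENDING ? " pending" : "",
        busy & WORK_BUSY_RUNNING ? " running" : "");
}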
4631 
4632 /**
4633  * set_worker_desc - set description for the current work item
4634  * @fmt: printf-style format string
4635  * @...: arguments for the format string
4636  *
4637  * This function can be called by a running work function to describe what
4638  * the work item is about.  If the worker task gets dumped, this
4639  * information will be printed out alongside it to help debugging.  The
4640  * description can be at most WORKER_DESC_LEN including the trailing '\0'.
4641  */
4642 void set_worker_desc(const char *fmt, ...)
4643 {
4644     struct worker *worker = current_wq_worker();
4645     va_list args;
4646 
4647     if (worker) {
4648         va_start(args, fmt);
4649         vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
4650         va_end(args);
4651     }
4652 }
4653 EXPORT_SYMBOL_GPL(set_worker_desc);
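
/*
 * Example (illustrative sketch): tagging the executing worker with the
 * identity of the object being processed so a task dump shows which
 * instance was in flight.  struct my_req is hypothetical.
 */
#include <linux/workqueue.h>

struct my_req {
    struct work_struct work;
    int id;
};

static void my_req_fn(struct work_struct *work)
{
    struct my_req *req = container_of(work, struct my_req, work);

    set_worker_desc("my_req %d", req->id);    /* <= WORKER_DESC_LEN chars */
    /* ... process req ... */
}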
4654 
4655 /**
4656  * print_worker_info - print out worker information and description
4657  * @log_lvl: the log level to use when printing
4658  * @task: target task
4659  *
4660  * If @task is a worker and currently executing a work item, print out the
4661  * name of the workqueue being serviced and worker description set with
4662  * set_worker_desc() by the currently executing work item.
4663  *
4664  * This function can be safely called on any task as long as the
4665  * task_struct itself is accessible.  While safe, this function isn't
4666  * synchronized and may print out mixed-up or garbage output of limited length.
4667  */
4668 void print_worker_info(const char *log_lvl, struct task_struct *task)
4669 {
4670     work_func_t *fn = NULL;
4671     char name[WQ_NAME_LEN] = { };
4672     char desc[WORKER_DESC_LEN] = { };
4673     struct pool_workqueue *pwq = NULL;
4674     struct workqueue_struct *wq = NULL;
4675     struct worker *worker;
4676 
4677     if (!(task->flags & PF_WQ_WORKER))
4678         return;
4679 
4680     /*
4681      * This function is called without any synchronization and @task
4682      * could be in any state.  Be careful with dereferences.
4683      */
4684     worker = kthread_probe_data(task);
4685 
4686     /*
4687      * Carefully copy the associated workqueue's workfn, name and desc.
4688      * Keep the original last '\0' in case the original is garbage.
4689      */
4690     copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn));
4691     copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq));
4692     copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq));
4693     copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1);
4694     copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1);
4695 
4696     if (fn || name[0] || desc[0]) {
4697         printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
4698         if (strcmp(name, desc))
4699             pr_cont(" (%s)", desc);
4700         pr_cont("\n");
4701     }
4702 }
4703 
4704 static void pr_cont_pool_info(struct worker_pool *pool)
4705 {
4706     pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
4707     if (pool->node != NUMA_NO_NODE)
4708         pr_cont(" node=%d", pool->node);
4709     pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
4710 }
4711 
4712 static void pr_cont_work(bool comma, struct work_struct *work)
4713 {
4714     if (work->func == wq_barrier_func) {
4715         struct wq_barrier *barr;
4716 
4717         barr = container_of(work, struct wq_barrier, work);
4718 
4719         pr_cont("%s BAR(%d)", comma ? "," : "",
4720             task_pid_nr(barr->task));
4721     } else {
4722         pr_cont("%s %ps", comma ? "," : "", work->func);
4723     }
4724 }
4725 
4726 static void show_pwq(struct pool_workqueue *pwq)
4727 {
4728     struct worker_pool *pool = pwq->pool;
4729     struct work_struct *work;
4730     struct worker *worker;
4731     bool has_in_flight = false, has_pending = false;
4732     int bkt;
4733 
4734     pr_info("  pwq %d:", pool->id);
4735     pr_cont_pool_info(pool);
4736 
4737     pr_cont(" active=%d/%d refcnt=%d%s\n",
4738         pwq->nr_active, pwq->max_active, pwq->refcnt,
4739         !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
4740 
4741     hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4742         if (worker->current_pwq == pwq) {
4743             has_in_flight = true;
4744             break;
4745         }
4746     }
4747     if (has_in_flight) {
4748         bool comma = false;
4749 
4750         pr_info("    in-flight:");
4751         hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4752             if (worker->current_pwq != pwq)
4753                 continue;
4754 
4755             pr_cont("%s %d%s:%ps", comma ? "," : "",
4756                 task_pid_nr(worker->task),
4757                 worker->rescue_wq ? "(RESCUER)" : "",
4758                 worker->current_func);
4759             list_for_each_entry(work, &worker->scheduled, entry)
4760                 pr_cont_work(false, work);
4761             comma = true;
4762         }
4763         pr_cont("\n");
4764     }
4765 
4766     list_for_each_entry(work, &pool->worklist, entry) {
4767         if (get_work_pwq(work) == pwq) {
4768             has_pending = true;
4769             break;
4770         }
4771     }
4772     if (has_pending) {
4773         bool comma = false;
4774 
4775         pr_info("    pending:");
4776         list_for_each_entry(work, &pool->worklist, entry) {
4777             if (get_work_pwq(work) != pwq)
4778                 continue;
4779 
4780             pr_cont_work(comma, work);
4781             comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4782         }
4783         pr_cont("\n");
4784     }
4785 
4786     if (!list_empty(&pwq->inactive_works)) {
4787         bool comma = false;
4788 
4789         pr_info("    inactive:");
4790         list_for_each_entry(work, &pwq->inactive_works, entry) {
4791             pr_cont_work(comma, work);
4792             comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4793         }
4794         pr_cont("\n");
4795     }
4796 }
4797 
4798 /**
4799  * show_one_workqueue - dump state of specified workqueue
4800  * @wq: workqueue whose state will be printed
4801  */
4802 void show_one_workqueue(struct workqueue_struct *wq)
4803 {
4804     struct pool_workqueue *pwq;
4805     bool idle = true;
4806     unsigned long flags;
4807 
4808     for_each_pwq(pwq, wq) {
4809         if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
4810             idle = false;
4811             break;
4812         }
4813     }
4814     if (idle) /* Nothing to print for idle workqueue */
4815         return;
4816 
4817     pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
4818 
4819     for_each_pwq(pwq, wq) {
4820         raw_spin_lock_irqsave(&pwq->pool->lock, flags);
4821         if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
4822             /*
4823              * Defer printing to avoid deadlocks in console
4824              * drivers that queue work while holding locks
4825              * also taken in their write paths.
4826              */
4827             printk_deferred_enter();
4828             show_pwq(pwq);
4829             printk_deferred_exit();
4830         }
4831         raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
4832         /*
4833          * We could be printing a lot from atomic context, e.g.
4834          * sysrq-t -> show_all_workqueues(). Avoid triggering
4835          * hard lockup.
4836          */
4837         touch_nmi_watchdog();
4838     }
4839 
4840 }
4841 
4842 /**
4843  * show_one_worker_pool - dump state of specified worker pool
4844  * @pool: worker pool whose state will be printed
4845  */
4846 static void show_one_worker_pool(struct worker_pool *pool)
4847 {
4848     struct worker *worker;
4849     bool first = true;
4850     unsigned long flags;
4851 
4852     raw_spin_lock_irqsave(&pool->lock, flags);
4853     if (pool->nr_workers == pool->nr_idle)
4854         goto next_pool;
4855     /*
4856      * Defer printing to avoid deadlocks in console drivers that
4857      * queue work while holding locks also taken in their write
4858      * paths.
4859      */
4860     printk_deferred_enter();
4861     pr_info("pool %d:", pool->id);
4862     pr_cont_pool_info(pool);
4863     pr_cont(" hung=%us workers=%d",
4864         jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
4865         pool->nr_workers);
4866     if (pool->manager)
4867         pr_cont(" manager: %d",
4868             task_pid_nr(pool->manager->task));
4869     list_for_each_entry(worker, &pool->idle_list, entry) {
4870         pr_cont(" %s%d", first ? "idle: " : "",
4871             task_pid_nr(worker->task));
4872         first = false;
4873     }
4874     pr_cont("\n");
4875     printk_deferred_exit();
4876 next_pool:
4877     raw_spin_unlock_irqrestore(&pool->lock, flags);
4878     /*
4879      * We could be printing a lot from atomic context, e.g.
4880      * sysrq-t -> show_all_workqueues(). Avoid triggering
4881      * hard lockup.
4882      */
4883     touch_nmi_watchdog();
4884 
4885 }
4886 
4887 /**
4888  * show_all_workqueues - dump workqueue state
4889  *
4890  * Called from a sysrq handler or try_to_freeze_tasks() and prints out
4891  * all busy workqueues and pools.
4892  */
4893 void show_all_workqueues(void)
4894 {
4895     struct workqueue_struct *wq;
4896     struct worker_pool *pool;
4897     int pi;
4898 
4899     rcu_read_lock();
4900 
4901     pr_info("Showing busy workqueues and worker pools:\n");
4902 
4903     list_for_each_entry_rcu(wq, &workqueues, list)
4904         show_one_workqueue(wq);
4905 
4906     for_each_pool(pool, pi)
4907         show_one_worker_pool(pool);
4908 
4909     rcu_read_unlock();
4910 }
4911 
4912 /* used to show worker information through /proc/PID/{comm,stat,status} */
4913 void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
4914 {
4915     int off;
4916 
4917     /* always show the actual comm */
4918     off = strscpy(buf, task->comm, size);
4919     if (off < 0)
4920         return;
4921 
4922     /* stabilize PF_WQ_WORKER and worker pool association */
4923     mutex_lock(&wq_pool_attach_mutex);
4924 
4925     if (task->flags & PF_WQ_WORKER) {
4926         struct worker *worker = kthread_data(task);
4927         struct worker_pool *pool = worker->pool;
4928 
4929         if (pool) {
4930             raw_spin_lock_irq(&pool->lock);
4931             /*
4932              * ->desc tracks information (wq name or
4933              * set_worker_desc()) for the latest execution.  If
4934              * current, prepend '+', otherwise '-'.
4935              */
4936             if (worker->desc[0] != '\0') {
4937                 if (worker->current_work)
4938                     scnprintf(buf + off, size - off, "+%s",
4939                           worker->desc);
4940                 else
4941                     scnprintf(buf + off, size - off, "-%s",
4942                           worker->desc);
4943             }
4944             raw_spin_unlock_irq(&pool->lock);
4945         }
4946     }
4947 
4948     mutex_unlock(&wq_pool_attach_mutex);
4949 }
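/*
 * Example output (illustrative): for a kworker whose most recent work
 * item ran on the "events_unbound" workqueue, ps(1) would show
 *
 *  kworker/u8:3+events_unbound     (work item currently executing)
 *  kworker/u8:3-events_unbound     (idle, describing the latest execution)
 */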
4950 
4951 #ifdef CONFIG_SMP
4952 
4953 /*
4954  * CPU hotplug.
4955  *
4956  * There are two challenges in supporting CPU hotplug.  Firstly, there
4957  * are a lot of assumptions on strong associations among work, pwq and
4958  * pool which make migrating pending and scheduled works very
4959  * difficult to implement without impacting hot paths.  Secondly,
4960  * worker pools serve a mix of short, long and very long running works,
4961  * making blocked draining impractical.
4962  *
4963  * This is solved by allowing the pools to be disassociated from the CPU,
4964  * running as unbound ones, and reattached later if the CPU comes back
4965  * online.
4966  */
4967 
4968 static void unbind_workers(int cpu)
4969 {
4970     struct worker_pool *pool;
4971     struct worker *worker;
4972 
4973     for_each_cpu_worker_pool(pool, cpu) {
4974         mutex_lock(&wq_pool_attach_mutex);
4975         raw_spin_lock_irq(&pool->lock);
4976 
4977         /*
4978          * We've blocked all attach/detach operations. Make all workers
4979          * unbound and set DISASSOCIATED.  Before this, all workers
4980          * must be on the cpu.  After this, they may become diasporas.
4981          * And the preemption-disabled sections in their sched callbacks
4982          * are guaranteed to see WORKER_UNBOUND since the code here
4983          * is on the same cpu.
4984          */
4985         for_each_pool_worker(worker, pool)
4986             worker->flags |= WORKER_UNBOUND;
4987 
4988         pool->flags |= POOL_DISASSOCIATED;
4989 
4990         /*
4991          * The handling of nr_running in sched callbacks is disabled
4992          * now.  Zap nr_running.  After this, nr_running stays zero and
4993          * need_more_worker() and keep_working() are always true as
4994          * long as the worklist is not empty.  This pool now behaves as
4995          * an unbound (in terms of concurrency management) pool which
4996          * is served by workers tied to the pool.
4997          */
4998         pool->nr_running = 0;
4999 
5000         /*
5001          * With concurrency management just turned off, a busy
5002          * worker blocking could lead to lengthy stalls.  Kick off
5003          * unbound chain execution of currently pending work items.
5004          */
5005         wake_up_worker(pool);
5006 
5007         raw_spin_unlock_irq(&pool->lock);
5008 
5009         for_each_pool_worker(worker, pool) {
5010             kthread_set_per_cpu(worker->task, -1);
5011             if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
5012                 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
5013             else
5014                 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
5015         }
5016 
5017         mutex_unlock(&wq_pool_attach_mutex);
5018     }
5019 }
5020 
5021 /**
5022  * rebind_workers - rebind all workers of a pool to the associated CPU
5023  * @pool: pool of interest
5024  *
5025  * @pool->cpu is coming online.  Rebind all workers to the CPU.
5026  */
5027 static void rebind_workers(struct worker_pool *pool)
5028 {
5029     struct worker *worker;
5030 
5031     lockdep_assert_held(&wq_pool_attach_mutex);
5032 
5033     /*
5034      * Restore CPU affinity of all workers.  As all idle workers should
5035      * be on the run-queue of the associated CPU before any local
5036      * wake-ups for concurrency management happen, restore CPU affinity
5037      * of all workers first and then clear UNBOUND.  As we're called
5038      * from CPU_ONLINE, the following shouldn't fail.
5039      */
5040     for_each_pool_worker(worker, pool) {
5041         kthread_set_per_cpu(worker->task, pool->cpu);
5042         WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
5043                           pool->attrs->cpumask) < 0);
5044     }
5045 
5046     raw_spin_lock_irq(&pool->lock);
5047 
5048     pool->flags &= ~POOL_DISASSOCIATED;
5049 
5050     for_each_pool_worker(worker, pool) {
5051         unsigned int worker_flags = worker->flags;
5052 
5053         /*
5054          * We want to clear UNBOUND but can't directly call
5055          * worker_clr_flags() or adjust nr_running.  Atomically
5056          * replace UNBOUND with another NOT_RUNNING flag REBOUND.
5057          * @worker will clear REBOUND using worker_clr_flags() when
5058          * it initiates the next execution cycle thus restoring
5059          * concurrency management.  Note that when or whether
5060          * @worker clears REBOUND doesn't affect correctness.
5061          *
5062          * WRITE_ONCE() is necessary because @worker->flags may be
5063          * tested without holding any lock in
5064          * wq_worker_running().  Without it, NOT_RUNNING test may
5065          * fail incorrectly leading to premature concurrency
5066          * management operations.
5067          */
5068         WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
5069         worker_flags |= WORKER_REBOUND;
5070         worker_flags &= ~WORKER_UNBOUND;
5071         WRITE_ONCE(worker->flags, worker_flags);
5072     }
5073 
5074     raw_spin_unlock_irq(&pool->lock);
5075 }
5076 
5077 /**
5078  * restore_unbound_workers_cpumask - restore cpumask of unbound workers
5079  * @pool: unbound pool of interest
5080  * @cpu: the CPU which is coming up
5081  *
5082  * An unbound pool may end up with a cpumask which doesn't have any online
5083  * CPUs.  When a worker of such a pool gets scheduled, the scheduler resets
5084  * its cpus_allowed.  If @cpu is in @pool's cpumask which didn't have any
5085  * online CPU before, cpus_allowed of all its workers should be restored.
5086  */
5087 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
5088 {
5089     static cpumask_t cpumask;
5090     struct worker *worker;
5091 
5092     lockdep_assert_held(&wq_pool_attach_mutex);
5093 
5094     /* is @cpu allowed for @pool? */
5095     if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
5096         return;
5097 
5098     cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
5099 
5100     /* as we're called from CPU_ONLINE, the following shouldn't fail */
5101     for_each_pool_worker(worker, pool)
5102         WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
5103 }
5104 
5105 int workqueue_prepare_cpu(unsigned int cpu)
5106 {
5107     struct worker_pool *pool;
5108 
5109     for_each_cpu_worker_pool(pool, cpu) {
5110         if (pool->nr_workers)
5111             continue;
5112         if (!create_worker(pool))
5113             return -ENOMEM;
5114     }
5115     return 0;
5116 }
5117 
5118 int workqueue_online_cpu(unsigned int cpu)
5119 {
5120     struct worker_pool *pool;
5121     struct workqueue_struct *wq;
5122     int pi;
5123 
5124     mutex_lock(&wq_pool_mutex);
5125 
5126     for_each_pool(pool, pi) {
5127         mutex_lock(&wq_pool_attach_mutex);
5128 
5129         if (pool->cpu == cpu)
5130             rebind_workers(pool);
5131         else if (pool->cpu < 0)
5132             restore_unbound_workers_cpumask(pool, cpu);
5133 
5134         mutex_unlock(&wq_pool_attach_mutex);
5135     }
5136 
5137     /* update NUMA affinity of unbound workqueues */
5138     list_for_each_entry(wq, &workqueues, list)
5139         wq_update_unbound_numa(wq, cpu, true);
5140 
5141     mutex_unlock(&wq_pool_mutex);
5142     return 0;
5143 }
5144 
5145 int workqueue_offline_cpu(unsigned int cpu)
5146 {
5147     struct workqueue_struct *wq;
5148 
5149     /* unbinding per-cpu workers should happen on the local CPU */
5150     if (WARN_ON(cpu != smp_processor_id()))
5151         return -1;
5152 
5153     unbind_workers(cpu);
5154 
5155     /* update NUMA affinity of unbound workqueues */
5156     mutex_lock(&wq_pool_mutex);
5157     list_for_each_entry(wq, &workqueues, list)
5158         wq_update_unbound_numa(wq, cpu, false);
5159     mutex_unlock(&wq_pool_mutex);
5160 
5161     return 0;
5162 }
5163 
5164 struct work_for_cpu {
5165     struct work_struct work;
5166     long (*fn)(void *);
5167     void *arg;
5168     long ret;
5169 };
5170 
5171 static void work_for_cpu_fn(struct work_struct *work)
5172 {
5173     struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
5174 
5175     wfc->ret = wfc->fn(wfc->arg);
5176 }
5177 
5178 /**
5179  * work_on_cpu - run a function in thread context on a particular cpu
5180  * @cpu: the cpu to run on
5181  * @fn: the function to run
5182  * @arg: the function arg
5183  *
5184  * It is up to the caller to ensure that the cpu doesn't go offline.
5185  * The caller must not hold any locks which would prevent @fn from completing.
5186  *
5187  * Return: The value @fn returns.
5188  */
5189 long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
5190 {
5191     struct work_for_cpu wfc = { .fn = fn, .arg = arg };
5192 
5193     INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
5194     schedule_work_on(cpu, &wfc.work);
5195     flush_work(&wfc.work);
5196     destroy_work_on_stack(&wfc.work);
5197     return wfc.ret;
5198 }
5199 EXPORT_SYMBOL_GPL(work_on_cpu);
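/*
 * Example (an illustrative sketch, not part of the original file):
 * running a function on a specific CPU and collecting its return
 * value.  example_read_temp() and its fake reading are hypothetical;
 * the pattern is "pack the argument, call, use the result".
 */
static long example_read_temp(void *arg)
{
    int zone = *(int *)arg;

    /* runs in a kworker pinned to the CPU passed to work_on_cpu() */
    return 40 + zone;   /* stand-in for a per-CPU register read */
}

static long __maybe_unused example_temp_on(int cpu)
{
    int zone = 0;

    /* the caller must keep @cpu online, e.g. via cpus_read_lock() */
    return work_on_cpu(cpu, example_read_temp, &zone);
}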
5200 
5201 /**
5202  * work_on_cpu_safe - run a function in thread context on a particular cpu
5203  * @cpu: the cpu to run on
5204  * @fn:  the function to run
5205  * @arg: the function argument
5206  *
5207  * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
5208  * any locks which would prevent @fn from completing.
5209  *
5210  * Return: The value @fn returns.
5211  */
5212 long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
5213 {
5214     long ret = -ENODEV;
5215 
5216     cpus_read_lock();
5217     if (cpu_online(cpu))
5218         ret = work_on_cpu(cpu, fn, arg);
5219     cpus_read_unlock();
5220     return ret;
5221 }
5222 EXPORT_SYMBOL_GPL(work_on_cpu_safe);
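/*
 * Example (an illustrative sketch, not part of the original file):
 * when the caller cannot rule out a concurrent CPU hot-unplug,
 * work_on_cpu_safe() is the drop-in replacement - it excludes hotplug
 * itself and returns -ENODEV if @cpu is already offline.
 * example_read_temp() is the hypothetical callback from the sketch
 * above.
 */
static long __maybe_unused example_temp_on_safe(int cpu)
{
    int zone = 0;

    return work_on_cpu_safe(cpu, example_read_temp, &zone);
}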
5223 #endif /* CONFIG_SMP */
5224 
5225 #ifdef CONFIG_FREEZER
5226 
5227 /**
5228  * freeze_workqueues_begin - begin freezing workqueues
5229  *
5230  * Start freezing workqueues.  After this function returns, all freezable
5231  * workqueues will queue new works to their inactive_works list instead of
5232  * pool->worklist.
5233  *
5234  * CONTEXT:
5235  * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5236  */
5237 void freeze_workqueues_begin(void)
5238 {
5239     struct workqueue_struct *wq;
5240     struct pool_workqueue *pwq;
5241 
5242     mutex_lock(&wq_pool_mutex);
5243 
5244     WARN_ON_ONCE(workqueue_freezing);
5245     workqueue_freezing = true;
5246 
5247     list_for_each_entry(wq, &workqueues, list) {
5248         mutex_lock(&wq->mutex);
5249         for_each_pwq(pwq, wq)
5250             pwq_adjust_max_active(pwq);
5251         mutex_unlock(&wq->mutex);
5252     }
5253 
5254     mutex_unlock(&wq_pool_mutex);
5255 }
5256 
5257 /**
5258  * freeze_workqueues_busy - are freezable workqueues still busy?
5259  *
5260  * Check whether freezing is complete.  This function must be called
5261  * between freeze_workqueues_begin() and thaw_workqueues().
5262  *
5263  * CONTEXT:
5264  * Grabs and releases wq_pool_mutex.
5265  *
5266  * Return:
5267  * %true if some freezable workqueues are still busy.  %false if freezing
5268  * is complete.
5269  */
5270 bool freeze_workqueues_busy(void)
5271 {
5272     bool busy = false;
5273     struct workqueue_struct *wq;
5274     struct pool_workqueue *pwq;
5275 
5276     mutex_lock(&wq_pool_mutex);
5277 
5278     WARN_ON_ONCE(!workqueue_freezing);
5279 
5280     list_for_each_entry(wq, &workqueues, list) {
5281         if (!(wq->flags & WQ_FREEZABLE))
5282             continue;
5283         /*
5284          * nr_active is monotonically decreasing.  It's safe
5285          * to peek without lock.
5286          */
5287         rcu_read_lock();
5288         for_each_pwq(pwq, wq) {
5289             WARN_ON_ONCE(pwq->nr_active < 0);
5290             if (pwq->nr_active) {
5291                 busy = true;
5292                 rcu_read_unlock();
5293                 goto out_unlock;
5294             }
5295         }
5296         rcu_read_unlock();
5297     }
5298 out_unlock:
5299     mutex_unlock(&wq_pool_mutex);
5300     return busy;
5301 }
5302 
5303 /**
5304  * thaw_workqueues - thaw workqueues
5305  *
5306  * Thaw workqueues.  Normal queueing is restored and all collected
5307  * frozen works are transferred to their respective pool worklists.
5308  *
5309  * CONTEXT:
5310  * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5311  */
5312 void thaw_workqueues(void)
5313 {
5314     struct workqueue_struct *wq;
5315     struct pool_workqueue *pwq;
5316 
5317     mutex_lock(&wq_pool_mutex);
5318 
5319     if (!workqueue_freezing)
5320         goto out_unlock;
5321 
5322     workqueue_freezing = false;
5323 
5324     /* restore max_active and repopulate worklist */
5325     list_for_each_entry(wq, &workqueues, list) {
5326         mutex_lock(&wq->mutex);
5327         for_each_pwq(pwq, wq)
5328             pwq_adjust_max_active(pwq);
5329         mutex_unlock(&wq->mutex);
5330     }
5331 
5332 out_unlock:
5333     mutex_unlock(&wq_pool_mutex);
5334 }
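/*
 * Example (an illustrative sketch, not part of the original file):
 * the suspend path drives these three entry points in sequence.  The
 * retry count, the 100ms poll interval and example_freeze_wqs() are
 * hypothetical - the real policy lives in the freezer proper - and
 * msleep() would additionally need <linux/delay.h>.
 */
static int __maybe_unused example_freeze_wqs(void)
{
    int tries = 10;

    freeze_workqueues_begin();

    /* poll until in-flight work on freezable wqs has drained */
    while (freeze_workqueues_busy()) {
        if (!--tries) {
            thaw_workqueues(); /* give up and roll back */
            return -EBUSY;
        }
        msleep(100);
    }
    return 0;
}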
5335 #endif /* CONFIG_FREEZER */
5336 
5337 static int workqueue_apply_unbound_cpumask(void)
5338 {
5339     LIST_HEAD(ctxs);
5340     int ret = 0;
5341     struct workqueue_struct *wq;
5342     struct apply_wqattrs_ctx *ctx, *n;
5343 
5344     lockdep_assert_held(&wq_pool_mutex);
5345 
5346     list_for_each_entry(wq, &workqueues, list) {
5347         if (!(wq->flags & WQ_UNBOUND))
5348             continue;
5349         /* creating multiple pwqs breaks the ordering guarantee */
5350         if (wq->flags & __WQ_ORDERED)
5351             continue;
5352 
5353         ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
5354         if (!ctx) {
5355             ret = -ENOMEM;
5356             break;
5357         }
5358 
5359         list_add_tail(&ctx->list, &ctxs);
5360     }
5361 
5362     list_for_each_entry_safe(ctx, n, &ctxs, list) {
5363         if (!ret)
5364             apply_wqattrs_commit(ctx);
5365         apply_wqattrs_cleanup(ctx);
5366     }
5367 
5368     return ret;
5369 }
5370 
5371 /**
5372  *  workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
5373  *  @cpumask: the cpumask to set
5374  *
5375  *  The low-level workqueues cpumask is a global cpumask that limits
5376  *  the affinity of all unbound workqueues.  This function checks @cpumask,
5377  *  applies it to all unbound workqueues and updates all their pwqs.
5378  *
5379  *  Return: 0   - Success
5380  *          -EINVAL - Invalid @cpumask
5381  *          -ENOMEM - Failed to allocate memory for attrs or pwqs.
5382  */
5383 int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
5384 {
5385     int ret = -EINVAL;
5386     cpumask_var_t saved_cpumask;
5387 
5388     /*
5389      * Not excluding isolated cpus on purpose.
5390      * If the user wishes to include them, we allow that.
5391      */
5392     cpumask_and(cpumask, cpumask, cpu_possible_mask);
5393     if (!cpumask_empty(cpumask)) {
5394         apply_wqattrs_lock();
5395         if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
5396             ret = 0;
5397             goto out_unlock;
5398         }
5399 
5400         if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) {
5401             ret = -ENOMEM;
5402             goto out_unlock;
5403         }
5404 
5405         /* save the old wq_unbound_cpumask. */
5406         cpumask_copy(saved_cpumask, wq_unbound_cpumask);
5407 
5408         /* update wq_unbound_cpumask at first and apply it to wqs. */
5409         cpumask_copy(wq_unbound_cpumask, cpumask);
5410         ret = workqueue_apply_unbound_cpumask();
5411 
5412         /* restore the wq_unbound_cpumask when failed. */
5413         if (ret < 0)
5414             cpumask_copy(wq_unbound_cpumask, saved_cpumask);
5415 
5416         free_cpumask_var(saved_cpumask);
5417 out_unlock:
5418         apply_wqattrs_unlock();
5419     }
5420 
5421     return ret;
5422 }
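/*
 * Example (an illustrative sketch, not part of the original file):
 * restricting all unbound workqueues to CPUs 0-3 from kernel code.
 * Userspace reaches the same function through the workqueue sysfs
 * cpumask attribute (see wq_unbound_cpumask_store() below).
 * example_restrict_unbound() is hypothetical.
 */
static int __maybe_unused example_restrict_unbound(void)
{
    cpumask_var_t mask;
    int cpu, ret;

    if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
        return -ENOMEM;

    for (cpu = 0; cpu < 4; cpu++)
        cpumask_set_cpu(cpu, mask);

    ret = workqueue_set_unbound_cpumask(mask);
    free_cpumask_var(mask);
    return ret;
}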
5423 
5424 #ifdef CONFIG_SYSFS
5425 /*
5426  * Workqueues with the WQ_SYSFS flag set are visible to userland via
5427  * /sys/bus/workqueue/devices/WQ_NAME.  All visible workqueues have the
5428  * following attributes.
5429  *
5430  *  per_cpu RO bool : whether the workqueue is per-cpu or unbound
5431  *  max_active  RW int  : maximum number of in-flight work items
5432  *
5433  * Unbound workqueues have the following extra attributes.
5434  *
5435  *  pool_ids    RO int  : the associated pool IDs for each node
5436  *  nice    RW int  : nice value of the workers
5437  *  cpumask RW mask : bitmask of allowed CPUs for the workers
5438  *  numa    RW bool : whether to enable NUMA affinity
5439  */
5440 struct wq_device {
5441     struct workqueue_struct     *wq;
5442     struct device           dev;
5443 };
5444 
5445 static struct workqueue_struct *dev_to_wq(struct device *dev)
5446 {
5447     struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5448 
5449     return wq_dev->wq;
5450 }
5451 
5452 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
5453                 char *buf)
5454 {
5455     struct workqueue_struct *wq = dev_to_wq(dev);
5456 
5457     return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
5458 }
5459 static DEVICE_ATTR_RO(per_cpu);
5460 
5461 static ssize_t max_active_show(struct device *dev,
5462                    struct device_attribute *attr, char *buf)
5463 {
5464     struct workqueue_struct *wq = dev_to_wq(dev);
5465 
5466     return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
5467 }
5468 
5469 static ssize_t max_active_store(struct device *dev,
5470                 struct device_attribute *attr, const char *buf,
5471                 size_t count)
5472 {
5473     struct workqueue_struct *wq = dev_to_wq(dev);
5474     int val;
5475 
5476     if (sscanf(buf, "%d", &val) != 1 || val <= 0)
5477         return -EINVAL;
5478 
5479     workqueue_set_max_active(wq, val);
5480     return count;
5481 }
5482 static DEVICE_ATTR_RW(max_active);
5483 
5484 static struct attribute *wq_sysfs_attrs[] = {
5485     &dev_attr_per_cpu.attr,
5486     &dev_attr_max_active.attr,
5487     NULL,
5488 };
5489 ATTRIBUTE_GROUPS(wq_sysfs);
5490 
5491 static ssize_t wq_pool_ids_show(struct device *dev,
5492                 struct device_attribute *attr, char *buf)
5493 {
5494     struct workqueue_struct *wq = dev_to_wq(dev);
5495     const char *delim = "";
5496     int node, written = 0;
5497 
5498     cpus_read_lock();
5499     rcu_read_lock();
5500     for_each_node(node) {
5501         written += scnprintf(buf + written, PAGE_SIZE - written,
5502                      "%s%d:%d", delim, node,
5503                      unbound_pwq_by_node(wq, node)->pool->id);
5504         delim = " ";
5505     }
5506     written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
5507     rcu_read_unlock();
5508     cpus_read_unlock();
5509 
5510     return written;
5511 }
5512 
5513 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
5514                 char *buf)
5515 {
5516     struct workqueue_struct *wq = dev_to_wq(dev);
5517     int written;
5518 
5519     mutex_lock(&wq->mutex);
5520     written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
5521     mutex_unlock(&wq->mutex);
5522 
5523     return written;
5524 }
5525 
5526 /* prepare workqueue_attrs for sysfs store operations */
5527 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
5528 {
5529     struct workqueue_attrs *attrs;
5530 
5531     lockdep_assert_held(&wq_pool_mutex);
5532 
5533     attrs = alloc_workqueue_attrs();
5534     if (!attrs)
5535         return NULL;
5536 
5537     copy_workqueue_attrs(attrs, wq->unbound_attrs);
5538     return attrs;
5539 }
5540 
5541 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
5542                  const char *buf, size_t count)
5543 {
5544     struct workqueue_struct *wq = dev_to_wq(dev);
5545     struct workqueue_attrs *attrs;
5546     int ret = -ENOMEM;
5547 
5548     apply_wqattrs_lock();
5549 
5550     attrs = wq_sysfs_prep_attrs(wq);
5551     if (!attrs)
5552         goto out_unlock;
5553 
5554     if (sscanf(buf, "%d", &attrs->nice) == 1 &&
5555         attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
5556         ret = apply_workqueue_attrs_locked(wq, attrs);
5557     else
5558         ret = -EINVAL;
5559 
5560 out_unlock:
5561     apply_wqattrs_unlock();
5562     free_workqueue_attrs(attrs);
5563     return ret ?: count;
5564 }
5565 
5566 static ssize_t wq_cpumask_show(struct device *dev,
5567                    struct device_attribute *attr, char *buf)
5568 {
5569     struct workqueue_struct *wq = dev_to_wq(dev);
5570     int written;
5571 
5572     mutex_lock(&wq->mutex);
5573     written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5574                 cpumask_pr_args(wq->unbound_attrs->cpumask));
5575     mutex_unlock(&wq->mutex);
5576     return written;
5577 }
5578 
5579 static ssize_t wq_cpumask_store(struct device *dev,
5580                 struct device_attribute *attr,
5581                 const char *buf, size_t count)
5582 {
5583     struct workqueue_struct *wq = dev_to_wq(dev);
5584     struct workqueue_attrs *attrs;
5585     int ret = -ENOMEM;
5586 
5587     apply_wqattrs_lock();
5588 
5589     attrs = wq_sysfs_prep_attrs(wq);
5590     if (!attrs)
5591         goto out_unlock;
5592 
5593     ret = cpumask_parse(buf, attrs->cpumask);
5594     if (!ret)
5595         ret = apply_workqueue_attrs_locked(wq, attrs);
5596 
5597 out_unlock:
5598     apply_wqattrs_unlock();
5599     free_workqueue_attrs(attrs);
5600     return ret ?: count;
5601 }
5602 
5603 static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
5604                 char *buf)
5605 {
5606     struct workqueue_struct *wq = dev_to_wq(dev);
5607     int written;
5608 
5609     mutex_lock(&wq->mutex);
5610     written = scnprintf(buf, PAGE_SIZE, "%d\n",
5611                 !wq->unbound_attrs->no_numa);
5612     mutex_unlock(&wq->mutex);
5613 
5614     return written;
5615 }
5616 
5617 static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
5618                  const char *buf, size_t count)
5619 {
5620     struct workqueue_struct *wq = dev_to_wq(dev);
5621     struct workqueue_attrs *attrs;
5622     int v, ret = -ENOMEM;
5623 
5624     apply_wqattrs_lock();
5625 
5626     attrs = wq_sysfs_prep_attrs(wq);
5627     if (!attrs)
5628         goto out_unlock;
5629 
5630     ret = -EINVAL;
5631     if (sscanf(buf, "%d", &v) == 1) {
5632         attrs->no_numa = !v;
5633         ret = apply_workqueue_attrs_locked(wq, attrs);
5634     }
5635 
5636 out_unlock:
5637     apply_wqattrs_unlock();
5638     free_workqueue_attrs(attrs);
5639     return ret ?: count;
5640 }
5641 
5642 static struct device_attribute wq_sysfs_unbound_attrs[] = {
5643     __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
5644     __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
5645     __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
5646     __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
5647     __ATTR_NULL,
5648 };
5649 
5650 static struct bus_type wq_subsys = {
5651     .name               = "workqueue",
5652     .dev_groups         = wq_sysfs_groups,
5653 };
5654 
5655 static ssize_t wq_unbound_cpumask_show(struct device *dev,
5656         struct device_attribute *attr, char *buf)
5657 {
5658     int written;
5659 
5660     mutex_lock(&wq_pool_mutex);
5661     written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5662                 cpumask_pr_args(wq_unbound_cpumask));
5663     mutex_unlock(&wq_pool_mutex);
5664 
5665     return written;
5666 }
5667 
5668 static ssize_t wq_unbound_cpumask_store(struct device *dev,
5669         struct device_attribute *attr, const char *buf, size_t count)
5670 {
5671     cpumask_var_t cpumask;
5672     int ret;
5673 
5674     if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
5675         return -ENOMEM;
5676 
5677     ret = cpumask_parse(buf, cpumask);
5678     if (!ret)
5679         ret = workqueue_set_unbound_cpumask(cpumask);
5680 
5681     free_cpumask_var(cpumask);
5682     return ret ? ret : count;
5683 }
5684 
5685 static struct device_attribute wq_sysfs_cpumask_attr =
5686     __ATTR(cpumask, 0644, wq_unbound_cpumask_show,
5687            wq_unbound_cpumask_store);
5688 
5689 static int __init wq_sysfs_init(void)
5690 {
5691     int err;
5692 
5693     err = subsys_virtual_register(&wq_subsys, NULL);
5694     if (err)
5695         return err;
5696 
5697     return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr);
5698 }
5699 core_initcall(wq_sysfs_init);
5700 
5701 static void wq_device_release(struct device *dev)
5702 {
5703     struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5704 
5705     kfree(wq_dev);
5706 }
5707 
5708 /**
5709  * workqueue_sysfs_register - make a workqueue visible in sysfs
5710  * @wq: the workqueue to register
5711  *
5712  * Expose @wq in sysfs under /sys/bus/workqueue/devices.
5713  * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
5714  * which is the preferred method.
5715  *
5716  * A workqueue user should call this function directly iff it wants to apply
5717  * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
5718  * apply_workqueue_attrs() may race against userland updating the
5719  * attributes.
5720  *
5721  * Return: 0 on success, -errno on failure.
5722  */
5723 int workqueue_sysfs_register(struct workqueue_struct *wq)
5724 {
5725     struct wq_device *wq_dev;
5726     int ret;
5727 
5728     /*
5729      * Adjusting max_active or creating new pwqs by applying
5730      * attributes breaks the ordering guarantee.  Disallow exposing ordered
5731      * workqueues.
5732      */
5733     if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
5734         return -EINVAL;
5735 
5736     wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
5737     if (!wq_dev)
5738         return -ENOMEM;
5739 
5740     wq_dev->wq = wq;
5741     wq_dev->dev.bus = &wq_subsys;
5742     wq_dev->dev.release = wq_device_release;
5743     dev_set_name(&wq_dev->dev, "%s", wq->name);
5744 
5745     /*
5746      * unbound_attrs are created separately.  Suppress uevent until
5747      * everything is ready.
5748      */
5749     dev_set_uevent_suppress(&wq_dev->dev, true);
5750 
5751     ret = device_register(&wq_dev->dev);
5752     if (ret) {
5753         put_device(&wq_dev->dev);
5754         wq->wq_dev = NULL;
5755         return ret;
5756     }
5757 
5758     if (wq->flags & WQ_UNBOUND) {
5759         struct device_attribute *attr;
5760 
5761         for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
5762             ret = device_create_file(&wq_dev->dev, attr);
5763             if (ret) {
5764                 device_unregister(&wq_dev->dev);
5765                 wq->wq_dev = NULL;
5766                 return ret;
5767             }
5768         }
5769     }
5770 
5771     dev_set_uevent_suppress(&wq_dev->dev, false);
5772     kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
5773     return 0;
5774 }
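/*
 * Example (an illustrative sketch, not part of the original file):
 * most users never call workqueue_sysfs_register() themselves - the
 * WQ_SYSFS flag makes alloc_workqueue() do it.  "example_wq" is a
 * hypothetical name.
 */
static __maybe_unused struct workqueue_struct *example_visible_wq(void)
{
    /* appears under /sys/bus/workqueue/devices/example_wq */
    return alloc_workqueue("example_wq", WQ_UNBOUND | WQ_SYSFS, 0);
}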
5775 
5776 /**
5777  * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
5778  * @wq: the workqueue to unregister
5779  *
5780  * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
5781  */
5782 static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
5783 {
5784     struct wq_device *wq_dev = wq->wq_dev;
5785 
5786     if (!wq->wq_dev)
5787         return;
5788 
5789     wq->wq_dev = NULL;
5790     device_unregister(&wq_dev->dev);
5791 }
5792 #else   /* CONFIG_SYSFS */
5793 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { }
5794 #endif  /* CONFIG_SYSFS */
5795 
5796 /*
5797  * Workqueue watchdog.
5798  *
5799  * Stalls may be caused by various bugs - a missing WQ_MEM_RECLAIM, an
5800  * illegal flush dependency, or a concurrency-managed work item which stays
5801  * indefinitely.  Workqueue stalls can be very difficult to debug as the
5802  * usual warning mechanisms don't trigger and internal workqueue state is
5803  * largely opaque.
5804  *
5805  * Workqueue watchdog monitors all worker pools periodically and dumps
5806  * state if some pools fail to make forward progress for a while, where
5807  * forward progress is defined as the first item on ->worklist changing.
5808  *
5809  * This mechanism is controlled through the kernel parameter
5810  * "workqueue.watchdog_thresh" which can be updated at runtime through the
5811  * corresponding sysfs parameter file.
5812  */
5813 #ifdef CONFIG_WQ_WATCHDOG
5814 
5815 static unsigned long wq_watchdog_thresh = 30;
5816 static struct timer_list wq_watchdog_timer;
5817 
5818 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
5819 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
5820 
5821 static void wq_watchdog_reset_touched(void)
5822 {
5823     int cpu;
5824 
5825     wq_watchdog_touched = jiffies;
5826     for_each_possible_cpu(cpu)
5827         per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5828 }
5829 
5830 static void wq_watchdog_timer_fn(struct timer_list *unused)
5831 {
5832     unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
5833     bool lockup_detected = false;
5834     unsigned long now = jiffies;
5835     struct worker_pool *pool;
5836     int pi;
5837 
5838     if (!thresh)
5839         return;
5840 
5841     rcu_read_lock();
5842 
5843     for_each_pool(pool, pi) {
5844         unsigned long pool_ts, touched, ts;
5845 
5846         if (list_empty(&pool->worklist))
5847             continue;
5848 
5849         /*
5850          * If a virtual machine is stopped by the host it can look to
5851          * the watchdog like a stall.
5852          */
5853         kvm_check_and_clear_guest_paused();
5854 
5855         /* get the latest of pool and touched timestamps */
5856         if (pool->cpu >= 0)
5857             touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
5858         else
5859             touched = READ_ONCE(wq_watchdog_touched);
5860         pool_ts = READ_ONCE(pool->watchdog_ts);
5861 
5862         if (time_after(pool_ts, touched))
5863             ts = pool_ts;
5864         else
5865             ts = touched;
5866 
5867         /* did we stall? */
5868         if (time_after(now, ts + thresh)) {
5869             lockup_detected = true;
5870             pr_emerg("BUG: workqueue lockup - pool");
5871             pr_cont_pool_info(pool);
5872             pr_cont(" stuck for %us!\n",
5873                 jiffies_to_msecs(now - pool_ts) / 1000);
5874         }
5875     }
5876 
5877     rcu_read_unlock();
5878 
5879     if (lockup_detected)
5880         show_all_workqueues();
5881 
5882     wq_watchdog_reset_touched();
5883     mod_timer(&wq_watchdog_timer, jiffies + thresh);
5884 }
5885 
5886 notrace void wq_watchdog_touch(int cpu)
5887 {
5888     if (cpu >= 0)
5889         per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5890 
5891     wq_watchdog_touched = jiffies;
5892 }
5893 
5894 static void wq_watchdog_set_thresh(unsigned long thresh)
5895 {
5896     wq_watchdog_thresh = 0;
5897     del_timer_sync(&wq_watchdog_timer);
5898 
5899     if (thresh) {
5900         wq_watchdog_thresh = thresh;
5901         wq_watchdog_reset_touched();
5902         mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
5903     }
5904 }
5905 
5906 static int wq_watchdog_param_set_thresh(const char *val,
5907                     const struct kernel_param *kp)
5908 {
5909     unsigned long thresh;
5910     int ret;
5911 
5912     ret = kstrtoul(val, 0, &thresh);
5913     if (ret)
5914         return ret;
5915 
5916     if (system_wq)
5917         wq_watchdog_set_thresh(thresh);
5918     else
5919         wq_watchdog_thresh = thresh;
5920 
5921     return 0;
5922 }
5923 
5924 static const struct kernel_param_ops wq_watchdog_thresh_ops = {
5925     .set    = wq_watchdog_param_set_thresh,
5926     .get    = param_get_ulong,
5927 };
5928 
5929 module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
5930         0644);
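/*
 * Example (illustrative): the threshold can be given on the kernel
 * command line or changed at runtime; both paths end up in
 * wq_watchdog_param_set_thresh() above.
 *
 *  workqueue.watchdog_thresh=60                        (boot parameter)
 *  echo 60 > /sys/module/workqueue/parameters/watchdog_thresh
 */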
5931 
5932 static void wq_watchdog_init(void)
5933 {
5934     timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
5935     wq_watchdog_set_thresh(wq_watchdog_thresh);
5936 }
5937 
5938 #else   /* CONFIG_WQ_WATCHDOG */
5939 
5940 static inline void wq_watchdog_init(void) { }
5941 
5942 #endif  /* CONFIG_WQ_WATCHDOG */
5943 
5944 static void __init wq_numa_init(void)
5945 {
5946     cpumask_var_t *tbl;
5947     int node, cpu;
5948 
5949     if (num_possible_nodes() <= 1)
5950         return;
5951 
5952     if (wq_disable_numa) {
5953         pr_info("workqueue: NUMA affinity support disabled\n");
5954         return;
5955     }
5956 
5957     for_each_possible_cpu(cpu) {
5958         if (WARN_ON(cpu_to_node(cpu) == NUMA_NO_NODE)) {
5959             pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
5960             return;
5961         }
5962     }
5963 
5964     wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
5965     BUG_ON(!wq_update_unbound_numa_attrs_buf);
5966 
5967     /*
5968      * We want masks of possible CPUs of each node, which aren't readily
5969      * available.  Build one from cpu_to_node() which should have been
5970      * fully initialized by now.
5971      */
5972     tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
5973     BUG_ON(!tbl);
5974 
5975     for_each_node(node)
5976         BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
5977                 node_online(node) ? node : NUMA_NO_NODE));
5978 
5979     for_each_possible_cpu(cpu) {
5980         node = cpu_to_node(cpu);
5981         cpumask_set_cpu(cpu, tbl[node]);
5982     }
5983 
5984     wq_numa_possible_cpumask = tbl;
5985     wq_numa_enabled = true;
5986 }
5987 
5988 /**
5989  * workqueue_init_early - early init for workqueue subsystem
5990  *
5991  * This is the first half of two-staged workqueue subsystem initialization
5992  * and invoked as soon as the bare basics - memory allocation, cpumasks and
5993  * idr are up.  It sets up all the data structures and system workqueues
5994  * and allows early boot code to create workqueues and queue/cancel work
5995  * items.  Actual work item execution starts only after kthreads can be
5996  * created and scheduled right before early initcalls.
5997  */
5998 void __init workqueue_init_early(void)
5999 {
6000     int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
6001     int i, cpu;
6002 
6003     BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
6004 
6005     BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
6006     cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_WQ));
6007     cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_DOMAIN));
6008 
6009     pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
6010 
6011     /* initialize CPU pools */
6012     for_each_possible_cpu(cpu) {
6013         struct worker_pool *pool;
6014 
6015         i = 0;
6016         for_each_cpu_worker_pool(pool, cpu) {
6017             BUG_ON(init_worker_pool(pool));
6018             pool->cpu = cpu;
6019             cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
6020             pool->attrs->nice = std_nice[i++];
6021             pool->node = cpu_to_node(cpu);
6022 
6023             /* alloc pool ID */
6024             mutex_lock(&wq_pool_mutex);
6025             BUG_ON(worker_pool_assign_id(pool));
6026             mutex_unlock(&wq_pool_mutex);
6027         }
6028     }
6029 
6030     /* create default unbound and ordered wq attrs */
6031     for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
6032         struct workqueue_attrs *attrs;
6033 
6034         BUG_ON(!(attrs = alloc_workqueue_attrs()));
6035         attrs->nice = std_nice[i];
6036         unbound_std_wq_attrs[i] = attrs;
6037 
6038         /*
6039          * An ordered wq should have only one pwq as ordering is
6040          * guaranteed by max_active which is enforced by pwqs.
6041          * Turn off NUMA so that dfl_pwq is used for all nodes.
6042          */
6043         BUG_ON(!(attrs = alloc_workqueue_attrs()));
6044         attrs->nice = std_nice[i];
6045         attrs->no_numa = true;
6046         ordered_wq_attrs[i] = attrs;
6047     }
6048 
6049     system_wq = alloc_workqueue("events", 0, 0);
6050     system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
6051     system_long_wq = alloc_workqueue("events_long", 0, 0);
6052     system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
6053                         WQ_UNBOUND_MAX_ACTIVE);
6054     system_freezable_wq = alloc_workqueue("events_freezable",
6055                           WQ_FREEZABLE, 0);
6056     system_power_efficient_wq = alloc_workqueue("events_power_efficient",
6057                           WQ_POWER_EFFICIENT, 0);
6058     system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
6059                           WQ_FREEZABLE | WQ_POWER_EFFICIENT,
6060                           0);
6061     BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
6062            !system_unbound_wq || !system_freezable_wq ||
6063            !system_power_efficient_wq ||
6064            !system_freezable_power_efficient_wq);
6065 }
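/*
 * Example (an illustrative sketch, not part of the original file):
 * after workqueue_init_early(), boot code may already queue work; it
 * simply won't execute until workqueue_init() has created the first
 * kworkers.  example_boot_fn()/example_boot_work are hypothetical.
 */
static void example_boot_fn(struct work_struct *work)
{
    pr_info("runs once the initial kworkers exist\n");
}

static DECLARE_WORK(example_boot_work, example_boot_fn);

static void __maybe_unused example_early_queue(void)
{
    schedule_work(&example_boot_work); /* queued now, runs later */
}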
6066 
6067 /**
6068  * workqueue_init - bring workqueue subsystem fully online
6069  *
6070  * This is the latter half of two-staged workqueue subsystem initialization
6071  * and invoked as soon as kthreads can be created and scheduled.
6072  * Workqueues have been created and work items queued on them, but there
6073  * are no kworkers executing the work items yet.  Populate the worker pools
6074  * with the initial workers and enable future kworker creations.
6075  */
6076 void __init workqueue_init(void)
6077 {
6078     struct workqueue_struct *wq;
6079     struct worker_pool *pool;
6080     int cpu, bkt;
6081 
6082     /*
6083      * It'd be simpler to initialize NUMA in workqueue_init_early() but
6084      * CPU to node mapping may not be available that early on some
6085      * archs such as power and arm64.  Because per-cpu pools created
6086      * earlier could be missing their node hint, and unbound pools their
6087      * NUMA affinity, fix both up here.
6088      *
6089      * Also, while iterating workqueues, create rescuers if requested.
6090      */
6091     wq_numa_init();
6092 
6093     mutex_lock(&wq_pool_mutex);
6094 
6095     for_each_possible_cpu(cpu) {
6096         for_each_cpu_worker_pool(pool, cpu) {
6097             pool->node = cpu_to_node(cpu);
6098         }
6099     }
6100 
6101     list_for_each_entry(wq, &workqueues, list) {
6102         wq_update_unbound_numa(wq, smp_processor_id(), true);
6103         WARN(init_rescuer(wq),
6104              "workqueue: failed to create early rescuer for %s",
6105              wq->name);
6106     }
6107 
6108     mutex_unlock(&wq_pool_mutex);
6109 
6110     /* create the initial workers */
6111     for_each_online_cpu(cpu) {
6112         for_each_cpu_worker_pool(pool, cpu) {
6113             pool->flags &= ~POOL_DISASSOCIATED;
6114             BUG_ON(!create_worker(pool));
6115         }
6116     }
6117 
6118     hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
6119         BUG_ON(!create_worker(pool));
6120 
6121     wq_online = true;
6122     wq_watchdog_init();
6123 }
6124 
6125 /*
6126  * Despite the naming, this is a no-op function which is here only to avoid a
6127  * link error. Since the compile-time warning may fail to catch some cases, we
6128  * also need to emit a run-time warning from __flush_workqueue().
6129  */
6130 void __warn_flushing_systemwide_wq(void) { }
6131 EXPORT_SYMBOL(__warn_flushing_systemwide_wq);