// SPDX-License-Identifier: GPL-2.0-only
/*
 * umh - the kernel usermode helper
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/binfmts.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/ptrace.h>
#include <linux/async.h>
#include <linux/uaccess.h>
#include <linux/initrd.h>

#include <trace/events/module.h>

#define CAP_BSET    (void *)1
#define CAP_PI      (void *)2

static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);

static void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
    if (info->cleanup)
        (*info->cleanup)(info);
    kfree(info);
}

static void umh_complete(struct subprocess_info *sub_info)
{
    struct completion *comp = xchg(&sub_info->complete, NULL);
    /*
     * See call_usermodehelper_exec(). If xchg() returns NULL
     * we own sub_info, the UMH_KILLABLE caller has gone away
     * or the caller used UMH_NO_WAIT.
     */
    if (comp)
        complete(comp);
    else
        call_usermodehelper_freeinfo(sub_info);
}

/*
 * This is the task which runs the usermode application
 */
static int call_usermodehelper_exec_async(void *data)
{
    struct subprocess_info *sub_info = data;
    struct cred *new;
    int retval;

    spin_lock_irq(&current->sighand->siglock);
    flush_signal_handlers(current, 1);
    spin_unlock_irq(&current->sighand->siglock);

    /*
     * Initial kernel threads share their FS with init, in order to
     * get the init root directory. But we've now created a new
     * thread that is going to execve a user process and has its own
     * 'struct fs_struct'. Reset umask to the default.
     */
    current->fs->umask = 0022;

    /*
     * Our parent (unbound workqueue) runs with elevated scheduling
     * priority. Avoid propagating that into the userspace child.
     */
    set_user_nice(current, 0);

    retval = -ENOMEM;
    new = prepare_kernel_cred(current);
    if (!new)
        goto out;

    spin_lock(&umh_sysctl_lock);
    new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
    new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
                         new->cap_inheritable);
    spin_unlock(&umh_sysctl_lock);

    if (sub_info->init) {
        retval = sub_info->init(sub_info, new);
        if (retval) {
            abort_creds(new);
            goto out;
        }
    }

    commit_creds(new);

    wait_for_initramfs();
    retval = kernel_execve(sub_info->path,
                   (const char *const *)sub_info->argv,
                   (const char *const *)sub_info->envp);
out:
    sub_info->retval = retval;
    /*
     * call_usermodehelper_exec_sync() will call umh_complete
     * if UMH_WAIT_PROC.
     */
    if (!(sub_info->wait & UMH_WAIT_PROC))
        umh_complete(sub_info);
    if (!retval)
        return 0;
    do_exit(0);
}

/* Handles UMH_WAIT_PROC.  */
static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info)
{
    pid_t pid;

    /* If SIGCLD is ignored do_wait won't populate the status. */
    kernel_sigaction(SIGCHLD, SIG_DFL);
    pid = user_mode_thread(call_usermodehelper_exec_async, sub_info, SIGCHLD);
    if (pid < 0)
        sub_info->retval = pid;
    else
        kernel_wait(pid, &sub_info->retval);

    /* Restore default kernel sig handler */
    kernel_sigaction(SIGCHLD, SIG_IGN);
    umh_complete(sub_info);
}

/*
 * We need to create the usermodehelper kernel thread from a task that is
 * affine to an optimized set of CPUs (or nohz housekeeping ones) so that it
 * inherits the widest possible affinity, irrespective of call_usermodehelper()
 * callers that may have a reduced affinity (e.g. per-cpu workqueues). We don't
 * want usermodehelper targets to contend for a busy CPU.
 *
 * Unbound workqueues provide such wide affinity and allow blocking on
 * UMH_WAIT_PROC requests without blocking pending requests (up to some limit).
 *
 * Besides, workqueues provide the privilege level that the caller might not
 * have to perform the usermodehelper request.
 */
static void call_usermodehelper_exec_work(struct work_struct *work)
{
    struct subprocess_info *sub_info =
        container_of(work, struct subprocess_info, work);

    if (sub_info->wait & UMH_WAIT_PROC) {
        call_usermodehelper_exec_sync(sub_info);
    } else {
        pid_t pid;
        /*
         * Use CLONE_PARENT to reparent it to kthreadd; we do not
         * want to pollute current->children, and we need a parent
         * that always ignores SIGCHLD to ensure auto-reaping.
         */
        pid = user_mode_thread(call_usermodehelper_exec_async, sub_info,
                       CLONE_PARENT | SIGCHLD);
        if (pid < 0) {
            sub_info->retval = pid;
            umh_complete(sub_info);
        }
    }
}

/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
 * Should always be manipulated under umhelper_sem acquired for write.
 */
static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_disable() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
 * to become 'false'.
 */
static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_disable() fails
 */
#define RUNNING_HELPERS_TIMEOUT (5 * HZ)

int usermodehelper_read_trylock(void)
{
    DEFINE_WAIT(wait);
    int ret = 0;

    down_read(&umhelper_sem);
    for (;;) {
        prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
                TASK_INTERRUPTIBLE);
        if (!usermodehelper_disabled)
            break;

        if (usermodehelper_disabled == UMH_DISABLED)
            ret = -EAGAIN;

        up_read(&umhelper_sem);

        if (ret)
            break;

        schedule();
        try_to_freeze();

        down_read(&umhelper_sem);
    }
    finish_wait(&usermodehelper_disabled_waitq, &wait);
    return ret;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);
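
/*
 * Editor's sketch (not part of the original file): a typical caller that
 * must not race with helper disabling (e.g. across suspend) would bracket
 * its request roughly like this; path, argv and envp are hypothetical:
 *
 *	ret = usermodehelper_read_trylock();
 *	if (ret)
 *		return ret;	// -EAGAIN: helpers are disabled right now
 *	ret = call_usermodehelper(path, argv, envp, UMH_WAIT_EXEC);
 *	usermodehelper_read_unlock();
 */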

long usermodehelper_read_lock_wait(long timeout)
{
    DEFINE_WAIT(wait);

    if (timeout < 0)
        return -EINVAL;

    down_read(&umhelper_sem);
    for (;;) {
        prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
                TASK_UNINTERRUPTIBLE);
        if (!usermodehelper_disabled)
            break;

        up_read(&umhelper_sem);

        timeout = schedule_timeout(timeout);
        if (!timeout)
            break;

        down_read(&umhelper_sem);
    }
    finish_wait(&usermodehelper_disabled_waitq, &wait);
    return timeout;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);

void usermodehelper_read_unlock(void)
{
    up_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);

/**
 * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Change the value of usermodehelper_disabled (under umhelper_sem locked for
 * writing) and wake up tasks waiting for it to change.
 */
void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
{
    down_write(&umhelper_sem);
    usermodehelper_disabled = depth;
    wake_up(&usermodehelper_disabled_waitq);
    up_write(&umhelper_sem);
}

/**
 * __usermodehelper_disable - Prevent new helpers from being started.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
 */
int __usermodehelper_disable(enum umh_disable_depth depth)
{
    long retval;

    if (!depth)
        return -EINVAL;

    down_write(&umhelper_sem);
    usermodehelper_disabled = depth;
    up_write(&umhelper_sem);

    /*
     * From now on call_usermodehelper_exec() won't start any new
     * helpers, so it is sufficient if running_helpers turns out to
     * be zero at one point (it may be increased later, but that
     * doesn't matter).
     */
    retval = wait_event_timeout(running_helpers_waitq,
                    atomic_read(&running_helpers) == 0,
                    RUNNING_HELPERS_TIMEOUT);
    if (retval)
        return 0;

    __usermodehelper_set_disable_depth(UMH_ENABLED);
    return -EAGAIN;
}
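
/*
 * Editor's sketch (not part of the original file): callers normally reach
 * these two functions through wrappers such as usermodehelper_disable() and
 * usermodehelper_enable() (assumed to live in <linux/umh.h>). The PM core,
 * for example, disables helpers in two stages around freezing user space,
 * roughly:
 *
 *	if (__usermodehelper_disable(UMH_FREEZING))
 *		return -EAGAIN;		// helpers still running after timeout
 *	// ...freeze user space tasks...
 *	__usermodehelper_set_disable_depth(UMH_DISABLED);
 *	// ...and on thaw:
 *	__usermodehelper_set_disable_depth(UMH_ENABLED);
 */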

static void helper_lock(void)
{
    atomic_inc(&running_helpers);
    smp_mb__after_atomic();
}

static void helper_unlock(void)
{
    if (atomic_dec_and_test(&running_helpers))
        wake_up(&running_helpers_waitq);
}

/**
 * call_usermodehelper_setup - prepare to call a usermode helper
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @gfp_mask: gfp mask for memory allocation
 * @init: an init function
 * @cleanup: a cleanup function
 * @data: arbitrary context sensitive data
 *
 * Returns either %NULL on allocation failure, or a subprocess_info
 * structure.  This should be passed to call_usermodehelper_exec to
 * exec the process and free the structure.
 *
 * The init function is used to customize the helper process prior to
 * exec.  A non-zero return code causes the process to error out, exit,
 * and return the failure to the calling process.
 *
 * The cleanup function is called just before the subprocess_info is
 * freed.  This can be used for freeing the argv and envp.  The
 * function must be runnable in either a process context or the
 * context in which call_usermodehelper_exec is called.
 */
struct subprocess_info *call_usermodehelper_setup(const char *path, char **argv,
        char **envp, gfp_t gfp_mask,
        int (*init)(struct subprocess_info *info, struct cred *new),
        void (*cleanup)(struct subprocess_info *info),
        void *data)
{
    struct subprocess_info *sub_info;
    sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
    if (!sub_info)
        goto out;

    INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);

#ifdef CONFIG_STATIC_USERMODEHELPER
    sub_info->path = CONFIG_STATIC_USERMODEHELPER_PATH;
#else
    sub_info->path = path;
#endif
    sub_info->argv = argv;
    sub_info->envp = envp;

    sub_info->cleanup = cleanup;
    sub_info->init = init;
    sub_info->data = data;
  out:
    return sub_info;
}
EXPORT_SYMBOL(call_usermodehelper_setup);
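
/*
 * Editor's sketch (not part of the original file): a caller that builds argv
 * dynamically can use the cleanup callback to free it once the helper is
 * done. umh_free_argv() and run_my_helper() are hypothetical names:
 *
 *	static void umh_free_argv(struct subprocess_info *info)
 *	{
 *		kfree(info->argv);
 *	}
 *
 *	static int run_my_helper(const char *path, char **argv, char **envp)
 *	{
 *		struct subprocess_info *info;
 *
 *		info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL,
 *						 NULL, umh_free_argv, NULL);
 *		if (!info)
 *			return -ENOMEM;
 *
 *		// exec consumes info: it is freed (running cleanup) on all paths
 *		return call_usermodehelper_exec(info, UMH_WAIT_PROC);
 *	}
 */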

/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return its status.
 *        With UMH_NO_WAIT, don't wait at all, but you get no useful error back
 *        if the program couldn't be exec'ed.  This makes it safe to call
 *        from interrupt context.
 *
 * Runs a user-space application.  The application is started
 * asynchronously if @wait is not set, and runs as a child of system workqueues
 * (i.e. it runs with full root capabilities and optimized affinity).
 *
 * Note: successful return value does not guarantee the helper was called at
 * all. You can't rely on sub_info->{init,cleanup} being called even for
 * UMH_WAIT_* wait modes as STATIC_USERMODEHELPER_PATH="" turns all helpers
 * into a successful no-op.
 */
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
    DECLARE_COMPLETION_ONSTACK(done);
    int retval = 0;

    if (!sub_info->path) {
        call_usermodehelper_freeinfo(sub_info);
        return -EINVAL;
    }
    helper_lock();
    if (usermodehelper_disabled) {
        retval = -EBUSY;
        goto out;
    }

    /*
     * If there is no binary for us to call, then just return and get out of
     * here.  This allows us to set STATIC_USERMODEHELPER_PATH to "" and
     * disable all call_usermodehelper() calls.
     */
    if (strlen(sub_info->path) == 0)
        goto out;

    /*
     * Set the completion pointer only if there is a waiter.
     * This makes it possible to use umh_complete to free
     * the data structure in case of UMH_NO_WAIT.
     */
    sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
    sub_info->wait = wait;

    queue_work(system_unbound_wq, &sub_info->work);
    if (wait == UMH_NO_WAIT)    /* task has freed sub_info */
        goto unlock;

    if (wait & UMH_KILLABLE) {
        retval = wait_for_completion_killable(&done);
        if (!retval)
            goto wait_done;

        /* umh_complete() will see NULL and free sub_info */
        if (xchg(&sub_info->complete, NULL))
            goto unlock;
        /* fallthrough, umh_complete() was already called */
    }

    wait_for_completion(&done);
wait_done:
    retval = sub_info->retval;
out:
    call_usermodehelper_freeinfo(sub_info);
unlock:
    helper_unlock();
    return retval;
}
EXPORT_SYMBOL(call_usermodehelper_exec);
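
/*
 * Editor's sketch (not part of the original file): the wait modes differ in
 * who owns @sub_info afterwards. With UMH_NO_WAIT the queued work frees it,
 * so the caller must not touch it after this returns; combined with a
 * GFP_ATOMIC setup it is usable from atomic context:
 *
 *	info = call_usermodehelper_setup(path, argv, envp, GFP_ATOMIC,
 *					 NULL, NULL, NULL);
 *	if (info)
 *		call_usermodehelper_exec(info, UMH_NO_WAIT);
 *
 * UMH_WAIT_EXEC returns once the exec has been attempted, UMH_WAIT_PROC
 * waits for the helper to exit and reports its status, and UMH_KILLABLE may
 * be ORed in to let a fatal signal abort the wait.
 */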

/**
 * call_usermodehelper() - prepare and start a usermode application
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @wait: wait for the application to finish and return its status.
 *        With UMH_NO_WAIT, don't wait at all, but you get no useful error back
 *        if the program couldn't be exec'ed.  This makes it safe to call
 *        from interrupt context.
 *
 * This function is equivalent to calling call_usermodehelper_setup() and then
 * call_usermodehelper_exec().
 */
int call_usermodehelper(const char *path, char **argv, char **envp, int wait)
{
    struct subprocess_info *info;
    gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;

    info = call_usermodehelper_setup(path, argv, envp, gfp_mask,
                     NULL, NULL, NULL);
    if (info == NULL)
        return -ENOMEM;

    return call_usermodehelper_exec(info, wait);
}
EXPORT_SYMBOL(call_usermodehelper);
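
/*
 * Editor's sketch (not part of the original file): the classic one-shot
 * invocation, modelled on the old /sbin/hotplug callers. The path and
 * arguments below are illustrative only:
 *
 *	static int umh_run_hotplug(void)
 *	{
 *		char *argv[] = { "/sbin/hotplug", "add", NULL };
 *		char *envp[] = { "HOME=/",
 *				 "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };
 *
 *		return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
 *	}
 */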

static int proc_cap_handler(struct ctl_table *table, int write,
             void *buffer, size_t *lenp, loff_t *ppos)
{
    struct ctl_table t;
    unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
    kernel_cap_t new_cap;
    int err, i;

    if (write && (!capable(CAP_SETPCAP) ||
              !capable(CAP_SYS_MODULE)))
        return -EPERM;

    /*
     * convert from the global kernel_cap_t to the ulong array to print to
     * userspace if this is a read.
     */
    spin_lock(&umh_sysctl_lock);
    for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)  {
        if (table->data == CAP_BSET)
            cap_array[i] = usermodehelper_bset.cap[i];
        else if (table->data == CAP_PI)
            cap_array[i] = usermodehelper_inheritable.cap[i];
        else
            BUG();
    }
    spin_unlock(&umh_sysctl_lock);

    t = *table;
    t.data = &cap_array;

    /*
     * actually read or write an array of ulongs from userspace.  Remember
     * these are least significant 32 bits first
     */
    err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
    if (err < 0)
        return err;

    /*
     * convert from the sysctl array of ulongs to the kernel_cap_t
     * internal representation
     */
    for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
        new_cap.cap[i] = cap_array[i];

    /*
     * Drop everything not in the new_cap (but don't add things)
     */
    if (write) {
        spin_lock(&umh_sysctl_lock);
        if (table->data == CAP_BSET)
            usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
        if (table->data == CAP_PI)
            usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
        spin_unlock(&umh_sysctl_lock);
    }

    return 0;
}

struct ctl_table usermodehelper_table[] = {
    {
        .procname   = "bset",
        .data       = CAP_BSET,
        .maxlen     = _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
        .mode       = 0600,
        .proc_handler   = proc_cap_handler,
    },
    {
        .procname   = "inheritable",
        .data       = CAP_PI,
        .maxlen     = _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
        .mode       = 0600,
        .proc_handler   = proc_cap_handler,
    },
    { }
};
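
/*
 * Editor's note (not part of the original file): this table is registered by
 * code outside this listing; in mainline the two entries appear as
 * /proc/sys/kernel/usermodehelper/bset and
 * /proc/sys/kernel/usermodehelper/inheritable. Because proc_cap_handler()
 * only ever intersects the stored masks with the written value, a write can
 * remove capabilities from future helpers but can never add them back.
 */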