Back to home page

LXR

 
 

    


0001 /*
0002     kmod, the new module loader (replaces kerneld)
0003     Kirk Petersen
0004 
0005     Reorganized not to be a daemon by Adam Richter, with guidance
0006     from Greg Zornetzer.
0007 
0008     Modified to avoid chroot and file sharing problems.
0009     Mikael Pettersson
0010 
0011     Limit the concurrent number of kmod modprobes to catch loops from
0012     "modprobe needs a service that is in a module".
0013     Keith Owens <kaos@ocs.com.au> December 1999
0014 
0015     Unblock all signals when we exec a usermode process.
0016     Shuu Yamaguchi <shuu@wondernetworkresources.com> December 2000
0017 
0018     call_usermodehelper wait flag, and remove exec_usermodehelper.
0019     Rusty Russell <rusty@rustcorp.com.au>  Jan 2003
0020 */
0021 #include <linux/module.h>
0022 #include <linux/sched.h>
0023 #include <linux/syscalls.h>
0024 #include <linux/unistd.h>
0025 #include <linux/kmod.h>
0026 #include <linux/slab.h>
0027 #include <linux/completion.h>
0028 #include <linux/cred.h>
0029 #include <linux/file.h>
0030 #include <linux/fdtable.h>
0031 #include <linux/workqueue.h>
0032 #include <linux/security.h>
0033 #include <linux/mount.h>
0034 #include <linux/kernel.h>
0035 #include <linux/init.h>
0036 #include <linux/resource.h>
0037 #include <linux/notifier.h>
0038 #include <linux/suspend.h>
0039 #include <linux/rwsem.h>
0040 #include <linux/ptrace.h>
0041 #include <linux/async.h>
0042 #include <linux/uaccess.h>
0043 
0044 #include <trace/events/module.h>
0045 
extern int max_threads;

/*
 * Sentinel values stored in ctl_table->data so proc_cap_handler() can tell
 * which capability set a sysctl entry refers to.  Never dereferenced.
 */
#define CAP_BSET	(void *)1
#define CAP_PI		(void *)2

/* Capability masks ANDed into every usermode helper's creds; both are
 * protected by umh_sysctl_lock. */
static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
/* Read-held while helpers run; write-held to change usermodehelper_disabled. */
static DECLARE_RWSEM(umhelper_sem);

#ifdef CONFIG_MODULES

/*
	modprobe_path is set via /proc/sys.
*/
char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
0062 
0063 static void free_modprobe_argv(struct subprocess_info *info)
0064 {
0065     kfree(info->argv[3]); /* check call_modprobe() */
0066     kfree(info->argv);
0067 }
0068 
0069 static int call_modprobe(char *module_name, int wait)
0070 {
0071     struct subprocess_info *info;
0072     static char *envp[] = {
0073         "HOME=/",
0074         "TERM=linux",
0075         "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
0076         NULL
0077     };
0078 
0079     char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
0080     if (!argv)
0081         goto out;
0082 
0083     module_name = kstrdup(module_name, GFP_KERNEL);
0084     if (!module_name)
0085         goto free_argv;
0086 
0087     argv[0] = modprobe_path;
0088     argv[1] = "-q";
0089     argv[2] = "--";
0090     argv[3] = module_name;  /* check free_modprobe_argv() */
0091     argv[4] = NULL;
0092 
0093     info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
0094                      NULL, free_modprobe_argv, NULL);
0095     if (!info)
0096         goto free_module_name;
0097 
0098     return call_usermodehelper_exec(info, wait | UMH_KILLABLE);
0099 
0100 free_module_name:
0101     kfree(module_name);
0102 free_argv:
0103     kfree(argv);
0104 out:
0105     return -ENOMEM;
0106 }
0107 
/**
 * __request_module - try to load a kernel module
 * @wait: wait (or not) for the operation to complete
 * @fmt: printf style format string for the name of the module
 * @...: arguments as specified in the format string
 *
 * Load a module using the user mode module loader. The function returns
 * zero on success or a negative errno code or positive exit code from
 * "modprobe" on failure. Note that a successful module load does not mean
 * the module did not then unload and exit on an error of its own. Callers
 * must check that the service they requested is now available and not
 * blindly invoke it.
 *
 * If module auto-loading support is disabled then this function
 * becomes a no-operation.
 */
int __request_module(bool wait, const char *fmt, ...)
{
	va_list args;
	char module_name[MODULE_NAME_LEN];
	unsigned int max_modprobes;
	int ret;
	static atomic_t kmod_concurrent = ATOMIC_INIT(0);
#define MAX_KMOD_CONCURRENT 50	/* Completely arbitrary value - KAO */
	static int kmod_loop_msg;

	/*
	 * We don't allow synchronous module loading from async.  Module
	 * init may invoke async_synchronize_full() which will end up
	 * waiting for this task which already is waiting for the module
	 * loading to complete, leading to a deadlock.
	 */
	WARN_ON_ONCE(wait && current_is_async());

	/* An empty modprobe_path disables module auto-loading entirely. */
	if (!modprobe_path[0])
		return 0;

	va_start(args, fmt);
	ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
	va_end(args);
	if (ret >= MODULE_NAME_LEN)
		return -ENAMETOOLONG;

	/* Let the LSM veto the request before any work is done. */
	ret = security_kernel_module_request(module_name);
	if (ret)
		return ret;

	/* If modprobe needs a service that is in a module, we get a recursive
	 * loop.  Limit the number of running kmod threads to max_threads/2 or
	 * MAX_KMOD_CONCURRENT, whichever is the smaller.  A cleaner method
	 * would be to run the parents of this process, counting how many times
	 * kmod was invoked.  That would mean accessing the internals of the
	 * process tables to get the command line, proc_pid_cmdline is static
	 * and it is not worth changing the proc code just to handle this case.
	 * KAO.
	 *
	 * "trace the ppid" is simple, but will fail if someone's
	 * parent exits.  I think this is as good as it gets. --RR
	 */
	max_modprobes = min(max_threads/2, MAX_KMOD_CONCURRENT);
	/* inc-then-read may briefly overshoot the limit under contention;
	 * that slack is acceptable for a loop detector. */
	atomic_inc(&kmod_concurrent);
	if (atomic_read(&kmod_concurrent) > max_modprobes) {
		/* We may be blaming an innocent here, but unlikely */
		if (kmod_loop_msg < 5) {
			printk(KERN_ERR
			       "request_module: runaway loop modprobe %s\n",
			       module_name);
			kmod_loop_msg++;
		}
		atomic_dec(&kmod_concurrent);
		return -ENOMEM;
	}

	trace_module_request(module_name, wait, _RET_IP_);

	ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);

	atomic_dec(&kmod_concurrent);
	return ret;
}
EXPORT_SYMBOL(__request_module);
0189 #endif /* CONFIG_MODULES */
0190 
0191 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
0192 {
0193     if (info->cleanup)
0194         (*info->cleanup)(info);
0195     kfree(info);
0196 }
0197 
/* Signal completion of a helper, or free sub_info if nobody is waiting.
 * Exactly one of umh_complete() and the exec'ing waiter wins the xchg()
 * on ->complete; the winner that sees NULL owns (and must free) sub_info. */
static void umh_complete(struct subprocess_info *sub_info)
{
	struct completion *comp = xchg(&sub_info->complete, NULL);
	/*
	 * See call_usermodehelper_exec(). If xchg() returns NULL
	 * we own sub_info, the UMH_KILLABLE caller has gone away
	 * or the caller used UMH_NO_WAIT.
	 */
	if (comp)
		complete(comp);
	else
		call_usermodehelper_freeinfo(sub_info);
}
0211 
/*
 * This is the task which runs the usermode application.
 *
 * Runs in a fresh kernel thread: resets signal handlers, builds a new set
 * of kernel credentials (restricted by the usermodehelper capability
 * sysctls), runs the optional per-request init hook, then execs the
 * target binary.  On any failure the error is stored in ->retval.
 */
static int call_usermodehelper_exec_async(void *data)
{
	struct subprocess_info *sub_info = data;
	struct cred *new;
	int retval;

	/* Drop any handlers inherited from the spawning thread. */
	spin_lock_irq(&current->sighand->siglock);
	flush_signal_handlers(current, 1);
	spin_unlock_irq(&current->sighand->siglock);

	/*
	 * Our parent (unbound workqueue) runs with elevated scheduling
	 * priority. Avoid propagating that into the userspace child.
	 */
	set_user_nice(current, 0);

	retval = -ENOMEM;
	new = prepare_kernel_cred(current);
	if (!new)
		goto out;

	/* Clamp the child's capabilities to the sysctl-configured masks. */
	spin_lock(&umh_sysctl_lock);
	new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
	new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
					     new->cap_inheritable);
	spin_unlock(&umh_sysctl_lock);

	if (sub_info->init) {
		retval = sub_info->init(sub_info, new);
		if (retval) {
			abort_creds(new);
			goto out;
		}
	}

	commit_creds(new);

	/* Kernel threads span the whole address space, so the __user casts
	 * on these kernel pointers are safe here. */
	retval = do_execve(getname_kernel(sub_info->path),
			   (const char __user *const __user *)sub_info->argv,
			   (const char __user *const __user *)sub_info->envp);
out:
	sub_info->retval = retval;
	/*
	 * call_usermodehelper_exec_sync() will call umh_complete
	 * if UMH_WAIT_PROC.
	 */
	if (!(sub_info->wait & UMH_WAIT_PROC))
		umh_complete(sub_info);
	if (!retval)
		return 0;
	do_exit(0);
}
0267 
/* Handles UMH_WAIT_PROC: fork the helper thread and reap it synchronously,
 * propagating its exit status into sub_info->retval. */
static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info)
{
	pid_t pid;

	/* If SIGCLD is ignored sys_wait4 won't populate the status. */
	kernel_sigaction(SIGCHLD, SIG_DFL);
	pid = kernel_thread(call_usermodehelper_exec_async, sub_info, SIGCHLD);
	if (pid < 0) {
		/* Fork itself failed; report the errno to the waiter. */
		sub_info->retval = pid;
	} else {
		int ret = -ECHILD;
		/*
		 * Normally it is bogus to call wait4() from in-kernel because
		 * wait4() wants to write the exit code to a userspace address.
		 * But call_usermodehelper_exec_sync() always runs as kernel
		 * thread (workqueue) and put_user() to a kernel address works
		 * OK for kernel threads, due to their having an mm_segment_t
		 * which spans the entire address space.
		 *
		 * Thus the __user pointer cast is valid here.
		 */
		sys_wait4(pid, (int __user *)&ret, 0, NULL);

		/*
		 * If ret is 0, either call_usermodehelper_exec_async failed and
		 * the real error code is already in sub_info->retval or
		 * sub_info->retval is 0 anyway, so don't mess with it then.
		 */
		if (ret)
			sub_info->retval = ret;
	}

	/* Restore default kernel sig handler */
	kernel_sigaction(SIGCHLD, SIG_IGN);

	umh_complete(sub_info);
}
0306 
/*
 * We need to create the usermodehelper kernel thread from a task that is affine
 * to an optimized set of CPUs (or nohz housekeeping ones) such that they
 * inherit a widest affinity irrespective of call_usermodehelper() callers with
 * possibly reduced affinity (eg: per-cpu workqueues). We don't want
 * usermodehelper targets to contend a busy CPU.
 *
 * Unbound workqueues provide such wide affinity and allow to block on
 * UMH_WAIT_PROC requests without blocking pending request (up to some limit).
 *
 * Besides, workqueues provide the privilege level that caller might not have
 * to perform the usermodehelper request.
 *
 */
static void call_usermodehelper_exec_work(struct work_struct *work)
{
	struct subprocess_info *sub_info =
		container_of(work, struct subprocess_info, work);

	if (sub_info->wait & UMH_WAIT_PROC) {
		/* Waited-for helpers are reaped synchronously here. */
		call_usermodehelper_exec_sync(sub_info);
	} else {
		pid_t pid;
		/*
		 * Use CLONE_PARENT to reparent it to kthreadd; we do not
		 * want to pollute current->children, and we need a parent
		 * that always ignores SIGCHLD to ensure auto-reaping.
		 */
		pid = kernel_thread(call_usermodehelper_exec_async, sub_info,
				    CLONE_PARENT | SIGCHLD);
		if (pid < 0) {
			/* Fork failed: report it and wake/free the waiter. */
			sub_info->retval = pid;
			umh_complete(sub_info);
		}
	}
}
0343 
/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
 * Should always be manipulated under umhelper_sem acquired for write.
 */
static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;

/* Number of helpers running; incremented/decremented via helper_lock()
 * and helper_unlock(). */
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_disable() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
 * to become 'false'.
 */
static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_disable() fails
 */
#define RUNNING_HELPERS_TIMEOUT	(5 * HZ)
0372 
/*
 * Take umhelper_sem for reading unless helpers are fully disabled.
 *
 * Returns 0 with umhelper_sem read-held on success.  If helpers are
 * hard-disabled (UMH_DISABLED) returns -EAGAIN without the lock.  For
 * softer disable depths, sleeps (freezably) until helpers are re-enabled.
 */
int usermodehelper_read_trylock(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_INTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		if (usermodehelper_disabled == UMH_DISABLED)
			ret = -EAGAIN;

		/* Drop the lock before sleeping (or bailing out) so the
		 * writer can change usermodehelper_disabled. */
		up_read(&umhelper_sem);

		if (ret)
			break;

		schedule();
		try_to_freeze();

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return ret;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);
0402 
/*
 * Take umhelper_sem for reading, waiting up to @timeout jiffies for helpers
 * to be re-enabled.
 *
 * Returns the remaining timeout (> 0, lock held) on success, 0 if the wait
 * timed out (lock NOT held), or -EINVAL for a negative @timeout.
 */
long usermodehelper_read_lock_wait(long timeout)
{
	DEFINE_WAIT(wait);

	if (timeout < 0)
		return -EINVAL;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		/* Release the lock while sleeping so the writer can make
		 * progress; timing out leaves the lock released. */
		up_read(&umhelper_sem);

		timeout = schedule_timeout(timeout);
		if (!timeout)
			break;

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return timeout;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);
0429 
0430 void usermodehelper_read_unlock(void)
0431 {
0432     up_read(&umhelper_sem);
0433 }
0434 EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
0435 
/**
 * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Change the value of usermodehelper_disabled (under umhelper_sem locked for
 * writing) and wakeup tasks waiting for it to change.
 */
void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
{
	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	/* Wake readers parked in the read_trylock/lock_wait loops. */
	wake_up(&usermodehelper_disabled_waitq);
	up_write(&umhelper_sem);
}
0450 
/**
 * __usermodehelper_disable - Prevent new helpers from being started.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
 *
 * Returns 0 once all helpers have drained, or -EAGAIN (re-enabling helpers)
 * if they fail to drain within RUNNING_HELPERS_TIMEOUT.
 */
int __usermodehelper_disable(enum umh_disable_depth depth)
{
	long retval;

	/* UMH_ENABLED (0) is not a valid disable depth. */
	if (!depth)
		return -EINVAL;

	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	up_write(&umhelper_sem);

	/*
	 * From now on call_usermodehelper_exec() won't start any new
	 * helpers, so it is sufficient if running_helpers turns out to
	 * be zero at one point (it may be increased later, but that
	 * doesn't matter).
	 */
	retval = wait_event_timeout(running_helpers_waitq,
				    atomic_read(&running_helpers) == 0,
				    RUNNING_HELPERS_TIMEOUT);
	if (retval)
		return 0;

	/* Timed out: back out the disable so the system stays usable. */
	__usermodehelper_set_disable_depth(UMH_ENABLED);
	return -EAGAIN;
}
0483 
/* Account a running helper.  The barrier orders the increment against the
 * subsequent usermodehelper_disabled check in call_usermodehelper_exec(),
 * pairing with the wait in __usermodehelper_disable(). */
static void helper_lock(void)
{
	atomic_inc(&running_helpers);
	smp_mb__after_atomic();
}
0489 
/* Drop the running-helper count; the last one out wakes
 * __usermodehelper_disable() waiting for the drain. */
static void helper_unlock(void)
{
	if (atomic_dec_and_test(&running_helpers))
		wake_up(&running_helpers_waitq);
}
0495 
0496 /**
0497  * call_usermodehelper_setup - prepare to call a usermode helper
0498  * @path: path to usermode executable
0499  * @argv: arg vector for process
0500  * @envp: environment for process
0501  * @gfp_mask: gfp mask for memory allocation
0502  * @cleanup: a cleanup function
0503  * @init: an init function
0504  * @data: arbitrary context sensitive data
0505  *
0506  * Returns either %NULL on allocation failure, or a subprocess_info
0507  * structure.  This should be passed to call_usermodehelper_exec to
0508  * exec the process and free the structure.
0509  *
0510  * The init function is used to customize the helper process prior to
0511  * exec.  A non-zero return code causes the process to error out, exit,
0512  * and return the failure to the calling process
0513  *
0514  * The cleanup function is just before ethe subprocess_info is about to
0515  * be freed.  This can be used for freeing the argv and envp.  The
0516  * Function must be runnable in either a process context or the
0517  * context in which call_usermodehelper_exec is called.
0518  */
0519 struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
0520         char **envp, gfp_t gfp_mask,
0521         int (*init)(struct subprocess_info *info, struct cred *new),
0522         void (*cleanup)(struct subprocess_info *info),
0523         void *data)
0524 {
0525     struct subprocess_info *sub_info;
0526     sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
0527     if (!sub_info)
0528         goto out;
0529 
0530     INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);
0531     sub_info->path = path;
0532     sub_info->argv = argv;
0533     sub_info->envp = envp;
0534 
0535     sub_info->cleanup = cleanup;
0536     sub_info->init = init;
0537     sub_info->data = data;
0538   out:
0539     return sub_info;
0540 }
0541 EXPORT_SYMBOL(call_usermodehelper_setup);
0542 
/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * Runs a user-space application.  The application is started
 * asynchronously if wait is not set, and runs as a child of system workqueues.
 * (ie. it runs with full root capabilities and optimized affinity).
 *
 * Consumes @sub_info in all cases: it is freed here, by the work item, or
 * by umh_complete(), depending on who loses the ->complete xchg() race.
 */
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int retval = 0;

	if (!sub_info->path) {
		call_usermodehelper_freeinfo(sub_info);
		return -EINVAL;
	}
	helper_lock();
	if (usermodehelper_disabled) {
		retval = -EBUSY;
		goto out;
	}
	/*
	 * Set the completion pointer only if there is a waiter.
	 * This makes it possible to use umh_complete to free
	 * the data structure in case of UMH_NO_WAIT.
	 */
	sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
	sub_info->wait = wait;

	queue_work(system_unbound_wq, &sub_info->work);
	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
		goto unlock;

	if (wait & UMH_KILLABLE) {
		retval = wait_for_completion_killable(&done);
		if (!retval)
			goto wait_done;

		/* umh_complete() will see NULL and free sub_info */
		if (xchg(&sub_info->complete, NULL))
			goto unlock;
		/* fallthrough, umh_complete() was already called */
	}

	wait_for_completion(&done);
wait_done:
	retval = sub_info->retval;
out:
	call_usermodehelper_freeinfo(sub_info);
unlock:
	helper_unlock();
	return retval;
}
EXPORT_SYMBOL(call_usermodehelper_exec);
0602 
0603 /**
0604  * call_usermodehelper() - prepare and start a usermode application
0605  * @path: path to usermode executable
0606  * @argv: arg vector for process
0607  * @envp: environment for process
0608  * @wait: wait for the application to finish and return status.
0609  *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
0610  *        when the program couldn't be exec'ed. This makes it safe to call
0611  *        from interrupt context.
0612  *
0613  * This function is the equivalent to use call_usermodehelper_setup() and
0614  * call_usermodehelper_exec().
0615  */
0616 int call_usermodehelper(char *path, char **argv, char **envp, int wait)
0617 {
0618     struct subprocess_info *info;
0619     gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
0620 
0621     info = call_usermodehelper_setup(path, argv, envp, gfp_mask,
0622                      NULL, NULL, NULL);
0623     if (info == NULL)
0624         return -ENOMEM;
0625 
0626     return call_usermodehelper_exec(info, wait);
0627 }
0628 EXPORT_SYMBOL(call_usermodehelper);
0629 
/*
 * Sysctl handler for /proc/sys/kernel/usermodehelper/{bset,inheritable}.
 *
 * Reads report the current capability mask as an array of ulongs; writes
 * may only DROP capabilities (the new value is intersected with the old),
 * and require both CAP_SETPCAP and CAP_SYS_MODULE.
 */
static int proc_cap_handler(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
	kernel_cap_t new_cap;
	int err, i;

	if (write && (!capable(CAP_SETPCAP) ||
		      !capable(CAP_SYS_MODULE)))
		return -EPERM;

	/*
	 * convert from the global kernel_cap_t to the ulong array to print to
	 * userspace if this is a read.
	 */
	spin_lock(&umh_sysctl_lock);
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)  {
		/* table->data holds a sentinel, not a real pointer. */
		if (table->data == CAP_BSET)
			cap_array[i] = usermodehelper_bset.cap[i];
		else if (table->data == CAP_PI)
			cap_array[i] = usermodehelper_inheritable.cap[i];
		else
			BUG();
	}
	spin_unlock(&umh_sysctl_lock);

	/* Point a scratch ctl_table at the snapshot for the generic helper. */
	t = *table;
	t.data = &cap_array;

	/*
	 * actually read or write an array of ulongs from userspace.  Remember
	 * these are least significant 32 bits first
	 */
	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;

	/*
	 * convert from the sysctl array of ulongs to the kernel_cap_t
	 * internal representation
	 */
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
		new_cap.cap[i] = cap_array[i];

	/*
	 * Drop everything not in the new_cap (but don't add things)
	 */
	spin_lock(&umh_sysctl_lock);
	if (write) {
		if (table->data == CAP_BSET)
			usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
		if (table->data == CAP_PI)
			usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
	}
	spin_unlock(&umh_sysctl_lock);

	return 0;
}
0689 
/* Sysctl entries under kernel/usermodehelper/; both are root-only (0600)
 * and dispatch to proc_cap_handler() via the CAP_* sentinel in .data. */
struct ctl_table usermodehelper_table[] = {
	{
		.procname	= "bset",
		.data		= CAP_BSET,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{
		.procname	= "inheritable",
		.data		= CAP_PI,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{ }		/* sentinel terminating the table */
};