/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
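/*
 * Each of the maps below is a single identity extent: first = 0,
 * lower_first = 0 and a count covering ids 0..4294967294, so in the
 * initial namespace every uid, gid and projid maps to itself (for
 * example, kuid 1000 is simply uid 1000).
 */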
struct user_namespace init_user_ns = {
    .uid_map = {
        .nr_extents = 1,
        .extent[0] = {
            .first = 0,
            .lower_first = 0,
            .count = 4294967295U,
        },
    },
    .gid_map = {
        .nr_extents = 1,
        .extent[0] = {
            .first = 0,
            .lower_first = 0,
            .count = 4294967295U,
        },
    },
    .projid_map = {
        .nr_extents = 1,
        .extent[0] = {
            .first = 0,
            .lower_first = 0,
            .count = 4294967295U,
        },
    },
    .count = ATOMIC_INIT(3),
    .owner = GLOBAL_ROOT_UID,
    .group = GLOBAL_ROOT_GID,
    .ns.inum = PROC_USER_INIT_INO,
#ifdef CONFIG_USER_NS
    .ns.ops = &userns_operations,
#endif
    .flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_PERSISTENT_KEYRINGS
    .persistent_keyring_register_sem =
    __RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_BITS    (CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ  (1 << UIDHASH_BITS)
#define UIDHASH_MASK        (UIDHASH_SZ - 1)
#define __uidhashfn(uid)    (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)   (uidhash_table + __uidhashfn((__kuid_val(uid))))
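
/*
 * Worked example, assuming CONFIG_BASE_SMALL is 0 so that UIDHASH_BITS
 * is 7, UIDHASH_SZ is 128 and UIDHASH_MASK is 127:
 *
 *   __uidhashfn(1000) = ((1000 >> 7) + 1000) & 127
 *                     = (7 + 1000) & 127
 *                     = 111
 *
 * i.e. uid 1000 lands in bucket uidhash_table[111].
 */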

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
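/*
 * Hence free_uid() below disables interrupts itself and uses
 * atomic_dec_and_lock(), taking uidhash_lock only when the final
 * reference is dropped, rather than relying on local_bh_disable()/
 * local_bh_enable() around the hash lock.
 */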
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
    .__count    = ATOMIC_INIT(1),
    .processes  = ATOMIC_INIT(1),
    .sigpending = ATOMIC_INIT(0),
    .locked_shm     = 0,
    .uid        = GLOBAL_ROOT_UID,
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
    hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
    hlist_del_init(&up->uidhash_node);
}

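/*
 * Look up @uid in the given hash bucket.  On a hit the reference count
 * is bumped before returning, so the caller ends up owning a reference
 * on the user_struct it gets back.
 */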
static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
    struct user_struct *user;

    hlist_for_each_entry(user, hashent, uidhash_node) {
        if (uid_eq(user->uid, uid)) {
            atomic_inc(&user->__count);
            return user;
        }
    }

    return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
    __releases(&uidhash_lock)
{
    uid_hash_remove(up);
    spin_unlock_irqrestore(&uidhash_lock, flags);
    key_put(up->uid_keyring);
    key_put(up->session_keyring);
    kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
    struct user_struct *ret;
    unsigned long flags;

    spin_lock_irqsave(&uidhash_lock, flags);
    ret = uid_hash_find(uid, uidhashentry(uid));
    spin_unlock_irqrestore(&uidhash_lock, flags);
    return ret;
}
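
/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): find_user() hands back a referenced user_struct, and free_uid()
 * drops that reference, unhashing and freeing the structure when the
 * last reference goes away.
 */
#if 0
static void example_peek_at_user(kuid_t some_uid)
{
    struct user_struct *up = find_user(some_uid);

    if (up) {
        /* ... inspect up->processes, up->sigpending, etc. ... */
        free_uid(up);   /* undo the reference taken by find_user() */
    }
}
#endif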

void free_uid(struct user_struct *up)
{
    unsigned long flags;

    if (!up)
        return;

    local_irq_save(flags);
    if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
        free_user(up, flags);
    else
        local_irq_restore(flags);
}

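/*
 * Find the user_struct for @uid, allocating and hashing a new one if it
 * does not exist yet.  The first lookup is done without holding the lock
 * across the GFP_KERNEL allocation, so the hash is re-checked under
 * uidhash_lock before inserting and the loser of any race is freed.
 * Returns NULL if the allocation fails.
 */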
struct user_struct *alloc_uid(kuid_t uid)
{
    struct hlist_head *hashent = uidhashentry(uid);
    struct user_struct *up, *new;

    spin_lock_irq(&uidhash_lock);
    up = uid_hash_find(uid, hashent);
    spin_unlock_irq(&uidhash_lock);

    if (!up) {
        new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
        if (!new)
            goto out_unlock;

        new->uid = uid;
        atomic_set(&new->__count, 1);

        /*
         * Before adding this, check whether we raced
         * on adding the same user already..
         */
        spin_lock_irq(&uidhash_lock);
        up = uid_hash_find(uid, hashent);
        if (up) {
            key_put(new->uid_keyring);
            key_put(new->session_keyring);
            kmem_cache_free(uid_cachep, new);
        } else {
            uid_hash_insert(new, hashent);
            up = new;
        }
        spin_unlock_irq(&uidhash_lock);
    }

    return up;

out_unlock:
    return NULL;
}
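
/*
 * Illustrative usage sketch (hypothetical helper, not part of this
 * file), loosely modelled on set_user()-style callers that switch a
 * credential over to a new uid: take a reference via alloc_uid(), then
 * swap it into the cred and drop the old one.
 */
#if 0
static int example_switch_user(struct cred *new, kuid_t kuid)
{
    struct user_struct *new_user = alloc_uid(kuid);

    if (!new_user)
        return -EAGAIN;

    free_uid(new->user);    /* drop the reference on the old user_struct */
    new->user = new_user;   /* cred now owns the reference from alloc_uid() */
    return 0;
}
#endif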

static int __init uid_cache_init(void)
{
    int n;

    uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
            0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

    for (n = 0; n < UIDHASH_SZ; ++n)
        INIT_HLIST_HEAD(uidhash_table + n);

    /* Insert the root user immediately (init already runs as root) */
    spin_lock_irq(&uidhash_lock);
    uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
    spin_unlock_irq(&uidhash_lock);

    return 0;
}
subsys_initcall(uid_cache_init);