/* (LXR web-browser navigation chrome removed from this source capture) */
0001 /*
0002  *  linux/fs/fcntl.c
0003  *
0004  *  Copyright (C) 1991, 1992  Linus Torvalds
0005  */
0006 
0007 #include <linux/syscalls.h>
0008 #include <linux/init.h>
0009 #include <linux/mm.h>
0010 #include <linux/fs.h>
0011 #include <linux/file.h>
0012 #include <linux/fdtable.h>
0013 #include <linux/capability.h>
0014 #include <linux/dnotify.h>
0015 #include <linux/slab.h>
0016 #include <linux/module.h>
0017 #include <linux/pipe_fs_i.h>
0018 #include <linux/security.h>
0019 #include <linux/ptrace.h>
0020 #include <linux/signal.h>
0021 #include <linux/rcupdate.h>
0022 #include <linux/pid_namespace.h>
0023 #include <linux/user_namespace.h>
0024 #include <linux/shmem_fs.h>
0025 
0026 #include <asm/poll.h>
0027 #include <asm/siginfo.h>
0028 #include <linux/uaccess.h>
0029 
0030 #define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
0031 
0032 static int setfl(int fd, struct file * filp, unsigned long arg)
0033 {
0034     struct inode * inode = file_inode(filp);
0035     int error = 0;
0036 
0037     /*
0038      * O_APPEND cannot be cleared if the file is marked as append-only
0039      * and the file is open for write.
0040      */
0041     if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
0042         return -EPERM;
0043 
0044     /* O_NOATIME can only be set by the owner or superuser */
0045     if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
0046         if (!inode_owner_or_capable(inode))
0047             return -EPERM;
0048 
0049     /* required for strict SunOS emulation */
0050     if (O_NONBLOCK != O_NDELAY)
0051            if (arg & O_NDELAY)
0052            arg |= O_NONBLOCK;
0053 
0054     /* Pipe packetized mode is controlled by O_DIRECT flag */
0055     if (!S_ISFIFO(inode->i_mode) && (arg & O_DIRECT)) {
0056         if (!filp->f_mapping || !filp->f_mapping->a_ops ||
0057             !filp->f_mapping->a_ops->direct_IO)
0058                 return -EINVAL;
0059     }
0060 
0061     if (filp->f_op->check_flags)
0062         error = filp->f_op->check_flags(arg);
0063     if (error)
0064         return error;
0065 
0066     /*
0067      * ->fasync() is responsible for setting the FASYNC bit.
0068      */
0069     if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op->fasync) {
0070         error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
0071         if (error < 0)
0072             goto out;
0073         if (error > 0)
0074             error = 0;
0075     }
0076     spin_lock(&filp->f_lock);
0077     filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
0078     spin_unlock(&filp->f_lock);
0079 
0080  out:
0081     return error;
0082 }
0083 
/*
 * Install @pid (of kind @type) as the owner of @filp for SIGIO/SIGURG
 * delivery.  With @force == 0 the owner is only set if none is
 * currently installed; non-zero @force replaces it unconditionally.
 * The caller's uid/euid are recorded for the permission check done at
 * signal-delivery time (see sigio_perm()).
 */
static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
                     int force)
{
    write_lock_irq(&filp->f_owner.lock);
    if (force || !filp->f_owner.pid) {
        /* Drop the reference on the old owner, take one on the new. */
        put_pid(filp->f_owner.pid);
        filp->f_owner.pid = get_pid(pid);
        filp->f_owner.pid_type = type;

        if (pid) {
            const struct cred *cred = current_cred();
            filp->f_owner.uid = cred->uid;
            filp->f_owner.euid = cred->euid;
        }
    }
    write_unlock_irq(&filp->f_owner.lock);
}
0101 
/*
 * Set the file owner, letting the security module record the new
 * owner's credentials first (security_file_set_fowner()).
 */
void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
        int force)
{
    security_file_set_fowner(filp);
    f_modown(filp, pid, type, force);
}
EXPORT_SYMBOL(__f_setown);
0109 
0110 void f_setown(struct file *filp, unsigned long arg, int force)
0111 {
0112     enum pid_type type;
0113     struct pid *pid;
0114     int who = arg;
0115     type = PIDTYPE_PID;
0116     if (who < 0) {
0117         type = PIDTYPE_PGID;
0118         who = -who;
0119     }
0120     rcu_read_lock();
0121     pid = find_vpid(who);
0122     __f_setown(filp, pid, type, force);
0123     rcu_read_unlock();
0124 }
0125 EXPORT_SYMBOL(f_setown);
0126 
/* Clear the file owner unconditionally (force == 1, NULL pid). */
void f_delown(struct file *filp)
{
    f_modown(filp, NULL, PIDTYPE_PID, 1);
}
0131 
0132 pid_t f_getown(struct file *filp)
0133 {
0134     pid_t pid;
0135     read_lock(&filp->f_owner.lock);
0136     pid = pid_vnr(filp->f_owner.pid);
0137     if (filp->f_owner.pid_type == PIDTYPE_PGID)
0138         pid = -pid;
0139     read_unlock(&filp->f_owner.lock);
0140     return pid;
0141 }
0142 
0143 static int f_setown_ex(struct file *filp, unsigned long arg)
0144 {
0145     struct f_owner_ex __user *owner_p = (void __user *)arg;
0146     struct f_owner_ex owner;
0147     struct pid *pid;
0148     int type;
0149     int ret;
0150 
0151     ret = copy_from_user(&owner, owner_p, sizeof(owner));
0152     if (ret)
0153         return -EFAULT;
0154 
0155     switch (owner.type) {
0156     case F_OWNER_TID:
0157         type = PIDTYPE_MAX;
0158         break;
0159 
0160     case F_OWNER_PID:
0161         type = PIDTYPE_PID;
0162         break;
0163 
0164     case F_OWNER_PGRP:
0165         type = PIDTYPE_PGID;
0166         break;
0167 
0168     default:
0169         return -EINVAL;
0170     }
0171 
0172     rcu_read_lock();
0173     pid = find_vpid(owner.pid);
0174     if (owner.pid && !pid)
0175         ret = -ESRCH;
0176     else
0177          __f_setown(filp, pid, type, 1);
0178     rcu_read_unlock();
0179 
0180     return ret;
0181 }
0182 
0183 static int f_getown_ex(struct file *filp, unsigned long arg)
0184 {
0185     struct f_owner_ex __user *owner_p = (void __user *)arg;
0186     struct f_owner_ex owner;
0187     int ret = 0;
0188 
0189     read_lock(&filp->f_owner.lock);
0190     owner.pid = pid_vnr(filp->f_owner.pid);
0191     switch (filp->f_owner.pid_type) {
0192     case PIDTYPE_MAX:
0193         owner.type = F_OWNER_TID;
0194         break;
0195 
0196     case PIDTYPE_PID:
0197         owner.type = F_OWNER_PID;
0198         break;
0199 
0200     case PIDTYPE_PGID:
0201         owner.type = F_OWNER_PGRP;
0202         break;
0203 
0204     default:
0205         WARN_ON(1);
0206         ret = -EINVAL;
0207         break;
0208     }
0209     read_unlock(&filp->f_owner.lock);
0210 
0211     if (!ret) {
0212         ret = copy_to_user(owner_p, &owner, sizeof(owner));
0213         if (ret)
0214             ret = -EFAULT;
0215     }
0216     return ret;
0217 }
0218 
#ifdef CONFIG_CHECKPOINT_RESTORE
/*
 * F_GETOWNER_UIDS: copy the owner's recorded real and effective uids
 * (saved by f_modown()) to userspace, translated into the caller's
 * user namespace.  Only built for checkpoint/restore kernels.
 */
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
    struct user_namespace *user_ns = current_user_ns();
    uid_t __user *dst = (void __user *)arg;
    uid_t src[2];
    int err;

    read_lock(&filp->f_owner.lock);
    src[0] = from_kuid(user_ns, filp->f_owner.uid);
    src[1] = from_kuid(user_ns, filp->f_owner.euid);
    read_unlock(&filp->f_owner.lock);

    /* Either failed put_user leaves err non-zero. */
    err  = put_user(src[0], &dst[0]);
    err |= put_user(src[1], &dst[1]);

    return err;
}
#else
/* Command not supported without CONFIG_CHECKPOINT_RESTORE. */
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
    return -EINVAL;
}
#endif
0243 
/*
 * Dispatch a single fcntl() command on an already-resolved file.
 * Returns the command's result or a negative errno; unrecognized
 * commands yield -EINVAL.
 */
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
        struct file *filp)
{
    long err = -EINVAL;

    switch (cmd) {
    case F_DUPFD:
        err = f_dupfd(arg, filp, 0);
        break;
    case F_DUPFD_CLOEXEC:
        err = f_dupfd(arg, filp, O_CLOEXEC);
        break;
    case F_GETFD:
        err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
        break;
    case F_SETFD:
        err = 0;
        set_close_on_exec(fd, arg & FD_CLOEXEC);
        break;
    case F_GETFL:
        err = filp->f_flags;
        break;
    case F_SETFL:
        err = setfl(fd, filp, arg);
        break;
#if BITS_PER_LONG != 32
    /* 32-bit arches must use fcntl64() */
    case F_OFD_GETLK:
#endif
    case F_GETLK:
        err = fcntl_getlk(filp, cmd, (struct flock __user *) arg);
        break;
#if BITS_PER_LONG != 32
    /* 32-bit arches must use fcntl64() */
    case F_OFD_SETLK:
    case F_OFD_SETLKW:
#endif
        /* Fallthrough */
    case F_SETLK:
    case F_SETLKW:
        err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
        break;
    case F_GETOWN:
        /*
         * XXX If f_owner is a process group, the
         * negative return value will get converted
         * into an error.  Oops.  If we keep the
         * current syscall conventions, the only way
         * to fix this will be in libc.
         */
        err = f_getown(filp);
        force_successful_syscall_return();
        break;
    case F_SETOWN:
        f_setown(filp, arg, 1);
        err = 0;
        break;
    case F_GETOWN_EX:
        err = f_getown_ex(filp, arg);
        break;
    case F_SETOWN_EX:
        err = f_setown_ex(filp, arg);
        break;
    case F_GETOWNER_UIDS:
        err = f_getowner_uids(filp, arg);
        break;
    case F_GETSIG:
        err = filp->f_owner.signum;
        break;
    case F_SETSIG:
        /* arg == 0 restores default behaviour. */
        if (!valid_signal(arg)) {
            break;
        }
        err = 0;
        /* Read locklessly by send_sigio_to_task() via ACCESS_ONCE. */
        filp->f_owner.signum = arg;
        break;
    case F_GETLEASE:
        err = fcntl_getlease(filp);
        break;
    case F_SETLEASE:
        err = fcntl_setlease(fd, filp, arg);
        break;
    case F_NOTIFY:
        err = fcntl_dirnotify(fd, filp, arg);
        break;
    case F_SETPIPE_SZ:
    case F_GETPIPE_SZ:
        err = pipe_fcntl(filp, cmd, arg);
        break;
    case F_ADD_SEALS:
    case F_GET_SEALS:
        err = shmem_fcntl(filp, cmd, arg);
        break;
    default:
        break;
    }
    return err;
}
0343 
0344 static int check_fcntl_cmd(unsigned cmd)
0345 {
0346     switch (cmd) {
0347     case F_DUPFD:
0348     case F_DUPFD_CLOEXEC:
0349     case F_GETFD:
0350     case F_SETFD:
0351     case F_GETFL:
0352         return 1;
0353     }
0354     return 0;
0355 }
0356 
/*
 * fcntl(2) syscall entry point: resolve the descriptor, run the LSM
 * check, then hand off to do_fcntl().
 */
SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
    struct fd f = fdget_raw(fd);
    long err = -EBADF;

    if (!f.file)
        goto out;

    /* O_PATH descriptors only allow the check_fcntl_cmd() subset. */
    if (unlikely(f.file->f_mode & FMODE_PATH)) {
        if (!check_fcntl_cmd(cmd))
            goto out1;
    }

    err = security_file_fcntl(f.file, cmd, arg);
    if (!err)
        err = do_fcntl(fd, cmd, arg, f.file);

out1:
    fdput(f);
out:
    return err;
}
0379 
#if BITS_PER_LONG == 32
/*
 * fcntl64(2): 32-bit entry point.  Identical to fcntl() except that
 * the 64-bit lock commands (F_*LK64 and the OFD variants, which take
 * a struct flock64) are handled here instead of in do_fcntl().
 */
SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
        unsigned long, arg)
{
    struct fd f = fdget_raw(fd);
    long err = -EBADF;

    if (!f.file)
        goto out;

    /* O_PATH descriptors only allow the check_fcntl_cmd() subset. */
    if (unlikely(f.file->f_mode & FMODE_PATH)) {
        if (!check_fcntl_cmd(cmd))
            goto out1;
    }

    err = security_file_fcntl(f.file, cmd, arg);
    if (err)
        goto out1;

    switch (cmd) {
    case F_GETLK64:
    case F_OFD_GETLK:
        err = fcntl_getlk64(f.file, cmd, (struct flock64 __user *) arg);
        break;
    case F_SETLK64:
    case F_SETLKW64:
    case F_OFD_SETLK:
    case F_OFD_SETLKW:
        err = fcntl_setlk64(fd, f.file, cmd,
                (struct flock64 __user *) arg);
        break;
    default:
        /* Everything else is shared with plain fcntl(). */
        err = do_fcntl(fd, cmd, arg, f.file);
        break;
    }
out1:
    fdput(f);
out:
    return err;
}
#endif
0421 
/* Table to convert sigio signal codes into poll band bitmaps */

/* Indexed by (POLL_* reason - POLL_IN); see send_sigio_to_task(). */
static const long band_table[NSIGPOLL] = {
    POLLIN | POLLRDNORM,            /* POLL_IN */
    POLLOUT | POLLWRNORM | POLLWRBAND,  /* POLL_OUT */
    POLLIN | POLLRDNORM | POLLMSG,      /* POLL_MSG */
    POLLERR,                /* POLL_ERR */
    POLLPRI | POLLRDBAND,           /* POLL_PRI */
    POLLHUP | POLLERR           /* POLL_HUP */
};
0432 
0433 static inline int sigio_perm(struct task_struct *p,
0434                              struct fown_struct *fown, int sig)
0435 {
0436     const struct cred *cred;
0437     int ret;
0438 
0439     rcu_read_lock();
0440     cred = __task_cred(p);
0441     ret = ((uid_eq(fown->euid, GLOBAL_ROOT_UID) ||
0442         uid_eq(fown->euid, cred->suid) || uid_eq(fown->euid, cred->uid) ||
0443         uid_eq(fown->uid,  cred->suid) || uid_eq(fown->uid,  cred->uid)) &&
0444            !security_file_send_sigiotask(p, fown, sig));
0445     rcu_read_unlock();
0446     return ret;
0447 }
0448 
/*
 * Deliver one SIGIO-style event to task @p.  If the owner selected a
 * non-default signal via F_SETSIG, queue it as an rt signal carrying
 * fd and poll-band info; a signum of 0 (or a failed rt-signal queue)
 * falls back to a plain SIGIO.
 */
static void send_sigio_to_task(struct task_struct *p,
                   struct fown_struct *fown,
                   int fd, int reason, int group)
{
    /*
     * F_SETSIG can change ->signum lockless in parallel, make
     * sure we read it once and use the same value throughout.
     */
    int signum = ACCESS_ONCE(fown->signum);

    if (!sigio_perm(p, fown, signum))
        return;

    switch (signum) {
        siginfo_t si;
        default:
            /* Queue a rt signal with the appropriate fd as its
               value.  We use SI_SIGIO as the source, not
               SI_KERNEL, since kernel signals always get
               delivered even if we can't queue.  Failure to
               queue in this case _should_ be reported; we fall
               back to SIGIO in that case. --sct */
            si.si_signo = signum;
            si.si_errno = 0;
            si.si_code  = reason;
            /* Make sure we are called with one of the POLL_*
               reasons, otherwise we could leak kernel stack into
               userspace.  */
            BUG_ON((reason & __SI_MASK) != __SI_POLL);
            if (reason - POLL_IN >= NSIGPOLL)
                si.si_band  = ~0L;
            else
                si.si_band = band_table[reason - POLL_IN];
            si.si_fd    = fd;
            if (!do_send_sig_info(signum, &si, p, group))
                break;
        /* fall-through: fall back on the old plain SIGIO signal */
        case 0:
            do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, group);
    }
}
0490 
/*
 * Send the SIGIO (or F_SETSIG-selected) signal for @fown to its
 * owner: a single task, every task in a process, or a whole process
 * group, depending on how the owner was installed.
 *
 * @fd:   descriptor number reported in the siginfo
 * @band: POLL_* reason code for the event
 */
void send_sigio(struct fown_struct *fown, int fd, int band)
{
    struct task_struct *p;
    enum pid_type type;
    struct pid *pid;
    int group = 1;

    read_lock(&fown->lock);

    type = fown->pid_type;
    /* PIDTYPE_MAX marks an F_OWNER_TID owner: signal one thread only. */
    if (type == PIDTYPE_MAX) {
        group = 0;
        type = PIDTYPE_PID;
    }

    pid = fown->pid;
    if (!pid)
        goto out_unlock_fown;

    /* tasklist_lock nests inside fown->lock here. */
    read_lock(&tasklist_lock);
    do_each_pid_task(pid, type, p) {
        send_sigio_to_task(p, fown, fd, band, group);
    } while_each_pid_task(pid, type, p);
    read_unlock(&tasklist_lock);
 out_unlock_fown:
    read_unlock(&fown->lock);
}
0518 
0519 static void send_sigurg_to_task(struct task_struct *p,
0520                 struct fown_struct *fown, int group)
0521 {
0522     if (sigio_perm(p, fown, SIGURG))
0523         do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, group);
0524 }
0525 
/*
 * Send SIGURG (out-of-band data notification) to the owner of @fown.
 * Returns 1 if an owner was installed and delivery was attempted,
 * 0 if there was no owner.
 */
int send_sigurg(struct fown_struct *fown)
{
    struct task_struct *p;
    enum pid_type type;
    struct pid *pid;
    int group = 1;
    int ret = 0;

    read_lock(&fown->lock);

    type = fown->pid_type;
    /* PIDTYPE_MAX marks an F_OWNER_TID owner: signal one thread only. */
    if (type == PIDTYPE_MAX) {
        group = 0;
        type = PIDTYPE_PID;
    }

    pid = fown->pid;
    if (!pid)
        goto out_unlock_fown;

    ret = 1;

    /* tasklist_lock nests inside fown->lock here. */
    read_lock(&tasklist_lock);
    do_each_pid_task(pid, type, p) {
        send_sigurg_to_task(p, fown, group);
    } while_each_pid_task(pid, type, p);
    read_unlock(&tasklist_lock);
 out_unlock_fown:
    read_unlock(&fown->lock);
    return ret;
}
0557 
/* Serializes all fasync list modifications; lookups may use RCU. */
static DEFINE_SPINLOCK(fasync_lock);
/* Slab cache backing struct fasync_struct allocations. */
static struct kmem_cache *fasync_cache __read_mostly;
0560 
0561 static void fasync_free_rcu(struct rcu_head *head)
0562 {
0563     kmem_cache_free(fasync_cache,
0564             container_of(head, struct fasync_struct, fa_rcu));
0565 }
0566 
/*
 * Remove a fasync entry. If successfully removed, return
 * positive and clear the FASYNC flag. If no entry exists,
 * do nothing and return 0.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 *
 */
int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
    struct fasync_struct *fa, **fp;
    int result = 0;

    /* f_lock guards filp->f_flags; fasync_lock guards the list. */
    spin_lock(&filp->f_lock);
    spin_lock(&fasync_lock);
    for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
        if (fa->fa_file != filp)
            continue;

        /*
         * Clear fa_file first (under fa_lock) so concurrent
         * kill_fasync_rcu() walkers skip this entry.
         */
        spin_lock_irq(&fa->fa_lock);
        fa->fa_file = NULL;
        spin_unlock_irq(&fa->fa_lock);

        *fp = fa->fa_next;
        /* Free via RCU: lockless readers may still reference it. */
        call_rcu(&fa->fa_rcu, fasync_free_rcu);
        filp->f_flags &= ~FASYNC;
        result = 1;
        break;
    }
    spin_unlock(&fasync_lock);
    spin_unlock(&filp->f_lock);
    return result;
}
0601 
/* Allocate a fasync entry from the slab cache; NULL on failure. */
struct fasync_struct *fasync_alloc(void)
{
    return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
}
0606 
/*
 * NOTE! This can be used only for unused fasync entries:
 * entries that actually got inserted on the fasync list
 * need to be released by rcu - see fasync_remove_entry.
 */
void fasync_free(struct fasync_struct *new)
{
    kmem_cache_free(fasync_cache, new);
}
0616 
/*
 * Insert a new entry into the fasync list.  Return the pointer to the
 * old one if we didn't use the new one.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new)
{
    struct fasync_struct *fa, **fp;

    /* f_lock guards filp->f_flags; fasync_lock guards the list. */
    spin_lock(&filp->f_lock);
    spin_lock(&fasync_lock);
    for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
        if (fa->fa_file != filp)
            continue;

        /* Entry for this file already exists: refresh its fd only. */
        spin_lock_irq(&fa->fa_lock);
        fa->fa_fd = fd;
        spin_unlock_irq(&fa->fa_lock);
        goto out;
    }

    /* No existing entry: initialize @new and link it at the head. */
    spin_lock_init(&new->fa_lock);
    new->magic = FASYNC_MAGIC;
    new->fa_file = filp;
    new->fa_fd = fd;
    new->fa_next = *fapp;
    /* Publish only after full init, for lockless RCU walkers. */
    rcu_assign_pointer(*fapp, new);
    filp->f_flags |= FASYNC;

out:
    spin_unlock(&fasync_lock);
    spin_unlock(&filp->f_lock);
    /* fa is NULL when @new was consumed, else the updated old entry. */
    return fa;
}
0653 
0654 /*
0655  * Add a fasync entry. Return negative on error, positive if
0656  * added, and zero if did nothing but change an existing one.
0657  */
0658 static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
0659 {
0660     struct fasync_struct *new;
0661 
0662     new = fasync_alloc();
0663     if (!new)
0664         return -ENOMEM;
0665 
0666     /*
0667      * fasync_insert_entry() returns the old (update) entry if
0668      * it existed.
0669      *
0670      * So free the (unused) new entry and return 0 to let the
0671      * caller know that we didn't add any new fasync entries.
0672      */
0673     if (fasync_insert_entry(fd, filp, fapp, new)) {
0674         fasync_free(new);
0675         return 0;
0676     }
0677 
0678     return 1;
0679 }
0680 
/*
 * fasync_helper() is used by almost all character device drivers
 * to set up the fasync queue, and for regular files by the file
 * lease code. It returns negative on error, 0 if it did no changes
 * and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
    return on ? fasync_add_entry(fd, filp, fapp)
              : fasync_remove_entry(filp, fapp);
}

EXPORT_SYMBOL(fasync_helper);
0695 
/*
 * rcu_read_lock() is held
 *
 * Walk the fasync list and raise a SIGIO event on each attached
 * file's owner.  Entries being torn down have fa_file == NULL (set
 * under fa_lock by fasync_remove_entry()) and are skipped.
 */
static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
{
    while (fa) {
        struct fown_struct *fown;
        unsigned long flags;

        /* Corrupted entry: stop rather than chase a bad pointer. */
        if (fa->magic != FASYNC_MAGIC) {
            printk(KERN_ERR "kill_fasync: bad magic number in "
                   "fasync_struct!\n");
            return;
        }
        spin_lock_irqsave(&fa->fa_lock, flags);
        if (fa->fa_file) {
            fown = &fa->fa_file->f_owner;
            /* Don't send SIGURG to processes which have not set a
               queued signum: SIGURG has its own default signalling
               mechanism. */
            if (!(sig == SIGURG && fown->signum == 0))
                send_sigio(fown, fa->fa_fd, band);
        }
        spin_unlock_irqrestore(&fa->fa_lock, flags);
        fa = rcu_dereference(fa->fa_next);
    }
}
0723 
/*
 * Raise a SIGIO event (signal @sig, poll band @band) on every owner
 * registered in the fasync list rooted at @fp.
 */
void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
    /*
     * Unlocked fast path: the list is usually empty, so skip the
     * RCU section entirely when there is nothing to signal.
     */
    if (!*fp)
        return;

    rcu_read_lock();
    kill_fasync_rcu(rcu_dereference(*fp), sig, band);
    rcu_read_unlock();
}
EXPORT_SYMBOL(kill_fasync);
0736 
/*
 * Boot-time setup: statically verify that the O_* open-flag bits are
 * all distinct, then create the slab cache for fasync entries.
 */
static int __init fcntl_init(void)
{
    /*
     * Please add new bits here to ensure allocation uniqueness.
     * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
     * is defined as O_NONBLOCK on some platforms and not on others.
     */
    BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
        O_RDONLY    | O_WRONLY  | O_RDWR    |
        O_CREAT     | O_EXCL    | O_NOCTTY  |
        O_TRUNC     | O_APPEND  | /* O_NONBLOCK | */
        __O_SYNC    | O_DSYNC   | FASYNC    |
        O_DIRECT    | O_LARGEFILE   | O_DIRECTORY   |
        O_NOFOLLOW  | O_NOATIME | O_CLOEXEC |
        __FMODE_EXEC    | O_PATH    | __O_TMPFILE   |
        __FMODE_NONOTIFY
        ));

    /* SLAB_PANIC: boot cannot proceed without this cache. */
    fasync_cache = kmem_cache_create("fasync_cache",
        sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
    return 0;
}
0759 
0760 module_init(fcntl_init)