0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052 #include <linux/capability.h>
0053 #include <linux/file.h>
0054 #include <linux/fdtable.h>
0055 #include <linux/fs.h>
0056 #include <linux/init.h>
0057 #include <linux/security.h>
0058 #include <linux/slab.h>
0059 #include <linux/syscalls.h>
0060 #include <linux/time.h>
0061 #include <linux/rcupdate.h>
0062 #include <linux/pid_namespace.h>
0063 #include <linux/hashtable.h>
0064 #include <linux/percpu.h>
0065 #include <linux/sysctl.h>
0066
0067 #define CREATE_TRACE_POINTS
0068 #include <trace/events/filelock.h>
0069
0070 #include <linux/uaccess.h>
0071
/* Lock-class predicates keyed off fl_flags. */
#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
#define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)
/* NOTE(review): a non-positive fl_pid appears to mark locks held for a
 * remote owner (e.g. a network lock manager) — confirm against callers. */
#define IS_REMOTELCK(fl)	(fl->fl_pid <= 0)
0077
0078 static bool lease_breaking(struct file_lock *fl)
0079 {
0080 return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
0081 }
0082
0083 static int target_leasetype(struct file_lock *fl)
0084 {
0085 if (fl->fl_flags & FL_UNLOCK_PENDING)
0086 return F_UNLCK;
0087 if (fl->fl_flags & FL_DOWNGRADE_PENDING)
0088 return F_RDLCK;
0089 return fl->fl_type;
0090 }
0091
/*
 * Tunables (exported via sysctl below): whether leases may be taken at
 * all, and the grace period in seconds before a broken lease times out.
 */
static int leases_enable = 1;
static int lease_break_time = 45;
0094
#ifdef CONFIG_SYSCTL
/* Expose the lease tunables above under /proc/sys/fs/. */
static struct ctl_table locks_sysctls[] = {
	{
		.procname = "leases-enable",
		.data = &leases_enable,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#ifdef CONFIG_MMU
	{
		.procname = "lease-break-time",
		.data = &lease_break_time,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif /* CONFIG_MMU */
	{}	/* sentinel */
};

/* Register the table early so the knobs exist before most of userspace. */
static int __init init_fs_locks_sysctls(void)
{
	register_sysctl_init("fs", locks_sysctls);
	return 0;
}
early_initcall(init_fs_locks_sysctls);
#endif /* CONFIG_SYSCTL */
0123
0124
0125
0126
0127
0128
0129
0130
0131
/*
 * Per-CPU shard of the global list of all active file locks. Insertions
 * go onto the running CPU's shard under its spinlock, with file_rwsem
 * held (see locks_insert_global_locks()); fl_link_cpu records which
 * shard a lock landed on so it can be removed later.
 */
struct file_lock_list_struct {
	spinlock_t lock;
	struct hlist_head hlist;
};
static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150
/*
 * Hash of POSIX lock requests currently blocked on another lock, keyed
 * by owner (see posix_owner_key()); walked by the deadlock detector.
 */
#define BLOCKED_HASH_BITS	7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);

/*
 * Protects blocked_hash and all fl_blocker / fl_blocked_requests /
 * fl_blocked_member linkage between locks.
 */
static DEFINE_SPINLOCK(blocked_lock_lock);

/* Slab caches for file_lock_context and file_lock allocations. */
static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;
0171
/*
 * Return the inode's file_lock_context, allocating and installing one on
 * first use. No context is allocated for F_UNLCK requests (nothing to
 * unlock). May return NULL on allocation failure, or for F_UNLCK when no
 * context exists yet.
 */
static struct file_lock_context *
locks_get_lock_context(struct inode *inode, int type)
{
	struct file_lock_context *ctx;

	/* paired with cmpxchg() below */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (likely(ctx) || type == F_UNLCK)
		goto out;

	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
	if (!ctx)
		goto out;

	spin_lock_init(&ctx->flc_lock);
	INIT_LIST_HEAD(&ctx->flc_flock);
	INIT_LIST_HEAD(&ctx->flc_posix);
	INIT_LIST_HEAD(&ctx->flc_lease);

	/*
	 * Assign the pointer if it's still NULL; if another task raced us
	 * and installed a context first, free ours and use theirs.
	 */
	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
		kmem_cache_free(flctx_cache, ctx);
		ctx = smp_load_acquire(&inode->i_flctx);
	}
out:
	trace_locks_get_lock_context(inode, type, ctx);
	return ctx;
}
0203
0204 static void
0205 locks_dump_ctx_list(struct list_head *list, char *list_type)
0206 {
0207 struct file_lock *fl;
0208
0209 list_for_each_entry(fl, list, fl_list) {
0210 pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
0211 }
0212 }
0213
0214 static void
0215 locks_check_ctx_lists(struct inode *inode)
0216 {
0217 struct file_lock_context *ctx = inode->i_flctx;
0218
0219 if (unlikely(!list_empty(&ctx->flc_flock) ||
0220 !list_empty(&ctx->flc_posix) ||
0221 !list_empty(&ctx->flc_lease))) {
0222 pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
0223 MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
0224 inode->i_ino);
0225 locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
0226 locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
0227 locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
0228 }
0229 }
0230
/*
 * Warn about any lock on @list that still references @filp — catches
 * locks leaked past the point where the file is going away.
 */
static void
locks_check_ctx_file_list(struct file *filp, struct list_head *list,
				char *list_type)
{
	struct file_lock *fl;
	struct inode *inode = locks_inode(filp);

	list_for_each_entry(fl, list, fl_list)
		if (fl->fl_file == filp)
			pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
				" fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
				list_type, MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino,
				fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
}
0246
0247 void
0248 locks_free_lock_context(struct inode *inode)
0249 {
0250 struct file_lock_context *ctx = inode->i_flctx;
0251
0252 if (unlikely(ctx)) {
0253 locks_check_ctx_lists(inode);
0254 kmem_cache_free(flctx_cache, ctx);
0255 }
0256 }
0257
0258 static void locks_init_lock_heads(struct file_lock *fl)
0259 {
0260 INIT_HLIST_NODE(&fl->fl_link);
0261 INIT_LIST_HEAD(&fl->fl_list);
0262 INIT_LIST_HEAD(&fl->fl_blocked_requests);
0263 INIT_LIST_HEAD(&fl->fl_blocked_member);
0264 init_waitqueue_head(&fl->fl_wait);
0265 }
0266
0267
0268 struct file_lock *locks_alloc_lock(void)
0269 {
0270 struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
0271
0272 if (fl)
0273 locks_init_lock_heads(fl);
0274
0275 return fl;
0276 }
0277 EXPORT_SYMBOL_GPL(locks_alloc_lock);
0278
0279 void locks_release_private(struct file_lock *fl)
0280 {
0281 BUG_ON(waitqueue_active(&fl->fl_wait));
0282 BUG_ON(!list_empty(&fl->fl_list));
0283 BUG_ON(!list_empty(&fl->fl_blocked_requests));
0284 BUG_ON(!list_empty(&fl->fl_blocked_member));
0285 BUG_ON(!hlist_unhashed(&fl->fl_link));
0286
0287 if (fl->fl_ops) {
0288 if (fl->fl_ops->fl_release_private)
0289 fl->fl_ops->fl_release_private(fl);
0290 fl->fl_ops = NULL;
0291 }
0292
0293 if (fl->fl_lmops) {
0294 if (fl->fl_lmops->lm_put_owner) {
0295 fl->fl_lmops->lm_put_owner(fl->fl_owner);
0296 fl->fl_owner = NULL;
0297 }
0298 fl->fl_lmops = NULL;
0299 }
0300 }
0301 EXPORT_SYMBOL_GPL(locks_release_private);
0302
0303
0304
0305
0306
0307
0308
0309
0310
0311
0312 bool locks_owner_has_blockers(struct file_lock_context *flctx,
0313 fl_owner_t owner)
0314 {
0315 struct file_lock *fl;
0316
0317 spin_lock(&flctx->flc_lock);
0318 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
0319 if (fl->fl_owner != owner)
0320 continue;
0321 if (!list_empty(&fl->fl_blocked_requests)) {
0322 spin_unlock(&flctx->flc_lock);
0323 return true;
0324 }
0325 }
0326 spin_unlock(&flctx->flc_lock);
0327 return false;
0328 }
0329 EXPORT_SYMBOL_GPL(locks_owner_has_blockers);
0330
0331
0332 void locks_free_lock(struct file_lock *fl)
0333 {
0334 locks_release_private(fl);
0335 kmem_cache_free(filelock_cache, fl);
0336 }
0337 EXPORT_SYMBOL(locks_free_lock);
0338
0339 static void
0340 locks_dispose_list(struct list_head *dispose)
0341 {
0342 struct file_lock *fl;
0343
0344 while (!list_empty(dispose)) {
0345 fl = list_first_entry(dispose, struct file_lock, fl_list);
0346 list_del_init(&fl->fl_list);
0347 locks_free_lock(fl);
0348 }
0349 }
0350
0351 void locks_init_lock(struct file_lock *fl)
0352 {
0353 memset(fl, 0, sizeof(struct file_lock));
0354 locks_init_lock_heads(fl);
0355 }
0356 EXPORT_SYMBOL(locks_init_lock);
0357
0358
0359
0360
0361 void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
0362 {
0363 new->fl_owner = fl->fl_owner;
0364 new->fl_pid = fl->fl_pid;
0365 new->fl_file = NULL;
0366 new->fl_flags = fl->fl_flags;
0367 new->fl_type = fl->fl_type;
0368 new->fl_start = fl->fl_start;
0369 new->fl_end = fl->fl_end;
0370 new->fl_lmops = fl->fl_lmops;
0371 new->fl_ops = NULL;
0372
0373 if (fl->fl_lmops) {
0374 if (fl->fl_lmops->lm_get_owner)
0375 fl->fl_lmops->lm_get_owner(fl->fl_owner);
0376 }
0377 }
0378 EXPORT_SYMBOL(locks_copy_conflock);
0379
0380 void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
0381 {
0382
0383 WARN_ON_ONCE(new->fl_ops);
0384
0385 locks_copy_conflock(new, fl);
0386
0387 new->fl_file = fl->fl_file;
0388 new->fl_ops = fl->fl_ops;
0389
0390 if (fl->fl_ops) {
0391 if (fl->fl_ops->fl_copy_lock)
0392 fl->fl_ops->fl_copy_lock(new, fl);
0393 }
0394 }
0395 EXPORT_SYMBOL(locks_copy_lock);
0396
/*
 * Transfer any waiters queued on request @fl onto @new, the lock that
 * actually got inserted, so they are woken when @new is released.
 */
static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
{
	struct file_lock *f;

	/*
	 * Unlocked list_empty() check: NOTE(review) this appears safe
	 * because new waiters are only queued on a lock that is visible
	 * as a blocker, which the request no longer is at this point —
	 * confirm against __locks_insert_block() callers.
	 */
	if (list_empty(&fl->fl_blocked_requests))
		return;
	spin_lock(&blocked_lock_lock);
	list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
	/* Repoint every moved waiter at its new blocker. */
	list_for_each_entry(f, &new->fl_blocked_requests, fl_blocked_member)
		f->fl_blocker = new;
	spin_unlock(&blocked_lock_lock);
}
0414
/* Map a flock(2) operation to the internal F_* lock type. */
static inline int flock_translate_cmd(int cmd)
{
	if (cmd == LOCK_SH)
		return F_RDLCK;
	if (cmd == LOCK_EX)
		return F_WRLCK;
	if (cmd == LOCK_UN)
		return F_UNLCK;
	return -EINVAL;
}
0426
0427
0428 static void flock_make_lock(struct file *filp, struct file_lock *fl, int type)
0429 {
0430 locks_init_lock(fl);
0431
0432 fl->fl_file = filp;
0433 fl->fl_owner = filp;
0434 fl->fl_pid = current->tgid;
0435 fl->fl_flags = FL_FLOCK;
0436 fl->fl_type = type;
0437 fl->fl_end = OFFSET_MAX;
0438 }
0439
0440 static int assign_type(struct file_lock *fl, long type)
0441 {
0442 switch (type) {
0443 case F_RDLCK:
0444 case F_WRLCK:
0445 case F_UNLCK:
0446 fl->fl_type = type;
0447 break;
0448 default:
0449 return -EINVAL;
0450 }
0451 return 0;
0452 }
0453
/*
 * Fill a file_lock from a userspace struct flock64, resolving l_whence
 * into an absolute [fl_start, fl_end] byte range with overflow checks.
 */
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	switch (l->l_whence) {
	case SEEK_SET:
		fl->fl_start = 0;
		break;
	case SEEK_CUR:
		fl->fl_start = filp->f_pos;
		break;
	case SEEK_END:
		fl->fl_start = i_size_read(file_inode(filp));
		break;
	default:
		return -EINVAL;
	}
	/* Check the base + offset sum before doing it, to avoid overflow. */
	if (l->l_start > OFFSET_MAX - fl->fl_start)
		return -EOVERFLOW;
	fl->fl_start += l->l_start;
	if (fl->fl_start < 0)
		return -EINVAL;

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	if (l->l_len > 0) {
		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
			return -EOVERFLOW;
		fl->fl_end = fl->fl_start + (l->l_len - 1);

	} else if (l->l_len < 0) {
		/* Negative length locks the |l_len| bytes ending at fl_start. */
		if (fl->fl_start + l->l_len < 0)
			return -EINVAL;
		fl->fl_end = fl->fl_start - 1;
		fl->fl_start += l->l_len;
	} else
		fl->fl_end = OFFSET_MAX;	/* zero length: lock to EOF */

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
0500
0501
0502
0503
0504 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
0505 struct flock *l)
0506 {
0507 struct flock64 ll = {
0508 .l_type = l->l_type,
0509 .l_whence = l->l_whence,
0510 .l_start = l->l_start,
0511 .l_len = l->l_len,
0512 };
0513
0514 return flock64_to_posix_lock(filp, fl, &ll);
0515 }
0516
0517
0518 static bool
0519 lease_break_callback(struct file_lock *fl)
0520 {
0521 kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
0522 return false;
0523 }
0524
/*
 * lm_setup for leases: hook the caller's fasync entry onto the file and
 * arrange for lease-break SIGIOs to reach the current thread group.
 */
static void
lease_setup(struct file_lock *fl, void **priv)
{
	struct file *filp = fl->fl_file;
	struct fasync_struct *fa = *priv;

	/*
	 * fasync_insert_entry() returns the old entry if any. If there was
	 * no old entry, then it used "fa" (== *priv) and inserted it onto
	 * the list; clear *priv so the caller knows not to free it.
	 */
	if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
		*priv = NULL;

	__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
}
0541
/* Lock-manager callbacks for ordinary (in-kernel VFS) leases. */
static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_change = lease_modify,
	.lm_setup = lease_setup,
};
0547
0548
0549
0550
0551 static int lease_init(struct file *filp, long type, struct file_lock *fl)
0552 {
0553 if (assign_type(fl, type) != 0)
0554 return -EINVAL;
0555
0556 fl->fl_owner = filp;
0557 fl->fl_pid = current->tgid;
0558
0559 fl->fl_file = filp;
0560 fl->fl_flags = FL_LEASE;
0561 fl->fl_start = 0;
0562 fl->fl_end = OFFSET_MAX;
0563 fl->fl_ops = NULL;
0564 fl->fl_lmops = &lease_manager_ops;
0565 return 0;
0566 }
0567
0568
0569 static struct file_lock *lease_alloc(struct file *filp, long type)
0570 {
0571 struct file_lock *fl = locks_alloc_lock();
0572 int error = -ENOMEM;
0573
0574 if (fl == NULL)
0575 return ERR_PTR(error);
0576
0577 error = lease_init(filp, type, fl);
0578 if (error) {
0579 locks_free_lock(fl);
0580 return ERR_PTR(error);
0581 }
0582 return fl;
0583 }
0584
0585
0586
0587 static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
0588 {
0589 return ((fl1->fl_end >= fl2->fl_start) &&
0590 (fl2->fl_end >= fl1->fl_start));
0591 }
0592
0593
0594
0595
0596 static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
0597 {
0598 return fl1->fl_owner == fl2->fl_owner;
0599 }
0600
0601
/* Publish @fl on the running CPU's shard of the global lock list. */
static void locks_insert_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);

	percpu_rwsem_assert_held(&file_rwsem);

	spin_lock(&fll->lock);
	/* Remember which shard we used so removal finds the right one. */
	fl->fl_link_cpu = smp_processor_id();
	hlist_add_head(&fl->fl_link, &fll->hlist);
	spin_unlock(&fll->lock);
}
0613
0614
/* Remove @fl from the global lock list, if it was ever added. */
static void locks_delete_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll;

	percpu_rwsem_assert_held(&file_rwsem);

	/*
	 * Avoid taking the shard lock if the lock was never hashed.
	 * NOTE(review): this appears safe because the check is done while
	 * holding the flc_lock and insertions also require it — confirm
	 * against the locks_insert_lock_ctx() call sites.
	 */
	if (hlist_unhashed(&fl->fl_link))
		return;

	/* Use the shard recorded at insertion time, not the current CPU. */
	fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
	spin_lock(&fll->lock);
	hlist_del_init(&fl->fl_link);
	spin_unlock(&fll->lock);
}
0634
0635 static unsigned long
0636 posix_owner_key(struct file_lock *fl)
0637 {
0638 return (unsigned long)fl->fl_owner;
0639 }
0640
/* Add @waiter to blocked_hash, keyed by owner, for deadlock detection. */
static void locks_insert_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
}
0647
/* Remove @waiter from blocked_hash. */
static void locks_delete_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_del(&waiter->fl_link);
}
0654
0655
0656
0657
0658
0659
/*
 * Remove @waiter from its blocker's list of blocked requests.
 * Must be called with blocked_lock_lock held.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	locks_delete_global_blocked(waiter);
	list_del_init(&waiter->fl_blocked_member);
}
0665
/*
 * Wake every request currently blocked on @blocker, detaching each one.
 * Must be called with blocked_lock_lock held.
 */
static void __locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_blocked_requests)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_blocked_requests,
					  struct file_lock, fl_blocked_member);
		__locks_delete_block(waiter);
		/* Lock managers get a callback; everyone else a wakeup. */
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);

		/*
		 * Setting fl_blocker to NULL marks the "done" point in
		 * deleting a block. Paired with the smp_load_acquire() at
		 * the top of locks_delete_block().
		 */
		smp_store_release(&waiter->fl_blocker, NULL);
	}
}
0687
0688
0689
0690
0691
0692
0693
/**
 * locks_delete_block - stop waiting for a file lock
 * @waiter: the lock which was waiting
 *
 * Disconnects @waiter from its blocker (and wakes anything queued on
 * @waiter itself). Returns 0 if it was actually blocked, -ENOENT if not.
 */
int locks_delete_block(struct file_lock *waiter)
{
	int status = -ENOENT;

	/*
	 * If fl_blocker is NULL, it won't be set again as this thread "owns"
	 * the lock and is the only one that might try to claim the lock.
	 *
	 * The acquire here (paired with the smp_store_release() in
	 * __locks_wake_up_blocks()) lets us skip taking blocked_lock_lock
	 * in the common case where this thread was never blocked.
	 *
	 * The fl_blocked_requests check is also needed: a merged request
	 * (see locks_move_blocks()) may have inherited waiters without
	 * this waiter ever having had a blocker itself.
	 */
	if (!smp_load_acquire(&waiter->fl_blocker) &&
	    list_empty(&waiter->fl_blocked_requests))
		return status;

	spin_lock(&blocked_lock_lock);
	/* Re-check under the lock: were we actually blocked? */
	if (waiter->fl_blocker)
		status = 0;
	__locks_wake_up_blocks(waiter);
	__locks_delete_block(waiter);

	/*
	 * Setting fl_blocker to NULL marks the "done" point in deleting a
	 * block. Paired with the acquire at the top of this function.
	 */
	smp_store_release(&waiter->fl_blocker, NULL);
	spin_unlock(&blocked_lock_lock);
	return status;
}
EXPORT_SYMBOL(locks_delete_block);
0738
0739
0740
0741
0742
0743
0744
0745
0746
0747
0748
0749
0750
0751
0752
0753
/*
 * Insert @waiter into @blocker's list of blocked requests, descending
 * first to the deepest lock in blocker's wait tree that @waiter also
 * conflicts with, so waiters form a conflict chain.
 *
 * Must be called with both the flc_lock and blocked_lock_lock held.
 * Only POSIX (non-OFD) waiters enter the global blocked_hash, since
 * only they participate in deadlock detection.
 */
static void __locks_insert_block(struct file_lock *blocker,
				 struct file_lock *waiter,
				 bool conflict(struct file_lock *,
					       struct file_lock *))
{
	struct file_lock *fl;
	BUG_ON(!list_empty(&waiter->fl_blocked_member));

new_blocker:
	/* Restart the scan each time we descend to a deeper blocker. */
	list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member)
		if (conflict(fl, waiter)) {
			blocker = fl;
			goto new_blocker;
		}
	waiter->fl_blocker = blocker;
	list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests);
	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
		locks_insert_global_blocked(waiter);

	/*
	 * The requests blocked on waiter are known to conflict with waiter,
	 * but might not conflict with blocker, or with the requests and
	 * locks that block it. So they all need to be woken to re-try.
	 */
	__locks_wake_up_blocks(waiter);
}
0779
0780
0781 static void locks_insert_block(struct file_lock *blocker,
0782 struct file_lock *waiter,
0783 bool conflict(struct file_lock *,
0784 struct file_lock *))
0785 {
0786 spin_lock(&blocked_lock_lock);
0787 __locks_insert_block(blocker, waiter, conflict);
0788 spin_unlock(&blocked_lock_lock);
0789 }
0790
0791
0792
0793
0794
0795
/*
 * Wake up processes blocked waiting for @blocker.
 *
 * Must be called with the flc_lock held.
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	/*
	 * Avoid taking the global lock when the list is empty. This is
	 * safe because new blocked requests are only queued under the
	 * flc_lock, which we hold; __locks_wake_up_blocks() re-tests
	 * emptiness under blocked_lock_lock for concurrent removals.
	 */
	if (list_empty(&blocker->fl_blocked_requests))
		return;

	spin_lock(&blocked_lock_lock);
	__locks_wake_up_blocks(blocker);
	spin_unlock(&blocked_lock_lock);
}
0812
0813 static void
0814 locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
0815 {
0816 list_add_tail(&fl->fl_list, before);
0817 locks_insert_global_locks(fl);
0818 }
0819
/*
 * Unlink @fl from the global and per-inode lists, then wake anything
 * that was blocked on it so those requests can be retried.
 */
static void
locks_unlink_lock_ctx(struct file_lock *fl)
{
	locks_delete_global_locks(fl);
	list_del_init(&fl->fl_list);
	locks_wake_up_blocks(fl);
}
0827
0828 static void
0829 locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
0830 {
0831 locks_unlink_lock_ctx(fl);
0832 if (dispose)
0833 list_add(&fl->fl_list, dispose);
0834 else
0835 locks_free_lock(fl);
0836 }
0837
0838
0839
0840
0841 static bool locks_conflict(struct file_lock *caller_fl,
0842 struct file_lock *sys_fl)
0843 {
0844 if (sys_fl->fl_type == F_WRLCK)
0845 return true;
0846 if (caller_fl->fl_type == F_WRLCK)
0847 return true;
0848 return false;
0849 }
0850
0851
0852
0853
0854 static bool posix_locks_conflict(struct file_lock *caller_fl,
0855 struct file_lock *sys_fl)
0856 {
0857
0858
0859
0860 if (posix_same_owner(caller_fl, sys_fl))
0861 return false;
0862
0863
0864 if (!locks_overlap(caller_fl, sys_fl))
0865 return false;
0866
0867 return locks_conflict(caller_fl, sys_fl);
0868 }
0869
0870
0871
0872
0873 static bool flock_locks_conflict(struct file_lock *caller_fl,
0874 struct file_lock *sys_fl)
0875 {
0876
0877
0878
0879 if (caller_fl->fl_file == sys_fl->fl_file)
0880 return false;
0881
0882 return locks_conflict(caller_fl, sys_fl);
0883 }
0884
/**
 * posix_test_lock - test for the existence of a conflicting POSIX lock
 * @filp: file to examine
 * @fl: lock to test; on return describes the first conflicting lock
 *	found, or has fl_type set to F_UNLCK if there is none
 */
void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;
	struct file_lock_context *ctx;
	struct inode *inode = locks_inode(filp);
	void *owner;
	void (*func)(void);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
		fl->fl_type = F_UNLCK;
		return;
	}

retry:
	spin_lock(&ctx->flc_lock);
	list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
		if (!posix_locks_conflict(fl, cfl))
			continue;
		/*
		 * If the lock manager says this conflicting lock may have
		 * expired, drop flc_lock, let the manager reap it, and
		 * rescan from the top.
		 */
		if (cfl->fl_lmops && cfl->fl_lmops->lm_lock_expirable
			&& (*cfl->fl_lmops->lm_lock_expirable)(cfl)) {
			owner = cfl->fl_lmops->lm_mod_owner;
			func = cfl->fl_lmops->lm_expire_lock;
			/* Pin the module so the callback stays valid. */
			__module_get(owner);
			spin_unlock(&ctx->flc_lock);
			(*func)();
			module_put(owner);
			goto retry;
		}
		locks_copy_conflock(fl, cfl);
		goto out;
	}
	fl->fl_type = F_UNLCK;
out:
	spin_unlock(&ctx->flc_lock);
	return;
}
EXPORT_SYMBOL(posix_test_lock);
0924
0925
0926
0927
0928
0929
0930
0931
0932
0933
0934
0935
0936
0937
0938
0939
0940
0941
0942
0943
0944
0945
0946
0947
0948
0949
0950
0951
0952
0953
0954
0955
0956
0957
/* Cap the search depth to bound time spent under blocked_lock_lock. */
#define MAX_DEADLK_ITERATIONS 10

/* Find a lock that the owner of @block_fl is itself blocked on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
		if (posix_same_owner(fl, block_fl)) {
			/* Walk up the conflict chain to the root blocker. */
			while (fl->fl_blocker)
				fl = fl->fl_blocker;
			return fl;
		}
	}
	return NULL;
}
0974
0975
/*
 * Would granting @caller_fl behind @block_fl create a wait cycle?
 * Follows owner wait-for edges up to MAX_DEADLK_ITERATIONS hops.
 * Must be called with blocked_lock_lock held.
 */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	lockdep_assert_held(&blocked_lock_lock);

	/*
	 * This deadlock detector can't reasonably detect deadlocks with
	 * FL_OFDLCK locks, since they aren't owned by a process, per-se.
	 */
	if (IS_OFDLCK(caller_fl))
		return 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		/* Give up (no deadlock reported) on very deep chains. */
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}
0998
0999
1000
1001
1002
1003
1004
1005
/*
 * Apply an FL_FLOCK-style lock request to an inode. flock locks are
 * whole-file and keyed by struct file: a type change removes the old
 * lock held through the same file before checking for conflicts, so an
 * upgrade/downgrade is not atomic. Returns 0, FILE_LOCK_DEFERRED when
 * the request was queued behind a conflicting lock (FL_SLEEP), or -errno.
 */
static int flock_lock_inode(struct inode *inode, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock *fl;
	struct file_lock_context *ctx;
	int error = 0;
	bool found = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx) {
		if (request->fl_type != F_UNLCK)
			return -ENOMEM;
		/* No context: there is nothing to unlock. */
		return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
	}

	/* Pre-allocate outside the spinlock; unlock never needs a new lock. */
	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	/* Remove any existing flock lock held through this file. */
	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (request->fl_file != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;	/* same type already held: no-op */
		found = true;
		locks_delete_lock_ctx(fl, &dispose);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

find_conflict:
	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request, flock_locks_conflict);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;	/* conflict probe only; don't take the lock */
	locks_copy_lock(new_fl, request);
	locks_move_blocks(new_fl, request);
	locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
	new_fl = NULL;
	error = 0;

out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	if (new_fl)
		locks_free_lock(new_fl);
	/* Free removed locks only after dropping the spinlocks. */
	locks_dispose_list(&dispose);
	trace_flock_lock_inode(inode, request, error);
	return error;
}
1077
/*
 * Apply a POSIX-style lock request to an inode, merging, splitting or
 * replacing the owner's existing locks as needed. If @conflock is
 * non-NULL and the request fails with -EAGAIN, it is filled with a copy
 * of the conflicting lock.
 */
static int posix_lock_inode(struct inode *inode, struct file_lock *request,
			    struct file_lock *conflock)
{
	struct file_lock *fl, *tmp;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock_context *ctx;
	int error;
	bool added = false;
	LIST_HEAD(dispose);
	void *owner;
	void (*func)(void);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx)
		return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;

	/*
	 * We may need up to two file_lock structures for this operation,
	 * so we get them in advance to avoid allocating under the
	 * spinlocks. A whole-file unlock can never need new locks.
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

retry:
	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	/*
	 * New lock request: walk all POSIX locks and look for conflicts.
	 * If there are any, either return an error or put the request on
	 * the blocker's list of waiters and the global blocked_hash.
	 */
	if (request->fl_type != F_UNLCK) {
		list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
			if (!posix_locks_conflict(request, fl))
				continue;
			/*
			 * Possibly-expired lock: drop all locks, let the
			 * lock manager reap it, and start over.
			 */
			if (fl->fl_lmops && fl->fl_lmops->lm_lock_expirable
				&& (*fl->fl_lmops->lm_lock_expirable)(fl)) {
				owner = fl->fl_lmops->lm_mod_owner;
				func = fl->fl_lmops->lm_expire_lock;
				__module_get(owner);
				spin_unlock(&ctx->flc_lock);
				percpu_up_read(&file_rwsem);
				(*func)();
				module_put(owner);
				goto retry;
			}
			if (conflock)
				locks_copy_conflock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			/*
			 * Deadlock detection and insertion into the blocked
			 * list are done while holding blocked_lock_lock.
			 */
			error = -EDEADLK;
			spin_lock(&blocked_lock_lock);
			/*
			 * Ensure that we don't find any locks blocked on
			 * this request during the deadlock scan.
			 */
			__locks_wake_up_blocks(request);
			if (likely(!posix_locks_deadlock(request, fl))) {
				error = FILE_LOCK_DEFERRED;
				__locks_insert_block(fl, request,
						     posix_locks_conflict);
			}
			spin_unlock(&blocked_lock_lock);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/* Find the first old lock with the same owner as the new lock. */
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (posix_same_owner(request, fl))
			break;
	}

	/* Process locks with this owner. */
	list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
		if (!posix_same_owner(request, fl))
			break;

		/* Detect adjacent or overlapping regions (if same lock type). */
		if (request->fl_type == fl->fl_type) {
			/*
			 * In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1": if end is
			 * OFFSET_MAX, end + 1 would overflow to negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				continue;
			/*
			 * If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/*
			 * The new and old lock are of the same type and
			 * adjacent or overlapping: coalesce into one lock
			 * spanning the lower start to the higher end.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock_ctx(fl, &dispose);
				continue;
			}
			request = fl;
			added = true;
		} else {
			/*
			 * Different lock types: the old lock may need to be
			 * trimmed, split, or replaced.
			 */
			if (fl->fl_end < request->fl_start)
				continue;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = true;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/*
			 * If the old lock extends past the new one's end, it
			 * survives (possibly trimmed) to the right.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/*
				 * The new lock completely replaces an old
				 * one (this may happen several times).
				 */
				if (added) {
					locks_delete_lock_ctx(fl, &dispose);
					continue;
				}
				/*
				 * Replace the old lock with new_fl and remove
				 * the old one. Inserting here is safe since
				 * new_fl isn't needed later: the new lock is
				 * just replacing an existing lock.
				 */
				error = -ENOLCK;
				if (!new_fl)
					goto out;
				locks_copy_lock(new_fl, request);
				locks_move_blocks(new_fl, request);
				request = new_fl;
				new_fl = NULL;
				locks_insert_lock_ctx(request, &fl->fl_list);
				locks_delete_lock_ctx(fl, &dispose);
				added = true;
			}
		}
	}

	/*
	 * The code above only modifies existing locks (merge/replace). All
	 * insertions of new locks happen below, so it is still safe to bail
	 * out here if the needed pre-allocations failed.
	 */
	error = -ENOLCK;
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_move_blocks(new_fl, request);
		locks_insert_lock_ctx(new_fl, &fl->fl_list);
		fl = new_fl;
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/*
			 * The new lock breaks the old one in two pieces,
			 * so we have to use the second pre-allocated lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock_ctx(left, &fl->fl_list);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	/* Free any unused pre-allocations and the removed locks. */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	locks_dispose_list(&dispose);
	trace_posix_lock_inode(inode, request, error);

	return error;
}
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
/**
 * posix_lock_file - apply a POSIX-style lock to a file
 * @filp: file to apply the lock to
 * @fl: lock to apply
 * @conflock: if non-NULL, filled with the conflicting lock on -EAGAIN
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
		    struct file_lock *conflock)
{
	struct inode *inode = locks_inode(filp);

	return posix_lock_inode(inode, fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);
1336
1337
1338
1339
1340
1341
1342
1343
/*
 * Apply @fl to @inode, sleeping interruptibly for as long as the
 * request is deferred behind a conflicting lock.
 */
static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int error;
	might_sleep ();
	for (;;) {
		error = posix_lock_inode(inode, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		/* Wait until we are off every blocker's list, then retry. */
		error = wait_event_interruptible(fl->fl_wait,
					list_empty(&fl->fl_blocked_member));
		if (error)
			break;
	}
	/* Make sure we are fully unlinked even on a signal/error exit. */
	locks_delete_block(fl);
	return error;
}
1360
1361 static void lease_clear_pending(struct file_lock *fl, int arg)
1362 {
1363 switch (arg) {
1364 case F_UNLCK:
1365 fl->fl_flags &= ~FL_UNLOCK_PENDING;
1366 fallthrough;
1367 case F_RDLCK:
1368 fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
1369 }
1370 }
1371
1372
/*
 * lease_modify - apply the lease transition @arg (F_RDLCK/F_UNLCK) to
 * @fl, waking waiters; on full unlock the lease is torn down and moved
 * to @dispose (or freed if @dispose is NULL).
 */
int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
{
	int error = assign_type(fl, arg);

	if (error)
		return error;
	lease_clear_pending(fl, arg);
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK) {
		struct file *filp = fl->fl_file;

		/* Stop async notifications to the former lease holder. */
		f_delown(filp);
		filp->f_owner.signum = 0;
		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
		if (fl->fl_fasync != NULL) {
			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
			fl->fl_fasync = NULL;
		}
		locks_delete_lock_ctx(fl, dispose);
	}
	return 0;
}
EXPORT_SYMBOL(lease_modify);
1396
1397 static bool past_time(unsigned long then)
1398 {
1399 if (!then)
1400
1401 return false;
1402 return time_after(jiffies, then);
1403 }
1404
/*
 * Expire overdue lease breaks on @inode: downgrade leases whose
 * downgrade deadline passed, and remove those whose break deadline
 * passed (a lease can hit both checks in one pass).
 */
static void time_out_leases(struct inode *inode, struct list_head *dispose)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl, *tmp;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
		trace_time_out_leases(inode, fl);
		if (past_time(fl->fl_downgrade_time))
			lease_modify(fl, F_RDLCK, dispose);
		if (past_time(fl->fl_break_time))
			lease_modify(fl, F_UNLCK, dispose);
	}
}
1420
/* Does the operation described by @breaker require breaking @lease? */
static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
{
	bool rc;

	/* If the breaker owns the lease, there is nothing to break. */
	if (lease->fl_lmops->lm_breaker_owns_lease
			&& lease->fl_lmops->lm_breaker_owns_lease(lease))
		return false;
	/* Layout leases only conflict with layout operations. */
	if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) {
		rc = false;
		goto trace;
	}
	/* Acquiring a delegation does not break ordinary leases. */
	if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE)) {
		rc = false;
		goto trace;
	}

	rc = locks_conflict(breaker, lease);
trace:
	trace_leases_conflict(rc, lease, breaker);
	return rc;
}
1442
1443 static bool
1444 any_leases_conflict(struct inode *inode, struct file_lock *breaker)
1445 {
1446 struct file_lock_context *ctx = inode->i_flctx;
1447 struct file_lock *fl;
1448
1449 lockdep_assert_held(&ctx->flc_lock);
1450
1451 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1452 if (leases_conflict(fl, breaker))
1453 return true;
1454 }
1455 return false;
1456 }
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
/**
 * __break_lease - revoke all outstanding leases on file
 * @inode: the inode of the file to return
 * @mode: O_ACCMODE | O_NONBLOCK mask from the opener; any access mode
 *	  other than O_RDONLY asks for a full break (unlock), O_RDONLY
 *	  only asks holders to downgrade to a read lease
 * @type: which classes of lease to break (FL_LEASE / FL_DELEG / FL_LAYOUT)
 *
 * Marks every conflicting lease as breaking and waits (unless O_NONBLOCK
 * is set) for the holders to release or downgrade, bounded by
 * lease_break_time seconds when that sysctl is positive.
 *
 * Returns 0 on success, -EWOULDBLOCK for a non-blocking caller that would
 * have to wait, or a negative error from the interrupted wait.
 */
int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
{
	int error = 0;
	struct file_lock_context *ctx;
	struct file_lock *new_fl, *fl, *tmp;
	unsigned long break_time;
	int want_write = (mode & O_ACCMODE) != O_RDONLY;
	LIST_HEAD(dispose);

	/* Probe lock: a write open must break read leases too. */
	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
	if (IS_ERR(new_fl))
		return PTR_ERR(new_fl);
	new_fl->fl_flags = type;

	/* Callers normally check for a non-NULL context before calling. */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx) {
		WARN_ON_ONCE(1);
		goto free_lock;
	}

	/* Lock order: file_rwsem (read) outside flc_lock. */
	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);

	/* Queue already-expired leases for disposal first. */
	time_out_leases(inode, &dispose);

	if (!any_leases_conflict(inode, new_fl))
		goto out;

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		/* 0 is reserved to mean "no deadline"; avoid it. */
		if (break_time == 0)
			break_time++;
	}

	/* Mark each conflicting lease as unlock- or downgrade-pending. */
	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
		if (!leases_conflict(fl, new_fl))
			continue;
		if (want_write) {
			if (fl->fl_flags & FL_UNLOCK_PENDING)
				continue;
			fl->fl_flags |= FL_UNLOCK_PENDING;
			fl->fl_break_time = break_time;
		} else {
			if (lease_breaking(fl))
				continue;
			fl->fl_flags |= FL_DOWNGRADE_PENDING;
			fl->fl_downgrade_time = break_time;
		}
		/* lm_break returning nonzero means drop the lease at once. */
		if (fl->fl_lmops->lm_break(fl))
			locks_delete_lock_ctx(fl, &dispose);
	}

	if (list_empty(&ctx->flc_lease))
		goto out;

	if (mode & O_NONBLOCK) {
		trace_break_lease_noblock(inode, new_fl);
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	/* Block on the first remaining lease until it goes away or times out. */
	fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
	break_time = fl->fl_break_time;
	if (break_time != 0)
		break_time -= jiffies;
	if (break_time == 0)
		break_time++;
	locks_insert_block(fl, new_fl, leases_conflict);
	trace_break_lease_block(inode, new_fl);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);

	/* Locks dropped: safe to free the disposed leases and sleep. */
	locks_dispose_list(&dispose);
	error = wait_event_interruptible_timeout(new_fl->fl_wait,
					list_empty(&new_fl->fl_blocked_member),
					break_time);

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	trace_break_lease_unblock(inode, new_fl);
	locks_delete_block(new_fl);
	if (error >= 0) {
		/*
		 * On timeout (error == 0), force-expire leases whose break
		 * deadline has passed, then re-check for remaining
		 * conflicts and wait again if any survive.
		 */
		if (error == 0)
			time_out_leases(inode, &dispose);
		if (any_leases_conflict(inode, new_fl))
			goto restart;
		error = 0;
	}
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	locks_dispose_list(&dispose);
free_lock:
	locks_free_lock(new_fl);
	return error;
}
EXPORT_SYMBOL(__break_lease);
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585 void lease_get_mtime(struct inode *inode, struct timespec64 *time)
1586 {
1587 bool has_lease = false;
1588 struct file_lock_context *ctx;
1589 struct file_lock *fl;
1590
1591 ctx = smp_load_acquire(&inode->i_flctx);
1592 if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1593 spin_lock(&ctx->flc_lock);
1594 fl = list_first_entry_or_null(&ctx->flc_lease,
1595 struct file_lock, fl_list);
1596 if (fl && (fl->fl_type == F_WRLCK))
1597 has_lease = true;
1598 spin_unlock(&ctx->flc_lock);
1599 }
1600
1601 if (has_lease)
1602 *time = current_time(inode);
1603 }
1604 EXPORT_SYMBOL(lease_get_mtime);
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * Returns the lease type held through @filp: F_RDLCK, F_WRLCK, or
 * F_UNLCK when no lease is held.  While a break is in progress the
 * pending target type is reported (see target_leasetype()).
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	int type = F_UNLCK;
	LIST_HEAD(dispose);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		/* Lock order: file_rwsem (read) outside flc_lock. */
		percpu_down_read(&file_rwsem);
		spin_lock(&ctx->flc_lock);
		/* Expired leases must not be reported; queue them for disposal. */
		time_out_leases(inode, &dispose);
		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
			if (fl->fl_file != filp)
				continue;
			type = target_leasetype(fl);
			break;
		}
		spin_unlock(&ctx->flc_lock);
		percpu_up_read(&file_rwsem);

		/* Free timed-out leases only after dropping the locks. */
		locks_dispose_list(&dispose);
	}
	return type;
}
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667 static int
1668 check_conflicting_open(struct file *filp, const long arg, int flags)
1669 {
1670 struct inode *inode = locks_inode(filp);
1671 int self_wcount = 0, self_rcount = 0;
1672
1673 if (flags & FL_LAYOUT)
1674 return 0;
1675 if (flags & FL_DELEG)
1676
1677 return 0;
1678
1679 if (arg == F_RDLCK)
1680 return inode_is_open_for_write(inode) ? -EAGAIN : 0;
1681 else if (arg != F_WRLCK)
1682 return 0;
1683
1684
1685
1686
1687
1688
1689
1690 if (filp->f_mode & FMODE_WRITE)
1691 self_wcount = 1;
1692 else if (filp->f_mode & FMODE_READ)
1693 self_rcount = 1;
1694
1695 if (atomic_read(&inode->i_writecount) != self_wcount ||
1696 atomic_read(&inode->i_readcount) != self_rcount)
1697 return -EAGAIN;
1698
1699 return 0;
1700 }
1701
/*
 * Install a new lease (or modify this owner's existing one) on @filp.
 * @arg is F_RDLCK or F_WRLCK (never F_UNLCK here); on success with a new
 * lease, *flp is consumed and set to NULL.
 */
static int
generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
{
	struct file_lock *fl, *my_fl = NULL, *lease;
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
	int error;
	LIST_HEAD(dispose);

	lease = *flp;
	trace_generic_add_lease(inode, lease);

	ctx = locks_get_lock_context(inode, arg);
	if (!ctx)
		return -ENOMEM;

	/*
	 * Delegations need mutual exclusion with operations that take the
	 * inode lock.  Only trylock: if there is contention, just refuse
	 * the (optional) delegation rather than wait.
	 */
	if (is_deleg && !inode_trylock(inode))
		return -EAGAIN;

	if (is_deleg && arg == F_WRLCK) {
		/* Write delegations are not supported. */
		inode_unlock(inode);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	/* Lock order: file_rwsem (read) outside flc_lock. */
	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	time_out_leases(inode, &dispose);
	error = check_conflicting_open(filp, arg, lease->fl_flags);
	if (error)
		goto out;

	/*
	 * Scan existing leases: remember our own entry (same file and
	 * owner), and fail with -EAGAIN if anyone else's lease rules out
	 * the request.
	 */
	error = -EAGAIN;
	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (fl->fl_file == filp &&
		    fl->fl_owner == lease->fl_owner) {
			my_fl = fl;
			continue;
		}

		/* No exclusive lease while someone else holds any lease. */
		if (arg == F_WRLCK)
			goto out;

		/* No new lease while another holder's unlock is pending. */
		if (fl->fl_flags & FL_UNLOCK_PENDING)
			goto out;
	}

	if (my_fl != NULL) {
		/* Modify our existing lease in place via the lease manager. */
		lease = my_fl;
		error = lease->fl_lmops->lm_change(lease, arg, &dispose);
		if (error)
			goto out;
		goto out_setup;
	}

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock_ctx(lease, &ctx->flc_lease);
	/*
	 * An open could have raced in between the first
	 * check_conflicting_open() and the insertion above.  The barrier
	 * orders the insertion before the re-check so such a racing open
	 * is guaranteed to be observed; undo the insertion on conflict.
	 */
	smp_mb();
	error = check_conflicting_open(filp, arg, lease->fl_flags);
	if (error) {
		locks_unlink_lock_ctx(lease);
		goto out;
	}

out_setup:
	if (lease->fl_lmops->lm_setup)
		lease->fl_lmops->lm_setup(lease, priv);
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	locks_dispose_list(&dispose);
	if (is_deleg)
		inode_unlock(inode);
	/* A newly inserted lease consumes *flp; the caller must not free it. */
	if (!error && !my_fl)
		*flp = NULL;
	return error;
}
1817
1818 static int generic_delete_lease(struct file *filp, void *owner)
1819 {
1820 int error = -EAGAIN;
1821 struct file_lock *fl, *victim = NULL;
1822 struct inode *inode = locks_inode(filp);
1823 struct file_lock_context *ctx;
1824 LIST_HEAD(dispose);
1825
1826 ctx = smp_load_acquire(&inode->i_flctx);
1827 if (!ctx) {
1828 trace_generic_delete_lease(inode, NULL);
1829 return error;
1830 }
1831
1832 percpu_down_read(&file_rwsem);
1833 spin_lock(&ctx->flc_lock);
1834 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1835 if (fl->fl_file == filp &&
1836 fl->fl_owner == owner) {
1837 victim = fl;
1838 break;
1839 }
1840 }
1841 trace_generic_delete_lease(inode, victim);
1842 if (victim)
1843 error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1844 spin_unlock(&ctx->flc_lock);
1845 percpu_up_read(&file_rwsem);
1846 locks_dispose_list(&dispose);
1847 return error;
1848 }
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861 int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
1862 void **priv)
1863 {
1864 struct inode *inode = locks_inode(filp);
1865 int error;
1866
1867 if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
1868 return -EACCES;
1869 if (!S_ISREG(inode->i_mode))
1870 return -EINVAL;
1871 error = security_file_lock(filp, arg);
1872 if (error)
1873 return error;
1874
1875 switch (arg) {
1876 case F_UNLCK:
1877 return generic_delete_lease(filp, *priv);
1878 case F_RDLCK:
1879 case F_WRLCK:
1880 if (!(*flp)->fl_lmops->lm_break) {
1881 WARN_ON_ONCE(1);
1882 return -ENOLCK;
1883 }
1884
1885 return generic_add_lease(filp, arg, flp, priv);
1886 default:
1887 return -EINVAL;
1888 }
1889 }
1890 EXPORT_SYMBOL(generic_setlease);
1891
#if IS_ENABLED(CONFIG_SRCU)
/*
 * Notifier chain fired whenever a caller attempts to set a new
 * (non-F_UNLCK) lease via vfs_setlease().  SRCU-based so that notifier
 * callbacks may block.
 */
static struct srcu_notifier_head lease_notifier_chain;

static inline void
lease_notifier_chain_init(void)
{
	srcu_init_notifier_head(&lease_notifier_chain);
}

/* Tell registered listeners a lease of type @arg is being requested. */
static inline void
setlease_notifier(long arg, struct file_lock *lease)
{
	if (arg != F_UNLCK)
		srcu_notifier_call_chain(&lease_notifier_chain, arg, lease);
}

int lease_register_notifier(struct notifier_block *nb)
{
	return srcu_notifier_chain_register(&lease_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(lease_register_notifier);

void lease_unregister_notifier(struct notifier_block *nb)
{
	srcu_notifier_chain_unregister(&lease_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(lease_unregister_notifier);

#else /* !CONFIG_SRCU */
/* Without SRCU the lease notifier machinery compiles down to no-ops. */
static inline void
lease_notifier_chain_init(void)
{
}

static inline void
setlease_notifier(long arg, struct file_lock *lease)
{
}

int lease_register_notifier(struct notifier_block *nb)
{
	return 0;
}
EXPORT_SYMBOL_GPL(lease_register_notifier);

void lease_unregister_notifier(struct notifier_block *nb)
{
}
EXPORT_SYMBOL_GPL(lease_unregister_notifier);

#endif /* IS_ENABLED(CONFIG_SRCU) */
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967 int
1968 vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
1969 {
1970 if (lease)
1971 setlease_notifier(arg, *lease);
1972 if (filp->f_op->setlease)
1973 return filp->f_op->setlease(filp, arg, lease, priv);
1974 else
1975 return generic_setlease(filp, arg, lease, priv);
1976 }
1977 EXPORT_SYMBOL_GPL(vfs_setlease);
1978
1979 static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1980 {
1981 struct file_lock *fl;
1982 struct fasync_struct *new;
1983 int error;
1984
1985 fl = lease_alloc(filp, arg);
1986 if (IS_ERR(fl))
1987 return PTR_ERR(fl);
1988
1989 new = fasync_alloc();
1990 if (!new) {
1991 locks_free_lock(fl);
1992 return -ENOMEM;
1993 }
1994 new->fa_fd = fd;
1995
1996 error = vfs_setlease(filp, arg, &fl, (void **)&new);
1997 if (fl)
1998 locks_free_lock(fl);
1999 if (new)
2000 fasync_free(new);
2001 return error;
2002 }
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
2015 {
2016 if (arg == F_UNLCK)
2017 return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
2018 return do_fcntl_add_lease(fd, filp, arg);
2019 }
2020
2021
2022
2023
2024
2025
2026
2027
2028 static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2029 {
2030 int error;
2031 might_sleep();
2032 for (;;) {
2033 error = flock_lock_inode(inode, fl);
2034 if (error != FILE_LOCK_DEFERRED)
2035 break;
2036 error = wait_event_interruptible(fl->fl_wait,
2037 list_empty(&fl->fl_blocked_member));
2038 if (error)
2039 break;
2040 }
2041 locks_delete_block(fl);
2042 return error;
2043 }
2044
2045
2046
2047
2048
2049
2050
2051
2052 int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2053 {
2054 int res = 0;
2055 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
2056 case FL_POSIX:
2057 res = posix_lock_inode_wait(inode, fl);
2058 break;
2059 case FL_FLOCK:
2060 res = flock_lock_inode_wait(inode, fl);
2061 break;
2062 default:
2063 BUG();
2064 }
2065 return res;
2066 }
2067 EXPORT_SYMBOL(locks_lock_inode_wait);
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
/**
 * sys_flock: - flock() system call.
 * @fd: the file descriptor to lock.
 * @cmd: the type of lock to apply: LOCK_SH, LOCK_EX or LOCK_UN,
 *	 optionally ORed with LOCK_NB for a non-blocking attempt.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	int can_sleep, error, type;
	struct file_lock fl;
	struct fd f;

	/*
	 * LOCK_MAND support has been removed: warn once and report
	 * success so legacy callers keep working.
	 */
	if (cmd & LOCK_MAND) {
		pr_warn_once("Attempt to set a LOCK_MAND lock via flock(2). This support has been removed and the request ignored.\n");
		return 0;
	}

	/* Map LOCK_SH/LOCK_EX/LOCK_UN onto F_RDLCK/F_WRLCK/F_UNLCK. */
	type = flock_translate_cmd(cmd & ~LOCK_NB);
	if (type < 0)
		return type;

	error = -EBADF;
	f = fdget(fd);
	if (!f.file)
		return error;

	/* Taking a lock requires the fd to be open for read or write. */
	if (type != F_UNLCK && !(f.file->f_mode & (FMODE_READ | FMODE_WRITE)))
		goto out_putf;

	flock_make_lock(f.file, &fl, type);

	error = security_file_lock(f.file, fl.fl_type);
	if (error)
		goto out_putf;

	can_sleep = !(cmd & LOCK_NB);
	if (can_sleep)
		fl.fl_flags |= FL_SLEEP;

	/* Filesystems may provide their own ->flock; else use the generic path. */
	if (f.file->f_op->flock)
		error = f.file->f_op->flock(f.file,
					(can_sleep) ? F_SETLKW : F_SETLK,
					&fl);
	else
		error = locks_lock_file_wait(f.file, &fl);

	locks_release_private(&fl);
out_putf:
	fdput(f);

	return error;
}
2138
2139
2140
2141
2142
2143
2144
2145
2146
/**
 * vfs_test_lock - test file byte range lock
 * @filp: The file to test lock for
 * @fl: The lock to test; also used to hold the result
 *
 * Returns -ERRNO on failure.  On success @fl describes a conflicting
 * lock, or has fl_type == F_UNLCK if there is none.  Filesystems with a
 * ->lock operation handle the test themselves (F_GETLK).
 */
int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, F_GETLK, fl);
	posix_test_lock(filp, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_test_lock);
2155
2156
2157
2158
2159
2160
2161
2162
/*
 * Translate the fl_pid stored in @fl into a pid number as seen from pid
 * namespace @ns.  OFD locks have no owning task, so -1 is returned;
 * remote locks (fl_pid <= 0) are passed through unchanged.
 */
static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
{
	pid_t vnr;
	struct pid *pid;

	if (IS_OFDLCK(fl))
		return -1;
	if (IS_REMOTELCK(fl))
		return fl->fl_pid;
	/*
	 * fl_pid is looked up in init_pid_ns (see below), so when the
	 * caller is in init_pid_ns it can be returned directly — this
	 * also still works if the owning task has already exited.
	 */
	if (ns == &init_pid_ns)
		return (pid_t)fl->fl_pid;

	rcu_read_lock();
	pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
	vnr = pid_nr_ns(pid, ns);
	rcu_read_unlock();
	return vnr;
}
2186
2187 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
2188 {
2189 flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2190 #if BITS_PER_LONG == 32
2191
2192
2193
2194
2195 if (fl->fl_start > OFFT_OFFSET_MAX)
2196 return -EOVERFLOW;
2197 if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
2198 return -EOVERFLOW;
2199 #endif
2200 flock->l_start = fl->fl_start;
2201 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2202 fl->fl_end - fl->fl_start + 1;
2203 flock->l_whence = 0;
2204 flock->l_type = fl->fl_type;
2205 return 0;
2206 }
2207
#if BITS_PER_LONG == 32
/* Fill a userspace struct flock64 from a kernel file_lock. */
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
	flock->l_start = fl->fl_start;
	/* A zero l_len means the lock extends to end of file. */
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
}
#endif
2219
2220
2221
2222
/*
 * Handle F_GETLK / F_OFD_GETLK: report the first lock that would block
 * the lock described by @flock.  On return @flock describes the
 * conflicting lock, or has l_type == F_UNLCK if there is none.
 */
int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
{
	struct file_lock *fl;
	int error;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;
	error = -EINVAL;
	/* Only read/write lock queries make sense. */
	if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
		goto out;

	error = flock_to_posix_lock(filp, fl, flock);
	if (error)
		goto out;

	if (cmd == F_OFD_GETLK) {
		/* OFD calls require l_pid == 0. */
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		/* OFD locks are owned by the struct file, not the process. */
		fl->fl_flags |= FL_OFDLCK;
		fl->fl_owner = filp;
	}

	error = vfs_test_lock(filp, fl);
	if (error)
		goto out;

	flock->l_type = fl->fl_type;
	if (fl->fl_type != F_UNLCK) {
		/* May fail with -EOVERFLOW on 32-bit kernels. */
		error = posix_lock_to_flock(flock, fl);
		if (error)
			goto out;
	}
out:
	locks_free_lock(fl);
	return error;
}
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
/**
 * vfs_lock_file - file byte range lock
 * @filp: The file to apply the lock to
 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
 * @fl: The lock to be applied
 * @conf: Place to return a copy of the conflicting lock, if found
 *
 * Filesystems implementing ->lock get full control (conflict reporting
 * included); everything else uses the generic posix_lock_file().
 */
int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
{
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, cmd, fl);
	else
		return posix_lock_file(filp, fl, conf);
}
EXPORT_SYMBOL_GPL(vfs_lock_file);
2304
2305 static int do_lock_file_wait(struct file *filp, unsigned int cmd,
2306 struct file_lock *fl)
2307 {
2308 int error;
2309
2310 error = security_file_lock(filp, fl->fl_type);
2311 if (error)
2312 return error;
2313
2314 for (;;) {
2315 error = vfs_lock_file(filp, cmd, fl, NULL);
2316 if (error != FILE_LOCK_DEFERRED)
2317 break;
2318 error = wait_event_interruptible(fl->fl_wait,
2319 list_empty(&fl->fl_blocked_member));
2320 if (error)
2321 break;
2322 }
2323 locks_delete_block(fl);
2324
2325 return error;
2326 }
2327
2328
2329 static int
2330 check_fmode_for_setlk(struct file_lock *fl)
2331 {
2332 switch (fl->fl_type) {
2333 case F_RDLCK:
2334 if (!(fl->fl_file->f_mode & FMODE_READ))
2335 return -EBADF;
2336 break;
2337 case F_WRLCK:
2338 if (!(fl->fl_file->f_mode & FMODE_WRITE))
2339 return -EBADF;
2340 }
2341 return 0;
2342 }
2343
2344
2345
2346
/*
 * Apply the lock described by @flock to an open file descriptor
 * (F_SETLK, F_SETLKW, F_OFD_SETLK, F_OFD_SETLKW).
 */
int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock *flock)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct inode *inode = locks_inode(filp);
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	error = flock_to_posix_lock(filp, file_lock, flock);
	if (error)
		goto out;

	error = check_fmode_for_setlk(file_lock);
	if (error)
		goto out;

	/*
	 * For OFD commands, require l_pid == 0, mark the lock FL_OFDLCK
	 * and make the struct file (not the process) the owner, then fall
	 * back to the classic command codes.
	 */
	switch (cmd) {
	case F_OFD_SETLK:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLK;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		break;
	case F_OFD_SETLKW:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLKW;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		fallthrough;
	case F_SETLKW:
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Detect the close/fcntl race: if the fd was closed (and possibly
	 * reused) while we slept, undo the lock we just took.  Not needed
	 * for unlocks or OFD locks (which are owned by the file itself).
	 */
	if (!error && file_lock->fl_type != F_UNLCK &&
	    !(file_lock->fl_flags & FL_OFDLCK)) {
		struct files_struct *files = current->files;
		/*
		 * file_lock orders this lookup against a racing close();
		 * an RCU-only lookup would not give that ordering.
		 */
		spin_lock(&files->file_lock);
		f = files_lookup_fd_locked(files, fd);
		spin_unlock(&files->file_lock);
		if (f != filp) {
			file_lock->fl_type = F_UNLCK;
			error = do_lock_file_wait(filp, cmd, file_lock);
			WARN_ON_ONCE(error);
			error = -EBADF;
		}
	}
out:
	trace_fcntl_setlk(inode, file_lock, error);
	locks_free_lock(file_lock);
	return error;
}
2423
2424 #if BITS_PER_LONG == 32
2425
2426
2427
2428 int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
2429 {
2430 struct file_lock *fl;
2431 int error;
2432
2433 fl = locks_alloc_lock();
2434 if (fl == NULL)
2435 return -ENOMEM;
2436
2437 error = -EINVAL;
2438 if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
2439 goto out;
2440
2441 error = flock64_to_posix_lock(filp, fl, flock);
2442 if (error)
2443 goto out;
2444
2445 if (cmd == F_OFD_GETLK) {
2446 error = -EINVAL;
2447 if (flock->l_pid != 0)
2448 goto out;
2449
2450 cmd = F_GETLK64;
2451 fl->fl_flags |= FL_OFDLCK;
2452 fl->fl_owner = filp;
2453 }
2454
2455 error = vfs_test_lock(filp, fl);
2456 if (error)
2457 goto out;
2458
2459 flock->l_type = fl->fl_type;
2460 if (fl->fl_type != F_UNLCK)
2461 posix_lock_to_flock64(flock, fl);
2462
2463 out:
2464 locks_free_lock(fl);
2465 return error;
2466 }
2467
2468
2469
2470
/*
 * Apply the 64-bit lock described by @flock to an open file descriptor
 * (F_SETLK64, F_SETLKW64, F_OFD_SETLK, F_OFD_SETLKW on 32-bit kernels).
 */
int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock64 *flock)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	error = flock64_to_posix_lock(filp, file_lock, flock);
	if (error)
		goto out;

	error = check_fmode_for_setlk(file_lock);
	if (error)
		goto out;

	/*
	 * For OFD commands, require l_pid == 0, mark the lock FL_OFDLCK
	 * and make the struct file the owner, then fall back to the
	 * classic 64-bit command codes.
	 */
	switch (cmd) {
	case F_OFD_SETLK:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLK64;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		break;
	case F_OFD_SETLKW:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLKW64;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		fallthrough;
	case F_SETLKW64:
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Detect the close/fcntl race: if the fd was closed (and possibly
	 * reused) while we slept, undo the lock we just took.  Not needed
	 * for unlocks or OFD locks (which are owned by the file itself).
	 */
	if (!error && file_lock->fl_type != F_UNLCK &&
	    !(file_lock->fl_flags & FL_OFDLCK)) {
		struct files_struct *files = current->files;
		/*
		 * file_lock orders this lookup against a racing close();
		 * an RCU-only lookup would not give that ordering.
		 */
		spin_lock(&files->file_lock);
		f = files_lookup_fd_locked(files, fd);
		spin_unlock(&files->file_lock);
		if (f != filp) {
			file_lock->fl_type = F_UNLCK;
			error = do_lock_file_wait(filp, cmd, file_lock);
			WARN_ON_ONCE(error);
			error = -EBADF;
		}
	}
out:
	locks_free_lock(file_lock);
	return error;
}
2545 #endif
2546
2547
2548
2549
2550
2551
/*
 * Release all POSIX locks held by @owner on @filp by issuing a single
 * whole-file F_UNLCK request (used at close time — see locks_remove_file()).
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	int error;
	struct inode *inode = locks_inode(filp);
	struct file_lock lock;
	struct file_lock_context *ctx;

	/*
	 * If there are no posix locks on this inode, skip the unlock
	 * entirely; a concurrently-set lock would not be ours to remove
	 * anyway.
	 */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty(&ctx->flc_posix))
		return;

	/* Build an on-stack unlock request spanning the whole file. */
	locks_init_lock(&lock);
	lock.fl_type = F_UNLCK;
	lock.fl_flags = FL_POSIX | FL_CLOSE;
	lock.fl_start = 0;
	lock.fl_end = OFFSET_MAX;
	lock.fl_owner = owner;
	lock.fl_pid = current->tgid;
	lock.fl_file = filp;
	lock.fl_ops = NULL;
	lock.fl_lmops = NULL;

	error = vfs_lock_file(filp, F_SETLK, &lock, NULL);

	/* The filesystem may have attached private state; release it. */
	if (lock.fl_ops && lock.fl_ops->fl_release_private)
		lock.fl_ops->fl_release_private(&lock);
	trace_locks_remove_posix(inode, &lock, error);
}
EXPORT_SYMBOL(locks_remove_posix);
2586
2587
/*
 * Release any flock-style lock held through @filp.
 * The caller must pass a valid i_flctx.
 */
static void
locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
{
	struct file_lock fl;
	struct inode *inode = locks_inode(filp);

	if (list_empty(&flctx->flc_flock))
		return;

	/* Build an on-stack F_UNLCK request, flagged as a close-time unlock. */
	flock_make_lock(filp, &fl, F_UNLCK);
	fl.fl_flags |= FL_CLOSE;

	if (filp->f_op->flock)
		filp->f_op->flock(filp, F_SETLKW, &fl);
	else
		flock_lock_inode(inode, &fl);

	/* The filesystem may have attached private state; release it. */
	if (fl.fl_ops && fl.fl_ops->fl_release_private)
		fl.fl_ops->fl_release_private(&fl);
}
2608
2609
/*
 * Release every lease held through @filp.
 * The caller must pass a valid i_flctx.
 */
static void
locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
{
	struct file_lock *fl, *tmp;
	LIST_HEAD(dispose);

	if (list_empty(&ctx->flc_lease))
		return;

	/* Lock order: file_rwsem (read) outside flc_lock. */
	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
		if (filp == fl->fl_file)
			lease_modify(fl, F_UNLCK, &dispose);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);

	/* Free the removed leases only after dropping the locks. */
	locks_dispose_list(&dispose);
}
2629
2630
2631
2632
/*
 * Strip every lock still held through @filp — POSIX locks owned by the
 * file (OFD locks), flock locks, and leases — then sanity-check that no
 * entry for this file remains on any of the inode's lists.
 */
void locks_remove_file(struct file *filp)
{
	struct file_lock_context *ctx;

	ctx = smp_load_acquire(&locks_inode(filp)->i_flctx);
	if (!ctx)
		return;

	/* Owner == filp removes OFD-style posix locks. */
	locks_remove_posix(filp, filp);

	/* Remove flock locks. */
	locks_remove_flock(filp, ctx);

	/* Remove any leases. */
	locks_remove_lease(filp, ctx);

	/* Warn if anything referencing this file survived the removals. */
	spin_lock(&ctx->flc_lock);
	locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
	locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
	locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
	spin_unlock(&ctx->flc_lock);
}
2656
2657
2658
2659
2660
2661
2662
2663
/**
 * vfs_cancel_lock - file byte range unblock lock
 * @filp: The file to apply the unblock to
 * @fl: The lock to be unblocked
 *
 * Asks the filesystem (via F_CANCELLK) to cancel a pending blocked lock
 * request; a no-op returning 0 when there is no ->lock operation.
 */
int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, F_CANCELLK, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2671
2672 #ifdef CONFIG_PROC_FS
2673 #include <linux/proc_fs.h>
2674 #include <linux/seq_file.h>
2675
/* Cursor state for walking the per-cpu file_lock_list in /proc/locks. */
struct locks_iterator {
	int li_cpu;	/* CPU whose per-cpu hlist is being walked */
	loff_t li_pos;	/* 1-based position, printed as the lock id */
};
2680
/*
 * Print one /proc/locks-style line for @fl.  @id is the ordinal at the
 * start of the line; @pfx (e.g. "-> " for blocked waiters) is indented
 * @repeat levels deep to show blocking depth.
 */
static void lock_get_status(struct seq_file *f, struct file_lock *fl,
			loff_t id, char *pfx, int repeat)
{
	struct inode *inode = NULL;
	unsigned int fl_pid;
	struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
	int type;

	/* 0 here means the owner is dead or invisible in this pid namespace. */
	fl_pid = locks_translate_pid(fl, proc_pidns);

	if (fl->fl_file != NULL)
		inode = locks_inode(fl->fl_file);

	seq_printf(f, "%lld: ", id);

	/* Indent nested (blocked) entries under their blocker. */
	if (repeat)
		seq_printf(f, "%*s", repeat - 1 + (int)strlen(pfx), pfx);

	if (IS_POSIX(fl)) {
		if (fl->fl_flags & FL_ACCESS)
			seq_puts(f, "ACCESS");
		else if (IS_OFDLCK(fl))
			seq_puts(f, "OFDLCK");
		else
			seq_puts(f, "POSIX ");

		seq_printf(f, " %s ",
			(inode == NULL) ? "*NOINODE*" : "ADVISORY ");
	} else if (IS_FLOCK(fl)) {
		seq_puts(f, "FLOCK ADVISORY ");
	} else if (IS_LEASE(fl)) {
		if (fl->fl_flags & FL_DELEG)
			seq_puts(f, "DELEG ");
		else
			seq_puts(f, "LEASE ");

		if (lease_breaking(fl))
			seq_puts(f, "BREAKING ");
		else if (fl->fl_file)
			seq_puts(f, "ACTIVE ");
		else
			seq_puts(f, "BREAKER ");
	} else {
		seq_puts(f, "UNKNOWN UNKNOWN ");
	}
	/* For leases, report the pending target type while breaking. */
	type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type;

	seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" :
		(type == F_RDLCK) ? "READ" : "UNLCK");
	if (inode) {
		/* Owner pid, then device major:minor and inode number. */
		seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino);
	} else {
		seq_printf(f, "%d <none>:0 ", fl_pid);
	}
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
			seq_printf(f, "%Ld EOF\n", fl->fl_start);
		else
			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
	} else {
		seq_puts(f, "0 EOF\n");
	}
}
2752
/*
 * Return the entry that follows @node on its blocker's
 * fl_blocked_requests list, or NULL when @node is the last (or only)
 * waiter on that blocker, or has no blocker at all.
 */
static struct file_lock *get_next_blocked_member(struct file_lock *node)
{
	struct file_lock *tmp;

	/* NULL node or root (unblocked) node has no siblings. */
	if (node == NULL || node->fl_blocker == NULL)
		return NULL;

	/* The next list entry may wrap to the head, or back to node itself. */
	tmp = list_next_entry(node, fl_blocked_member);
	if (list_entry_is_head(tmp, &node->fl_blocker->fl_blocked_requests, fl_blocked_member)
		|| tmp == node) {
		return NULL;
	}

	return tmp;
}
2770
/*
 * seq_file ->show for /proc/locks: print the lock @v and, nested under
 * it, every request blocked on it (prefixed "-> ", indented by depth).
 */
static int locks_show(struct seq_file *f, void *v)
{
	struct locks_iterator *iter = f->private;
	struct file_lock *cur, *tmp;
	struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
	int level = 0;

	cur = hlist_entry(v, struct file_lock, fl_link);

	/* Hide locks whose owner is not visible in this pid namespace. */
	if (locks_translate_pid(cur, proc_pidns) == 0)
		return 0;

	/*
	 * Depth-first walk of the blocked-request tree without an explicit
	 * stack: descend into fl_blocked_requests, climb back up through
	 * fl_blocker when a subtree is exhausted.
	 */
	while (cur != NULL) {
		if (level)
			lock_get_status(f, cur, iter->li_pos, "-> ", level);
		else
			lock_get_status(f, cur, iter->li_pos, "", level);

		if (!list_empty(&cur->fl_blocked_requests)) {
			/* Descend into the first waiter blocked on cur. */
			cur = list_first_entry_or_null(&cur->fl_blocked_requests,
					struct file_lock, fl_blocked_member);
			level++;
		} else {
			/* No children: try the next sibling... */
			tmp = get_next_blocked_member(cur);
			/* ...or climb until an unvisited sibling exists. */
			while (tmp == NULL && cur->fl_blocker != NULL) {
				cur = cur->fl_blocker;
				level--;
				tmp = get_next_blocked_member(cur);
			}
			cur = tmp;
		}
	}

	return 0;
}
2814
2815 static void __show_fd_locks(struct seq_file *f,
2816 struct list_head *head, int *id,
2817 struct file *filp, struct files_struct *files)
2818 {
2819 struct file_lock *fl;
2820
2821 list_for_each_entry(fl, head, fl_list) {
2822
2823 if (filp != fl->fl_file)
2824 continue;
2825 if (fl->fl_owner != files &&
2826 fl->fl_owner != filp)
2827 continue;
2828
2829 (*id)++;
2830 seq_puts(f, "lock:\t");
2831 lock_get_status(f, fl, *id, "", 0);
2832 }
2833 }
2834
/*
 * Emit "lock:\t..." lines for all flock, posix and lease locks held on
 * @filp by @files (or by the file itself), used for per-fd lock listings.
 */
void show_fd_locks(struct seq_file *f,
		  struct file *filp, struct files_struct *files)
{
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	int id = 0;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx)
		return;

	spin_lock(&ctx->flc_lock);
	__show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
	spin_unlock(&ctx->flc_lock);
}
2852
/*
 * seq_file ->start for /proc/locks: take file_rwsem for write and
 * blocked_lock_lock for the whole traversal (released in locks_stop()),
 * then position the per-cpu hlist cursor.
 */
static void *locks_start(struct seq_file *f, loff_t *pos)
	__acquires(&blocked_lock_lock)
{
	struct locks_iterator *iter = f->private;

	/* li_pos is 1-based: it is printed as the lock's ordinal id. */
	iter->li_pos = *pos + 1;
	percpu_down_write(&file_rwsem);
	spin_lock(&blocked_lock_lock);
	return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
}
2863
/* seq_file ->next: advance the per-cpu cursor and the printed ordinal. */
static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
{
	struct locks_iterator *iter = f->private;

	++iter->li_pos;
	return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
}
2871
/* seq_file ->stop: drop the locks taken in locks_start(), inner first. */
static void locks_stop(struct seq_file *f, void *v)
	__releases(&blocked_lock_lock)
{
	spin_unlock(&blocked_lock_lock);
	percpu_up_write(&file_rwsem);
}
2878
/* seq_file hooks backing /proc/locks. */
static const struct seq_operations locks_seq_operations = {
	.start = locks_start,
	.next = locks_next,
	.stop = locks_stop,
	.show = locks_show,
};
2885
/* Register /proc/locks (with per-open iterator state) at fs initcall time. */
static int __init proc_locks_init(void)
{
	proc_create_seq_private("locks", 0, NULL, &locks_seq_operations,
			sizeof(struct locks_iterator), NULL);
	return 0;
}
fs_initcall(proc_locks_init);
2893 #endif
2894
/*
 * Create the slab caches for file_lock_context and file_lock objects
 * (SLAB_PANIC: failure here is fatal), initialise each CPU's
 * file_lock_list, and set up the lease notifier chain.
 */
static int __init filelock_init(void)
{
	int i;

	flctx_cache = kmem_cache_create("file_lock_ctx",
			sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);

	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);

	for_each_possible_cpu(i) {
		struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);

		spin_lock_init(&fll->lock);
		INIT_HLIST_HEAD(&fll->hlist);
	}

	lease_notifier_chain_init();
	return 0;
}
core_initcall(filelock_init);