// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS file locking support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include "internal.h"

#define AFS_LOCK_GRANTED	0
#define AFS_LOCK_PENDING	1
#define AFS_LOCK_YOUR_TRY	2

struct workqueue_struct *afs_lock_manager;

static void afs_next_locker(struct afs_vnode *vnode, int error);
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl);
static void afs_fl_release_private(struct file_lock *fl);

static const struct file_lock_operations afs_lock_ops = {
	.fl_copy_lock		= afs_fl_copy_lock,
	.fl_release_private	= afs_fl_release_private,
};

static inline void afs_set_lock_state(struct afs_vnode *vnode, enum afs_lock_state state)
{
	_debug("STATE %u -> %u", vnode->lock_state, state);
	vnode->lock_state = state;
}

static atomic_t afs_file_lock_debug_id;

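/*
 * if the callback is broken on this vnode, then the lock may now be available
 */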
void afs_lock_may_be_available(struct afs_vnode *vnode)
{
	_enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);

	spin_lock(&vnode->lock);
	if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
		afs_next_locker(vnode, 0);
	trace_afs_flock_ev(vnode, NULL, afs_flock_callback_break, 0);
	spin_unlock(&vnode->lock);
}

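/*
 * the server lock times out after AFS_LOCKWAIT seconds unless extended, so
 * schedule the extension work to run about halfway through that period
 */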
static void afs_schedule_lock_extension(struct afs_vnode *vnode)
{
	ktime_t expires_at, now, duration;
	u64 duration_j;

	expires_at = ktime_add_ms(vnode->locked_at, AFS_LOCKWAIT * 1000 / 2);
	now = ktime_get_real();
	duration = ktime_sub(expires_at, now);
	if (duration <= 0)
		duration_j = 0;
	else
		duration_j = nsecs_to_jiffies(ktime_to_ns(duration));

	queue_delayed_work(afs_lock_manager, &vnode->lock_work, duration_j);
}

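/*
 * In the case of successful completion of a lock operation, record the time
 * the request was issued and schedule extension of the lock shortly before it
 * would expire.
 */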
void afs_lock_op_done(struct afs_call *call)
{
	struct afs_operation *op = call->op;
	struct afs_vnode *vnode = op->file[0].vnode;

	if (call->error == 0) {
		spin_lock(&vnode->lock);
		trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0);
		vnode->locked_at = call->issue_time;
		afs_schedule_lock_extension(vnode);
		spin_unlock(&vnode->lock);
	}
}

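/*
 * Grant locks that are queued on the vnode: all pending locks if the server
 * lock held is exclusive, otherwise just the pending readlocks (which may
 * jump ahead of queued writelocks).
 * - the caller must hold the vnode lock
 */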
static void afs_grant_locks(struct afs_vnode *vnode)
{
	struct file_lock *p, *_p;
	bool exclusive = (vnode->lock_type == AFS_LOCK_WRITE);

	list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
		if (!exclusive && p->fl_type == F_WRLCK)
			continue;

		list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks);
		p->fl_u.afs.state = AFS_LOCK_GRANTED;
		trace_afs_flock_op(vnode, p, afs_flock_op_grant);
		wake_up(&p->fl_wait);
	}
}

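/*
 * If an error is specified, reject every pending lock that matches the
 * authentication and type of the lock we failed to get.  If there are any
 * remaining lockers, wake up one of them to have a go.
 */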
static void afs_next_locker(struct afs_vnode *vnode, int error)
{
	struct file_lock *p, *_p, *next = NULL;
	struct key *key = vnode->lock_key;
	unsigned int fl_type = F_RDLCK;

	_enter("");

	if (vnode->lock_type == AFS_LOCK_WRITE)
		fl_type = F_WRLCK;

	list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
		if (error &&
		    p->fl_type == fl_type &&
		    afs_file_key(p->fl_file) == key) {
			list_del_init(&p->fl_u.afs.link);
			p->fl_u.afs.state = error;
			wake_up(&p->fl_wait);
		}

		/* Select the next locker to hand off to. */
		if (next &&
		    (next->fl_type == F_WRLCK || p->fl_type == F_RDLCK))
			continue;
		next = p;
	}

	vnode->lock_key = NULL;
	key_put(key);

	if (next) {
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
		next->fl_u.afs.state = AFS_LOCK_YOUR_TRY;
		trace_afs_flock_op(vnode, next, afs_flock_op_wake);
		wake_up(&next->fl_wait);
	} else {
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_NONE);
		trace_afs_flock_ev(vnode, NULL, afs_flock_no_lockers, 0);
	}

	_leave("");
}

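/*
 * Kill off all waiters in the pending lock queue due to the vnode being
 * deleted.
 */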
static void afs_kill_lockers_enoent(struct afs_vnode *vnode)
{
	struct file_lock *p;

	afs_set_lock_state(vnode, AFS_VNODE_LOCK_DELETED);

	while (!list_empty(&vnode->pending_locks)) {
		p = list_entry(vnode->pending_locks.next,
			       struct file_lock, fl_u.afs.link);
		list_del_init(&p->fl_u.afs.link);
		p->fl_u.afs.state = -ENOENT;
		wake_up(&p->fl_wait);
	}

	key_put(vnode->lock_key);
	vnode->lock_key = NULL;
}

static void afs_lock_success(struct afs_operation *op)
{
	_enter("op=%08x", op->debug_id);
	afs_vnode_commit_status(op, &op->file[0]);
}

static const struct afs_operation_ops afs_set_lock_operation = {
	.issue_afs_rpc	= afs_fs_set_lock,
	.issue_yfs_rpc	= yfs_fs_set_lock,
	.success	= afs_lock_success,
	.aborted	= afs_check_for_remote_deletion,
};

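/*
 * Get a lock on a file
 */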
static int afs_set_lock(struct afs_vnode *vnode, struct key *key,
			afs_lock_type_t type)
{
	struct afs_operation *op;

	_enter("%s{%llx:%llu.%u},%x,%u",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key), type);

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);

	op->lock.type	= type;
	op->ops		= &afs_set_lock_operation;
	return afs_do_sync_operation(op);
}

static const struct afs_operation_ops afs_extend_lock_operation = {
	.issue_afs_rpc	= afs_fs_extend_lock,
	.issue_yfs_rpc	= yfs_fs_extend_lock,
	.success	= afs_lock_success,
};

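/*
 * Extend a lock on a file
 */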
static int afs_extend_lock(struct afs_vnode *vnode, struct key *key)
{
	struct afs_operation *op;

	_enter("%s{%llx:%llu.%u},%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);

	op->flags	|= AFS_OPERATION_UNINTR;
	op->ops		= &afs_extend_lock_operation;
	return afs_do_sync_operation(op);
}

static const struct afs_operation_ops afs_release_lock_operation = {
	.issue_afs_rpc	= afs_fs_release_lock,
	.issue_yfs_rpc	= yfs_fs_release_lock,
	.success	= afs_lock_success,
};

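/*
 * Release a lock on a file
 */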
static int afs_release_lock(struct afs_vnode *vnode, struct key *key)
{
	struct afs_operation *op;

	_enter("%s{%llx:%llu.%u},%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);

	op->flags	|= AFS_OPERATION_UNINTR;
	op->ops		= &afs_release_lock_operation;
	return afs_do_sync_operation(op);
}

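/*
 * Work item for the lock manager: releases a server lock that's no longer
 * needed, extends a held lock before it times out, and retries a lock that
 * we're waiting on after a callback break.
 */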
void afs_lock_work(struct work_struct *work)
{
	struct afs_vnode *vnode =
		container_of(work, struct afs_vnode, lock_work.work);
	struct key *key;
	int ret;

	_enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);

	spin_lock(&vnode->lock);

again:
	_debug("wstate %u for %p", vnode->lock_state, vnode);
	switch (vnode->lock_state) {
	case AFS_VNODE_LOCK_NEED_UNLOCK:
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_UNLOCKING);
		trace_afs_flock_ev(vnode, NULL, afs_flock_work_unlocking, 0);
		spin_unlock(&vnode->lock);

		/* attempt to release the server lock; if it fails, we just
		 * wait 5 minutes and it'll time out anyway */
		ret = afs_release_lock(vnode, vnode->lock_key);
		if (ret < 0 && vnode->lock_state != AFS_VNODE_LOCK_DELETED) {
			trace_afs_flock_ev(vnode, NULL, afs_flock_release_fail,
					   ret);
			pr_warn("AFS: Failed to release lock on {%llx:%llx} error %d\n",
				vnode->fid.vid, vnode->fid.vnode, ret);
		}

		spin_lock(&vnode->lock);
		if (ret == -ENOENT)
			afs_kill_lockers_enoent(vnode);
		else
			afs_next_locker(vnode, 0);
		spin_unlock(&vnode->lock);
		return;

	/* If we've already got a lock, then it must be time to extend that
	 * lock as AFS locks time out after 5 minutes.
	 */
	case AFS_VNODE_LOCK_GRANTED:
		_debug("extend");

		ASSERT(!list_empty(&vnode->granted_locks));

		key = key_get(vnode->lock_key);
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_EXTENDING);
		trace_afs_flock_ev(vnode, NULL, afs_flock_work_extending, 0);
		spin_unlock(&vnode->lock);

		ret = afs_extend_lock(vnode, key); /* RPC */
		key_put(key);

		if (ret < 0) {
			trace_afs_flock_ev(vnode, NULL, afs_flock_extend_fail,
					   ret);
			pr_warn("AFS: Failed to extend lock on {%llx:%llx} error %d\n",
				vnode->fid.vid, vnode->fid.vnode, ret);
		}

		spin_lock(&vnode->lock);

		if (ret == -ENOENT) {
			afs_kill_lockers_enoent(vnode);
			spin_unlock(&vnode->lock);
			return;
		}

		if (vnode->lock_state != AFS_VNODE_LOCK_EXTENDING)
			goto again;
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);

		if (ret != 0)
			queue_delayed_work(afs_lock_manager, &vnode->lock_work,
					   HZ * 10);
		spin_unlock(&vnode->lock);
		_leave(" [ext]");
		return;

	/* If we're waiting for a callback to indicate lock release, we can't
	 * actually rely on this, so need to recheck at regular intervals.  The
	 * problem is that the server might not notify us if the lock just
	 * expires (say because a client died) rather than being explicitly
	 * released.
	 */
	case AFS_VNODE_LOCK_WAITING_FOR_CB:
		_debug("retry");
		afs_next_locker(vnode, 0);
		spin_unlock(&vnode->lock);
		return;

	case AFS_VNODE_LOCK_DELETED:
		afs_kill_lockers_enoent(vnode);
		spin_unlock(&vnode->lock);
		return;

	default:
		/* Looks like a lock request was withdrawn. */
		spin_unlock(&vnode->lock);
		_leave(" [no]");
		return;
	}
}

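/*
 * Hand responsibility for unlocking the vnode on the server over to the lock
 * manager work item once there are no granted locks left.
 * - the caller must hold the vnode lock
 */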
static void afs_defer_unlock(struct afs_vnode *vnode)
{
	_enter("%u", vnode->lock_state);

	if (list_empty(&vnode->granted_locks) &&
	    (vnode->lock_state == AFS_VNODE_LOCK_GRANTED ||
	     vnode->lock_state == AFS_VNODE_LOCK_EXTENDING)) {
		cancel_delayed_work(&vnode->lock_work);

		afs_set_lock_state(vnode, AFS_VNODE_LOCK_NEED_UNLOCK);
		trace_afs_flock_ev(vnode, NULL, afs_flock_defer_unlock, 0);
		queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0);
	}
}

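/*
 * Check that our view of the file metadata is up to date and check to see
 * whether we think that we have a locking permit.
 */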
static int afs_do_setlk_check(struct afs_vnode *vnode, struct key *key,
			      enum afs_flock_mode mode, afs_lock_type_t type)
{
	afs_access_t access;
	int ret;

	/* Make sure we've got a callback on this file and that our view of the
	 * data version is up to date.
	 */
	ret = afs_validate(vnode, key);
	if (ret < 0)
		return ret;

	/* Check the permission set to see if we're actually going to be
	 * allowed to get a lock on this file.
	 */
	ret = afs_check_permit(vnode, key, &access);
	if (ret < 0)
		return ret;

	/* At a rough estimation, you need LOCK, WRITE or INSERT perm to
	 * read-lock the file and WRITE or INSERT perm to write-lock the file.
	 *
	 * We can't rely on the server to do this for us since if we want to
	 * share a read lock that we already have, we won't go to the server.
	 */
	if (type == AFS_LOCK_READ) {
		if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE | AFS_ACE_LOCK)))
			return -EACCES;
	} else {
		if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE)))
			return -EACCES;
	}

	return 0;
}

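/*
 * request a lock on a file on the server
 */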
static int afs_do_setlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = locks_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	enum afs_flock_mode mode = AFS_FS_S(inode->i_sb)->flock_mode;
	afs_lock_type_t type;
	struct key *key = afs_file_key(file);
	bool partial, no_server_lock = false;
	int ret;

	if (mode == afs_flock_mode_unset)
		mode = afs_flock_mode_openafs;

	_enter("{%llx:%llu},%llu-%llu,%u,%u",
	       vnode->fid.vid, vnode->fid.vnode,
	       fl->fl_start, fl->fl_end, fl->fl_type, mode);

	fl->fl_ops = &afs_lock_ops;
	INIT_LIST_HEAD(&fl->fl_u.afs.link);
	fl->fl_u.afs.state = AFS_LOCK_PENDING;

	partial = (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX);
	type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
	if (mode == afs_flock_mode_write && partial)
		type = AFS_LOCK_WRITE;

	ret = afs_do_setlk_check(vnode, key, mode, type);
	if (ret < 0)
		return ret;

	trace_afs_flock_op(vnode, fl, afs_flock_op_set_lock);

	/* AFS3 protocol only supports full-file locks and doesn't provide any
	 * method of upgrade/downgrade, so we need to emulate for partial-file
	 * locks.
	 *
	 * The OpenAFS client only gets a server lock for a full-file lock and
	 * keeps partial-file locks local.  Allow this behaviour to be emulated
	 * (as the default).
	 */
	if (mode == afs_flock_mode_local ||
	    (partial && mode == afs_flock_mode_openafs)) {
		no_server_lock = true;
		goto skip_server_lock;
	}

	spin_lock(&vnode->lock);
	list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);

	ret = -ENOENT;
	if (vnode->lock_state == AFS_VNODE_LOCK_DELETED)
		goto error_unlock;

	/* If we've already got a lock on the server then try to move to having
	 * the VFS grant the requested lock.  Note that this means that other
	 * clients may get starved out.
	 */
	_debug("try %u", vnode->lock_state);
	if (vnode->lock_state == AFS_VNODE_LOCK_GRANTED) {
		if (type == AFS_LOCK_READ) {
			_debug("instant readlock");
			list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
			fl->fl_u.afs.state = AFS_LOCK_GRANTED;
			goto vnode_is_locked_u;
		}

		if (vnode->lock_type == AFS_LOCK_WRITE) {
			_debug("instant writelock");
			list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
			fl->fl_u.afs.state = AFS_LOCK_GRANTED;
			goto vnode_is_locked_u;
		}
	}

	if (vnode->lock_state == AFS_VNODE_LOCK_NONE &&
	    !(fl->fl_flags & FL_SLEEP)) {
		ret = -EAGAIN;
		if (type == AFS_LOCK_READ) {
			if (vnode->status.lock_count == -1)
				goto lock_is_contended; /* Write locked */
		} else {
			if (vnode->status.lock_count != 0)
				goto lock_is_contended; /* Locked */
		}
	}

	if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
		goto need_to_wait;

try_to_lock:
	/* We don't have a lock on this vnode and we aren't currently waiting
	 * for one either, so ask the server for a lock.
	 *
	 * Note that we need to be careful if we get interrupted by a signal
	 * after dispatching the request as we may still get the lock, even
	 * though we don't wait for the reply (it's not too bad a problem - the
	 * lock will expire in 5 mins anyway).
	 */
	trace_afs_flock_ev(vnode, fl, afs_flock_try_to_lock, 0);
	vnode->lock_key = key_get(key);
	vnode->lock_type = type;
	afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
	spin_unlock(&vnode->lock);

	ret = afs_set_lock(vnode, key, type); /* RPC */

	spin_lock(&vnode->lock);
	switch (ret) {
	case -EKEYREJECTED:
	case -EKEYEXPIRED:
	case -EKEYREVOKED:
	case -EPERM:
	case -EACCES:
		fl->fl_u.afs.state = ret;
		trace_afs_flock_ev(vnode, fl, afs_flock_fail_perm, ret);
		list_del_init(&fl->fl_u.afs.link);
		afs_next_locker(vnode, ret);
		goto error_unlock;

	case -ENOENT:
		fl->fl_u.afs.state = ret;
		trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
		list_del_init(&fl->fl_u.afs.link);
		afs_kill_lockers_enoent(vnode);
		goto error_unlock;

	default:
		fl->fl_u.afs.state = ret;
		trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
		list_del_init(&fl->fl_u.afs.link);
		afs_next_locker(vnode, 0);
		goto error_unlock;

	case -EWOULDBLOCK:
		/* The server doesn't have a lock-waiting queue, so the client
		 * will have to retry.  The server will break the callbacks on
		 * the file when the lock is released.
		 */
		ASSERT(list_empty(&vnode->granted_locks));
		ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link);
		goto lock_is_contended;

	case 0:
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);
		trace_afs_flock_ev(vnode, fl, afs_flock_acquired, type);
		afs_grant_locks(vnode);
		goto vnode_is_locked_u;
	}

vnode_is_locked_u:
	spin_unlock(&vnode->lock);
vnode_is_locked:
	/* the lock has been granted by the server... */
	ASSERTCMP(fl->fl_u.afs.state, ==, AFS_LOCK_GRANTED);

skip_server_lock:
	/* ... but the VFS still needs to distribute access on this client. */
	trace_afs_flock_ev(vnode, fl, afs_flock_vfs_locking, 0);
	ret = locks_lock_file_wait(file, fl);
	trace_afs_flock_ev(vnode, fl, afs_flock_vfs_lock, ret);
	if (ret < 0)
		goto vfs_rejected_lock;

	/* Again, make sure we've got a callback on this file and that our view
	 * of the data version is up to date (we ignore errors incurred here
	 * and deal with the consequences elsewhere).
	 */
	afs_validate(vnode, key);
	_leave(" = 0");
	return 0;

lock_is_contended:
	if (!(fl->fl_flags & FL_SLEEP)) {
		list_del_init(&fl->fl_u.afs.link);
		afs_next_locker(vnode, 0);
		ret = -EAGAIN;
		goto error_unlock;
	}

	afs_set_lock_state(vnode, AFS_VNODE_LOCK_WAITING_FOR_CB);
	trace_afs_flock_ev(vnode, fl, afs_flock_would_block, ret);
	queue_delayed_work(afs_lock_manager, &vnode->lock_work, HZ * 5);

need_to_wait:
	/* We're going to have to wait.  Either this client doesn't have a lock
	 * on the server yet and we need to wait for a callback to occur, or
	 * the client does have a lock on the server, but it's shared and we
	 * need an exclusive lock.
	 */
	spin_unlock(&vnode->lock);

	trace_afs_flock_ev(vnode, fl, afs_flock_waiting, 0);
	ret = wait_event_interruptible(fl->fl_wait,
				       fl->fl_u.afs.state != AFS_LOCK_PENDING);
	trace_afs_flock_ev(vnode, fl, afs_flock_waited, ret);

	if (fl->fl_u.afs.state >= 0 && fl->fl_u.afs.state != AFS_LOCK_GRANTED) {
		spin_lock(&vnode->lock);

		switch (fl->fl_u.afs.state) {
		case AFS_LOCK_YOUR_TRY:
			fl->fl_u.afs.state = AFS_LOCK_PENDING;
			goto try_to_lock;
		case AFS_LOCK_PENDING:
			if (ret > 0) {
				/* We need to retry the lock.  We may not be
				 * notified by the server if it just expired
				 * rather than being released.
				 */
				ASSERTCMP(vnode->lock_state, ==, AFS_VNODE_LOCK_WAITING_FOR_CB);
				afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
				fl->fl_u.afs.state = AFS_LOCK_PENDING;
				goto try_to_lock;
			}
			goto error_unlock;
		case AFS_LOCK_GRANTED:
		default:
			break;
		}

		spin_unlock(&vnode->lock);
	}

	if (fl->fl_u.afs.state == AFS_LOCK_GRANTED)
		goto vnode_is_locked;
	ret = fl->fl_u.afs.state;
	goto error;

vfs_rejected_lock:
	/* The VFS rejected the lock we just obtained, so we have to discard
	 * what we just got.  We defer this to the lock manager work item to
	 * deal with.
	 */
	_debug("vfs refused %d", ret);
	if (no_server_lock)
		goto error;
	spin_lock(&vnode->lock);
	list_del_init(&fl->fl_u.afs.link);
	afs_defer_unlock(vnode);

error_unlock:
	spin_unlock(&vnode->lock);
error:
	_leave(" = %d", ret);
	return ret;
}

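/*
 * unlock on a file on this client; any server lock that's no longer needed is
 * released afterwards by the lock manager
 */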
static int afs_do_unlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	int ret;

	_enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);

	trace_afs_flock_op(vnode, fl, afs_flock_op_unlock);

	/* Flush all pending writes before doing anything with locks. */
	vfs_fsync(file, 0);

	ret = locks_lock_file_wait(file, fl);
	_leave(" = %d [%u]", ret, vnode->lock_state);
	return ret;
}

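/*
 * return information about a lock we currently hold, if indeed we hold one
 */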
static int afs_do_getlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	struct key *key = afs_file_key(file);
	int ret, lock_count;

	_enter("");

	if (vnode->lock_state == AFS_VNODE_LOCK_DELETED)
		return -ENOENT;

	fl->fl_type = F_UNLCK;

	/* check local lock records first */
	posix_test_lock(file, fl);
	if (fl->fl_type == F_UNLCK) {
		/* no local locks; consult the server */
		ret = afs_fetch_status(vnode, key, false, NULL);
		if (ret < 0)
			goto error;

		lock_count = READ_ONCE(vnode->status.lock_count);
		if (lock_count != 0) {
			if (lock_count > 0)
				fl->fl_type = F_RDLCK;
			else
				fl->fl_type = F_WRLCK;
			fl->fl_start = 0;
			fl->fl_end = OFFSET_MAX;
			fl->fl_pid = 0;
		}
	}

	ret = 0;
error:
	_leave(" = %d [%hd]", ret, fl->fl_type);
	return ret;
}

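/*
 * manage POSIX locks on a file
 */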
int afs_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	enum afs_flock_operation op;
	int ret;

	_enter("{%llx:%llu},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags,
	       (long long) fl->fl_start, (long long) fl->fl_end);

	if (IS_GETLK(cmd))
		return afs_do_getlk(file, fl);

	fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
	trace_afs_flock_op(vnode, fl, afs_flock_op_lock);

	if (fl->fl_type == F_UNLCK)
		ret = afs_do_unlk(file, fl);
	else
		ret = afs_do_setlk(file, fl);

	switch (ret) {
	case 0:		op = afs_flock_op_return_ok; break;
	case -EAGAIN:	op = afs_flock_op_return_eagain; break;
	case -EDEADLK:	op = afs_flock_op_return_edeadlk; break;
	default:	op = afs_flock_op_return_error; break;
	}
	trace_afs_flock_op(vnode, fl, op);
	return ret;
}

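/*
 * manage FLOCK locks on a file
 */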
int afs_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	enum afs_flock_operation op;
	int ret;

	_enter("{%llx:%llu},%d,{t=%x,fl=%x}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags);

	/* Only flock() locks (FL_FLOCK) are handled here; anything else is
	 * rejected.
	 */
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;

	fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
	trace_afs_flock_op(vnode, fl, afs_flock_op_flock);

	/* we're simulating flock() locks using posix locks on the server */
	if (fl->fl_type == F_UNLCK)
		ret = afs_do_unlk(file, fl);
	else
		ret = afs_do_setlk(file, fl);

	switch (ret) {
	case 0:		op = afs_flock_op_return_ok; break;
	case -EAGAIN:	op = afs_flock_op_return_eagain; break;
	case -EDEADLK:	op = afs_flock_op_return_edeadlk; break;
	default:	op = afs_flock_op_return_error; break;
	}
	trace_afs_flock_op(vnode, fl, op);
	return ret;
}

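/*
 * the POSIX lock management core VFS code copies the lock record and adds the
 * copy into its own list, so we need to add that copy to the vnode's lock
 * queue in the same place as the original (which will be deleted shortly
 * after)
 */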
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));

	_enter("");

	new->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);

	spin_lock(&vnode->lock);
	trace_afs_flock_op(vnode, new, afs_flock_op_copy_lock);
	list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link);
	spin_unlock(&vnode->lock);
}

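/*
 * need to remove this lock from the vnode queue when it's removed from the
 * VFS's list
 */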
static void afs_fl_release_private(struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));

	_enter("");

	spin_lock(&vnode->lock);

	trace_afs_flock_op(vnode, fl, afs_flock_op_release_lock);
	list_del_init(&fl->fl_u.afs.link);
	if (list_empty(&vnode->granted_locks))
		afs_defer_unlock(vnode);

	_debug("state %u for %p", vnode->lock_state, vnode);
	spin_unlock(&vnode->lock);
}