// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
 * Copyright (C) 2019 Samsung Electronics Co., Ltd.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "glob.h"
#include "vfs_cache.h"
#include "oplock.h"
#include "vfs.h"
#include "connection.h"
#include "mgmt/tree_connect.h"
#include "mgmt/user_session.h"
#include "smb_common.h"

#define S_DEL_PENDING		1
#define S_DEL_ON_CLS		2
#define S_DEL_ON_CLS_STREAM	8

static unsigned int inode_hash_mask __read_mostly;
static unsigned int inode_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static DEFINE_RWLOCK(inode_hash_lock);

static struct ksmbd_file_table global_ft;
static atomic_long_t fd_limit;
static struct kmem_cache *filp_cache;

void ksmbd_set_fd_limit(unsigned long limit)
{
	limit = min(limit, get_max_files());
	atomic_long_set(&fd_limit, limit);
}

static bool fd_limit_depleted(void)
{
	long v = atomic_long_dec_return(&fd_limit);

	if (v >= 0)
		return false;
	atomic_long_inc(&fd_limit);
	return true;
}

static void fd_limit_close(void)
{
	atomic_long_inc(&fd_limit);
}
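
/*
 * Illustrative sketch (not code from this file): fd_limit is a simple
 * token bucket.  Each volatile-id open takes a token via
 * fd_limit_depleted() and each close returns one via fd_limit_close(),
 * so an open beyond the configured limit fails with -EMFILE.  Assuming
 * a hypothetical caller:
 *
 *	ksmbd_set_fd_limit(1024);	// capped at get_max_files()
 *
 *	if (fd_limit_depleted())
 *		return -EMFILE;		// over the limit; token was returned
 *	...
 *	fd_limit_close();		// give the token back at close time
 */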

/*
 * INODE hash
 */

static unsigned long inode_hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	/*
	 * Mix the inode number with its superblock pointer so that equal
	 * inode numbers from different filesystems land in different
	 * buckets (the same multiplicative scheme fs/inode.c uses).
	 */
	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
		L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> inode_hash_shift);
	return tmp & inode_hash_mask;
}

static struct ksmbd_inode *__ksmbd_inode_lookup(struct inode *inode)
{
	struct hlist_head *head = inode_hashtable +
		inode_hash(inode->i_sb, inode->i_ino);
	struct ksmbd_inode *ci = NULL, *ret_ci = NULL;

	hlist_for_each_entry(ci, head, m_hash) {
		if (ci->m_inode == inode) {
			if (atomic_inc_not_zero(&ci->m_count))
				ret_ci = ci;
			break;
		}
	}
	return ret_ci;
}

static struct ksmbd_inode *ksmbd_inode_lookup(struct ksmbd_file *fp)
{
	return __ksmbd_inode_lookup(file_inode(fp->filp));
}

static struct ksmbd_inode *ksmbd_inode_lookup_by_vfsinode(struct inode *inode)
{
	struct ksmbd_inode *ci;

	read_lock(&inode_hash_lock);
	ci = __ksmbd_inode_lookup(inode);
	read_unlock(&inode_hash_lock);
	return ci;
}

int ksmbd_query_inode_status(struct inode *inode)
{
	struct ksmbd_inode *ci;
	int ret = KSMBD_INODE_STATUS_UNKNOWN;

	read_lock(&inode_hash_lock);
	ci = __ksmbd_inode_lookup(inode);
	if (ci) {
		ret = KSMBD_INODE_STATUS_OK;
		if (ci->m_flags & S_DEL_PENDING)
			ret = KSMBD_INODE_STATUS_PENDING_DELETE;
		atomic_dec(&ci->m_count);
	}
	read_unlock(&inode_hash_lock);
	return ret;
}
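
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * handler that must refuse to act on a file whose handles have marked
 * it for deletion can branch on the tri-state result:
 *
 *	switch (ksmbd_query_inode_status(inode)) {
 *	case KSMBD_INODE_STATUS_PENDING_DELETE:
 *		return -EBUSY;		// delete pending; reject the op
 *	case KSMBD_INODE_STATUS_OK:
 *	case KSMBD_INODE_STATUS_UNKNOWN:
 *		break;			// open elsewhere, or not cached
 *	}
 */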

bool ksmbd_inode_pending_delete(struct ksmbd_file *fp)
{
	return (fp->f_ci->m_flags & S_DEL_PENDING);
}

void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp)
{
	fp->f_ci->m_flags |= S_DEL_PENDING;
}

void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp)
{
	fp->f_ci->m_flags &= ~S_DEL_PENDING;
}

void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp,
				  int file_info)
{
	/*
	 * A named stream is deleted by removing its backing xattr at
	 * close time, so it gets its own flag instead of S_DEL_ON_CLS.
	 */
	if (ksmbd_stream_fd(fp)) {
		fp->f_ci->m_flags |= S_DEL_ON_CLS_STREAM;
		return;
	}

	fp->f_ci->m_flags |= S_DEL_ON_CLS;
}
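
/*
 * Illustrative sketch (hypothetical call sequence, not part of this
 * file): the delete-on-close flags set above are consumed in
 * __ksmbd_inode_close() when the last ksmbd_inode reference drops, at
 * which point the file (or stream xattr) is actually removed:
 *
 *	ksmbd_fd_set_delete_on_close(fp, file_info);
 *	...
 *	ksmbd_close_fd(work, volatile_id);	// last close unlinks
 */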

/* caller must hold inode_hash_lock for writing */
static void ksmbd_inode_hash(struct ksmbd_inode *ci)
{
	struct hlist_head *b = inode_hashtable +
		inode_hash(ci->m_inode->i_sb, ci->m_inode->i_ino);

	hlist_add_head(&ci->m_hash, b);
}

static void ksmbd_inode_unhash(struct ksmbd_inode *ci)
{
	write_lock(&inode_hash_lock);
	hlist_del_init(&ci->m_hash);
	write_unlock(&inode_hash_lock);
}

static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
{
	ci->m_inode = file_inode(fp->filp);
	atomic_set(&ci->m_count, 1);
	atomic_set(&ci->op_count, 0);
	atomic_set(&ci->sop_count, 0);
	ci->m_flags = 0;
	ci->m_fattr = 0;
	INIT_LIST_HEAD(&ci->m_fp_list);
	INIT_LIST_HEAD(&ci->m_op_list);
	rwlock_init(&ci->m_lock);
	return 0;
}

static struct ksmbd_inode *ksmbd_inode_get(struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci, *tmpci;
	int rc;

	read_lock(&inode_hash_lock);
	ci = ksmbd_inode_lookup(fp);
	read_unlock(&inode_hash_lock);
	if (ci)
		return ci;

	ci = kmalloc(sizeof(struct ksmbd_inode), GFP_KERNEL);
	if (!ci)
		return NULL;

	rc = ksmbd_inode_init(ci, fp);
	if (rc) {
		pr_err("inode initialization failed\n");
		kfree(ci);
		return NULL;
	}

	/*
	 * Another thread may have inserted an entry for the same inode
	 * between dropping the read lock and taking the write lock, so
	 * look up again and discard our copy if we lost the race.
	 */
	write_lock(&inode_hash_lock);
	tmpci = ksmbd_inode_lookup(fp);
	if (!tmpci) {
		ksmbd_inode_hash(ci);
	} else {
		kfree(ci);
		ci = tmpci;
	}
	write_unlock(&inode_hash_lock);
	return ci;
}
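
/*
 * Illustrative sketch: the lookup/allocate/re-lookup shape above is the
 * standard optimistic-insert pattern for rwlock-protected hash tables,
 * reduced to its skeleton (hypothetical names):
 *
 *	read_lock(&lock);  obj = find(key);  read_unlock(&lock);
 *	if (obj)
 *		return obj;
 *	new = alloc();			// may sleep, so no lock held
 *	write_lock(&lock);
 *	obj = find(key);		// re-check under the write lock
 *	if (!obj) {
 *		insert(new);
 *		obj = new;
 *	} else {
 *		free(new);		// lost the race
 *	}
 *	write_unlock(&lock);
 *	return obj;
 */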

static void ksmbd_inode_free(struct ksmbd_inode *ci)
{
	ksmbd_inode_unhash(ci);
	kfree(ci);
}

static void ksmbd_inode_put(struct ksmbd_inode *ci)
{
	if (atomic_dec_and_test(&ci->m_count))
		ksmbd_inode_free(ci);
}

int __init ksmbd_inode_hash_init(void)
{
	unsigned int loop;
	unsigned long numentries = 16384;
	unsigned long bucketsize = sizeof(struct hlist_head);
	unsigned long size;

	inode_hash_shift = ilog2(numentries);
	inode_hash_mask = (1 << inode_hash_shift) - 1;

	size = bucketsize << inode_hash_shift;

	/* allocate the bucket array and start every chain empty */
	inode_hashtable = vmalloc(size);
	if (!inode_hashtable)
		return -ENOMEM;

	for (loop = 0; loop < (1U << inode_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
	return 0;
}
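
/*
 * Worked example of the sizing above: with numentries = 16384,
 * inode_hash_shift = ilog2(16384) = 14 and inode_hash_mask = 0x3fff, so
 * the table is 16384 buckets of one pointer each -- 128 KiB on a 64-bit
 * kernel.  vmalloc() is used because an allocation of that size need
 * not be physically contiguous.
 */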

void ksmbd_release_inode_hash(void)
{
	vfree(inode_hashtable);
}

static void __ksmbd_inode_close(struct ksmbd_file *fp)
{
	struct dentry *dir, *dentry;
	struct ksmbd_inode *ci = fp->f_ci;
	int err;
	struct file *filp;

	filp = fp->filp;
	if (ksmbd_stream_fd(fp) && (ci->m_flags & S_DEL_ON_CLS_STREAM)) {
		ci->m_flags &= ~S_DEL_ON_CLS_STREAM;
		err = ksmbd_vfs_remove_xattr(file_mnt_user_ns(filp),
					     filp->f_path.dentry,
					     fp->stream.name);
		if (err)
			pr_err("remove xattr failed: %s\n",
			       fp->stream.name);
	}

	if (atomic_dec_and_test(&ci->m_count)) {
		write_lock(&ci->m_lock);
		if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING)) {
			dentry = filp->f_path.dentry;
			dir = dentry->d_parent;
			ci->m_flags &= ~(S_DEL_ON_CLS | S_DEL_PENDING);
			/* the unlink may sleep, so drop the lock around it */
			write_unlock(&ci->m_lock);
			ksmbd_vfs_unlink(file_mnt_user_ns(filp), dir, dentry);
			write_lock(&ci->m_lock);
		}
		write_unlock(&ci->m_lock);

		ksmbd_inode_free(ci);
	}
}

static void __ksmbd_remove_durable_fd(struct ksmbd_file *fp)
{
	if (!has_file_id(fp->persistent_id))
		return;

	write_lock(&global_ft.lock);
	idr_remove(global_ft.idr, fp->persistent_id);
	write_unlock(&global_ft.lock);
}

static void __ksmbd_remove_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
{
	if (!has_file_id(fp->volatile_id))
		return;

	write_lock(&fp->f_ci->m_lock);
	list_del_init(&fp->node);
	write_unlock(&fp->f_ci->m_lock);

	write_lock(&ft->lock);
	idr_remove(ft->idr, fp->volatile_id);
	write_unlock(&ft->lock);
}

static void __ksmbd_close_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
{
	struct file *filp;
	struct ksmbd_lock *smb_lock, *tmp_lock;

	fd_limit_close();
	__ksmbd_remove_durable_fd(fp);
	__ksmbd_remove_fd(ft, fp);

	close_id_del_oplock(fp);
	filp = fp->filp;

	__ksmbd_inode_close(fp);
	if (!IS_ERR_OR_NULL(filp))
		fput(filp);

	/*
	 * fp's reference count is zero here, so nothing else can reach
	 * fp->lock_list; it is safe to walk it without holding fp->f_lock.
	 */
	list_for_each_entry_safe(smb_lock, tmp_lock, &fp->lock_list, flist) {
		spin_lock(&fp->conn->llist_lock);
		list_del(&smb_lock->clist);
		spin_unlock(&fp->conn->llist_lock);

		list_del(&smb_lock->flist);
		locks_free_lock(smb_lock->fl);
		kfree(smb_lock);
	}

	if (ksmbd_stream_fd(fp))
		kfree(fp->stream.name);
	kmem_cache_free(filp_cache, fp);
}
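
/*
 * Illustrative summary (not code from this file): __ksmbd_close_fd()
 * tears a handle down strictly outside-in -- return the fd-limit token,
 * drop both ids so no new lookup can find fp, break the oplock, release
 * the ksmbd_inode (which may unlink a delete-on-close file), fput() the
 * VFS file, free any remaining byte-range locks, and only then free fp.
 */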

static struct ksmbd_file *ksmbd_fp_get(struct ksmbd_file *fp)
{
	if (!atomic_inc_not_zero(&fp->refcount))
		return NULL;
	return fp;
}

static struct ksmbd_file *__ksmbd_lookup_fd(struct ksmbd_file_table *ft,
					    u64 id)
{
	struct ksmbd_file *fp;

	if (!has_file_id(id))
		return NULL;

	read_lock(&ft->lock);
	fp = idr_find(ft->idr, id);
	if (fp)
		fp = ksmbd_fp_get(fp);
	read_unlock(&ft->lock);
	return fp;
}
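
/*
 * Illustrative sketch (hypothetical caller): every successful lookup
 * returns fp with an extra reference taken under the table lock, so the
 * caller must pair it with ksmbd_fd_put():
 *
 *	fp = ksmbd_lookup_fd_fast(work, id);
 *	if (!fp)
 *		return -ENOENT;
 *	// ... use fp->filp ...
 *	ksmbd_fd_put(work, fp);	// may free fp if this was the last ref
 */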

static void __put_fd_final(struct ksmbd_work *work, struct ksmbd_file *fp)
{
	__ksmbd_close_fd(&work->sess->file_table, fp);
	atomic_dec(&work->conn->stats.open_files_count);
}

static void set_close_state_blocked_works(struct ksmbd_file *fp)
{
	struct ksmbd_work *cancel_work, *ctmp;

	spin_lock(&fp->f_lock);
	list_for_each_entry_safe(cancel_work, ctmp, &fp->blocked_works,
				 fp_entry) {
		list_del(&cancel_work->fp_entry);
		cancel_work->state = KSMBD_WORK_CLOSED;
		/* wake the blocked request so it can observe the close */
		cancel_work->cancel_fn(cancel_work->cancel_argv);
	}
	spin_unlock(&fp->f_lock);
}

int ksmbd_close_fd(struct ksmbd_work *work, u64 id)
{
	struct ksmbd_file *fp;
	struct ksmbd_file_table *ft;

	if (!has_file_id(id))
		return 0;

	ft = &work->sess->file_table;
	read_lock(&ft->lock);
	fp = idr_find(ft->idr, id);
	if (fp) {
		set_close_state_blocked_works(fp);

		if (!atomic_dec_and_test(&fp->refcount))
			fp = NULL;
	}
	read_unlock(&ft->lock);

	if (!fp)
		return -EINVAL;

	__put_fd_final(work, fp);
	return 0;
}

void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp)
{
	if (!fp)
		return;

	if (!atomic_dec_and_test(&fp->refcount))
		return;
	__put_fd_final(work, fp);
}

static bool __sanity_check(struct ksmbd_tree_connect *tcon, struct ksmbd_file *fp)
{
	if (!fp)
		return false;
	if (fp->tcon != tcon)
		return false;
	return true;
}

struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work, u64 id)
{
	return __ksmbd_lookup_fd(&work->sess->file_table, id);
}

struct ksmbd_file *ksmbd_lookup_fd_fast(struct ksmbd_work *work, u64 id)
{
	struct ksmbd_file *fp = __ksmbd_lookup_fd(&work->sess->file_table, id);

	if (__sanity_check(work->tcon, fp))
		return fp;

	ksmbd_fd_put(work, fp);
	return NULL;
}

struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
					u64 pid)
{
	struct ksmbd_file *fp;

	if (!has_file_id(id)) {
		/* fall back to the ids cached from a compound request */
		id = work->compound_fid;
		pid = work->compound_pfid;
	}

	fp = __ksmbd_lookup_fd(&work->sess->file_table, id);
	if (!__sanity_check(work->tcon, fp)) {
		ksmbd_fd_put(work, fp);
		return NULL;
	}
	if (fp->persistent_id != pid) {
		ksmbd_fd_put(work, fp);
		return NULL;
	}
	return fp;
}
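
/*
 * Illustrative sketch: in an SMB2 compound (e.g. CREATE followed by
 * GETINFO in one request), related operations carry an all-ones file id
 * meaning "the file the previous command opened".  Such an id fails
 * has_file_id(), which is why ksmbd_lookup_fd_slow() substitutes
 * work->compound_fid / work->compound_pfid (hypothetical handler):
 *
 *	fp = ksmbd_lookup_fd_slow(work, volatile_id, persistent_id);
 *	if (!fp)
 *		return -ENOENT;
 */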

struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id)
{
	return __ksmbd_lookup_fd(&global_ft, id);
}

struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid)
{
	struct ksmbd_file *fp = NULL;
	unsigned int id;

	read_lock(&global_ft.lock);
	idr_for_each_entry(global_ft.idr, fp, id) {
		if (!memcmp(fp->create_guid,
			    cguid,
			    SMB2_CREATE_GUID_SIZE)) {
			fp = ksmbd_fp_get(fp);
			break;
		}
	}
	read_unlock(&global_ft.lock);

	return fp;
}
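
/*
 * Illustrative sketch: durable handles live in global_ft keyed by their
 * persistent id, so a client reconnecting after a network drop can
 * recover its open either by persistent id or by the create GUID it
 * supplied at open time (hypothetical reconnect path):
 *
 *	fp = ksmbd_lookup_durable_fd(persistent_id);
 *	if (!fp)
 *		fp = ksmbd_lookup_fd_cguid(create_guid);
 */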

struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode)
{
	struct ksmbd_file *lfp;
	struct ksmbd_inode *ci;

	ci = ksmbd_inode_lookup_by_vfsinode(inode);
	if (!ci)
		return NULL;

	read_lock(&ci->m_lock);
	list_for_each_entry(lfp, &ci->m_fp_list, node) {
		if (inode == file_inode(lfp->filp)) {
			atomic_dec(&ci->m_count);
			lfp = ksmbd_fp_get(lfp);
			read_unlock(&ci->m_lock);
			return lfp;
		}
	}
	atomic_dec(&ci->m_count);
	read_unlock(&ci->m_lock);
	return NULL;
}

#define OPEN_ID_TYPE_VOLATILE_ID (0)
#define OPEN_ID_TYPE_PERSISTENT_ID (1)

static void __open_id_set(struct ksmbd_file *fp, u64 id, int type)
{
	if (type == OPEN_ID_TYPE_VOLATILE_ID)
		fp->volatile_id = id;
	if (type == OPEN_ID_TYPE_PERSISTENT_ID)
		fp->persistent_id = id;
}

static int __open_id(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
		     int type)
{
	u64 id = 0;
	int ret;

	if (type == OPEN_ID_TYPE_VOLATILE_ID && fd_limit_depleted()) {
		__open_id_set(fp, KSMBD_NO_FID, type);
		return -EMFILE;
	}

	/*
	 * Preload the IDR outside the lock so the allocation under
	 * ft->lock can use GFP_NOWAIT without failing spuriously.
	 */
	idr_preload(GFP_KERNEL);
	write_lock(&ft->lock);
	ret = idr_alloc_cyclic(ft->idr, fp, 0, INT_MAX - 1, GFP_NOWAIT);
	if (ret >= 0) {
		id = ret;
		ret = 0;
	} else {
		id = KSMBD_NO_FID;
		fd_limit_close();
	}

	__open_id_set(fp, id, type);
	write_unlock(&ft->lock);
	idr_preload_end();
	return ret;
}
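
/*
 * Illustrative sketch: idr_preload()/idr_preload_end() is the standard
 * pattern for allocating an IDR slot inside a non-sleeping critical
 * section -- the sleeping allocation happens up front, and the locked
 * idr_alloc*() call only consumes the per-CPU preload (generic shape,
 * hypothetical names):
 *
 *	idr_preload(GFP_KERNEL);	// may sleep; no lock held yet
 *	spin_lock(&lock);
 *	id = idr_alloc(&idr, ptr, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&lock);
 *	idr_preload_end();		// re-enables preemption
 */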

unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp)
{
	__open_id(&global_ft, fp, OPEN_ID_TYPE_PERSISTENT_ID);
	return fp->persistent_id;
}

struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp)
{
	struct ksmbd_file *fp;
	int ret;

	fp = kmem_cache_zalloc(filp_cache, GFP_KERNEL);
	if (!fp) {
		pr_err("Failed to allocate memory\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&fp->blocked_works);
	INIT_LIST_HEAD(&fp->node);
	INIT_LIST_HEAD(&fp->lock_list);
	spin_lock_init(&fp->f_lock);
	atomic_set(&fp->refcount, 1);

	fp->filp = filp;
	fp->conn = work->conn;
	fp->tcon = work->tcon;
	fp->volatile_id = KSMBD_NO_FID;
	fp->persistent_id = KSMBD_NO_FID;
	fp->f_ci = ksmbd_inode_get(fp);

	if (!fp->f_ci) {
		ret = -ENOMEM;
		goto err_out;
	}

	ret = __open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
	if (ret) {
		ksmbd_inode_put(fp->f_ci);
		goto err_out;
	}

	atomic_inc(&work->conn->stats.open_files_count);
	return fp;

err_out:
	kmem_cache_free(filp_cache, fp);
	return ERR_PTR(ret);
}
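
/*
 * Illustrative sketch (hypothetical open path): the expected pairing is
 * a VFS open followed by ksmbd_open_fd(), with ksmbd_close_fd() undoing
 * both once the client releases the handle:
 *
 *	filp = dentry_open(&path, flags, current_cred());
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	fp = ksmbd_open_fd(work, filp);
 *	if (IS_ERR(fp)) {
 *		fput(filp);
 *		return PTR_ERR(fp);
 *	}
 *	...
 *	ksmbd_close_fd(work, fp->volatile_id);
 */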

static int
__close_file_table_ids(struct ksmbd_file_table *ft,
		       struct ksmbd_tree_connect *tcon,
		       bool (*skip)(struct ksmbd_tree_connect *tcon,
				    struct ksmbd_file *fp))
{
	unsigned int id;
	struct ksmbd_file *fp;
	int num = 0;

	idr_for_each_entry(ft->idr, fp, id) {
		if (skip(tcon, fp))
			continue;

		set_close_state_blocked_works(fp);

		if (!atomic_dec_and_test(&fp->refcount))
			continue;
		__ksmbd_close_fd(ft, fp);
		num++;
	}
	return num;
}

static bool tree_conn_fd_check(struct ksmbd_tree_connect *tcon,
			       struct ksmbd_file *fp)
{
	return fp->tcon != tcon;
}

/* skip nothing: a session teardown closes every remaining fd */
static bool session_fd_check(struct ksmbd_tree_connect *tcon,
			     struct ksmbd_file *fp)
{
	return false;
}

void ksmbd_close_tree_conn_fds(struct ksmbd_work *work)
{
	int num = __close_file_table_ids(&work->sess->file_table,
					 work->tcon,
					 tree_conn_fd_check);

	atomic_sub(num, &work->conn->stats.open_files_count);
}

void ksmbd_close_session_fds(struct ksmbd_work *work)
{
	int num = __close_file_table_ids(&work->sess->file_table,
					 work->tcon,
					 session_fd_check);

	atomic_sub(num, &work->conn->stats.open_files_count);
}

int ksmbd_init_global_file_table(void)
{
	return ksmbd_init_file_table(&global_ft);
}

void ksmbd_free_global_file_table(void)
{
	struct ksmbd_file *fp = NULL;
	unsigned int id;

	idr_for_each_entry(global_ft.idr, fp, id) {
		__ksmbd_remove_durable_fd(fp);
		kmem_cache_free(filp_cache, fp);
	}

	ksmbd_destroy_file_table(&global_ft);
}

int ksmbd_init_file_table(struct ksmbd_file_table *ft)
{
	ft->idr = kzalloc(sizeof(struct idr), GFP_KERNEL);
	if (!ft->idr)
		return -ENOMEM;

	idr_init(ft->idr);
	rwlock_init(&ft->lock);
	return 0;
}

void ksmbd_destroy_file_table(struct ksmbd_file_table *ft)
{
	if (!ft->idr)
		return;

	__close_file_table_ids(ft, NULL, session_fd_check);
	idr_destroy(ft->idr);
	kfree(ft->idr);
	ft->idr = NULL;
}
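
/*
 * Illustrative sketch (hypothetical per-session lifecycle): each session
 * owns one table, initialized at session setup and destroyed at logoff;
 * destroy is safe to repeat because it NULLs ft->idr:
 *
 *	ret = ksmbd_init_file_table(&sess->file_table);
 *	if (ret)
 *		return ret;
 *	...
 *	ksmbd_destroy_file_table(&sess->file_table);	// closes leftovers
 */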

int ksmbd_init_file_cache(void)
{
	filp_cache = kmem_cache_create("ksmbd_file_cache",
				       sizeof(struct ksmbd_file), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!filp_cache) {
		pr_err("failed to allocate file cache\n");
		return -ENOMEM;
	}

	return 0;
}
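
/*
 * Illustrative note: a dedicated kmem_cache is the usual choice for a
 * small struct allocated on every open; SLAB_HWCACHE_ALIGN keeps two
 * ksmbd_file objects from sharing a cache line.  Generic shape
 * (hypothetical names):
 *
 *	cache = kmem_cache_create("name", sizeof(struct obj), 0,
 *				  SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_zalloc(cache, GFP_KERNEL);
 *	kmem_cache_free(cache, obj);
 *	kmem_cache_destroy(cache);
 */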

void ksmbd_exit_file_cache(void)
{
	kmem_cache_destroy(filp_cache);
}