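// SPDX-License-Identifier: GPL-2.0
/*
 * DFS referral cache routines
 *
 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
 */
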
#include <linux/jhash.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/nls.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include "cifsglob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2glob.h"
#include "dns_resolve.h"

#include "dfs_cache.h"

#define CACHE_HTABLE_SIZE 32
#define CACHE_MAX_ENTRIES 64
#define CACHE_MIN_TTL 120 /* 2 minutes */

#define IS_DFS_INTERLINK(v) (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))

struct cache_dfs_tgt {
	char *name;
	int path_consumed;
	struct list_head list;
};

struct cache_entry {
	struct hlist_node hlist;
	const char *path;
	int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
	int ttl; /* DFS_REREFERRAL_V3.TimeToLive */
	int srvtype; /* DFS_REREFERRAL_V3.ServerType */
	int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */
	struct timespec64 etime;
	int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */
	int numtgts;
	struct list_head tlist;
	struct cache_dfs_tgt *tgthint;
};

/* List of referral server sessions per dfs mount */
struct mount_group {
	struct list_head list;
	uuid_t id;
	struct cifs_ses *sessions[CACHE_MAX_ENTRIES];
	int num_sessions;
	spinlock_t lock;
	struct list_head refresh_list;
	struct kref refcount;
};

static struct kmem_cache *cache_slab __read_mostly;
static struct workqueue_struct *dfscache_wq __read_mostly;

static int cache_ttl;
static DEFINE_SPINLOCK(cache_ttl_lock);

static struct nls_table *cache_cp;

/*
 * Number of entries in the cache
 */
static atomic_t cache_count;

static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
static DECLARE_RWSEM(htable_rw_lock);

static LIST_HEAD(mount_group_list);
static DEFINE_MUTEX(mount_group_list_lock);

static void refresh_cache_worker(struct work_struct *work);

static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);

static void get_ipc_unc(const char *ref_path, char *ipc, size_t ipclen)
{
	const char *host;
	size_t len;

	extract_unc_hostname(ref_path, &host, &len);
	scnprintf(ipc, ipclen, "\\\\%.*s\\IPC$", (int)len, host);
}

static struct cifs_ses *find_ipc_from_server_path(struct cifs_ses **ses, const char *path)
{
	char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0};

	get_ipc_unc(path, unc, sizeof(unc));
	for (; *ses; ses++) {
		if (!strcasecmp(unc, (*ses)->tcon_ipc->treeName))
			return *ses;
	}
	return ERR_PTR(-ENOENT);
}

0106
0107 static void __mount_group_release(struct mount_group *mg)
0108 {
0109 int i;
0110
0111 for (i = 0; i < mg->num_sessions; i++)
0112 cifs_put_smb_ses(mg->sessions[i]);
0113 kfree(mg);
0114 }
0115
0116 static void mount_group_release(struct kref *kref)
0117 {
0118 struct mount_group *mg = container_of(kref, struct mount_group, refcount);
0119
0120 mutex_lock(&mount_group_list_lock);
0121 list_del(&mg->list);
0122 mutex_unlock(&mount_group_list_lock);
0123 __mount_group_release(mg);
0124 }
0125
0126 static struct mount_group *find_mount_group_locked(const uuid_t *id)
0127 {
0128 struct mount_group *mg;
0129
0130 list_for_each_entry(mg, &mount_group_list, list) {
0131 if (uuid_equal(&mg->id, id))
0132 return mg;
0133 }
0134 return ERR_PTR(-ENOENT);
0135 }
0136
0137 static struct mount_group *__get_mount_group_locked(const uuid_t *id)
0138 {
0139 struct mount_group *mg;
0140
0141 mg = find_mount_group_locked(id);
0142 if (!IS_ERR(mg))
0143 return mg;
0144
0145 mg = kmalloc(sizeof(*mg), GFP_KERNEL);
0146 if (!mg)
0147 return ERR_PTR(-ENOMEM);
0148 kref_init(&mg->refcount);
0149 uuid_copy(&mg->id, id);
0150 mg->num_sessions = 0;
0151 spin_lock_init(&mg->lock);
0152 list_add(&mg->list, &mount_group_list);
0153 return mg;
0154 }
0155
0156 static struct mount_group *get_mount_group(const uuid_t *id)
0157 {
0158 struct mount_group *mg;
0159
0160 mutex_lock(&mount_group_list_lock);
0161 mg = __get_mount_group_locked(id);
0162 if (!IS_ERR(mg))
0163 kref_get(&mg->refcount);
0164 mutex_unlock(&mount_group_list_lock);
0165
0166 return mg;
0167 }
0168
0169 static void free_mount_group_list(void)
0170 {
0171 struct mount_group *mg, *tmp_mg;
0172
0173 list_for_each_entry_safe(mg, tmp_mg, &mount_group_list, list) {
0174 list_del_init(&mg->list);
0175 __mount_group_release(mg);
0176 }
0177 }
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187
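
/**
 * dfs_cache_canonical_path - get a canonical DFS path
 *
 * @path: DFS path
 * @cp: codepage
 * @remap: mapping type
 *
 * Return canonical path if success, otherwise error.
 */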
char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap)
{
	char *tmp;
	int plen = 0;
	char *npath;

	if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
		return ERR_PTR(-EINVAL);

	if (unlikely(strcmp(cp->charset, cache_cp->charset))) {
		tmp = (char *)cifs_strndup_to_utf16(path, strlen(path), &plen, cp, remap);
		if (!tmp) {
			cifs_dbg(VFS, "%s: failed to convert path to utf16\n", __func__);
			return ERR_PTR(-EINVAL);
		}

		npath = cifs_strndup_from_utf16(tmp, plen, true, cache_cp);
		kfree(tmp);

		if (!npath) {
			cifs_dbg(VFS, "%s: failed to convert path from utf16\n", __func__);
			return ERR_PTR(-EINVAL);
		}
	} else {
		npath = kstrdup(path, GFP_KERNEL);
		if (!npath)
			return ERR_PTR(-ENOMEM);
	}
	convert_delimiter(npath, '\\');
	return npath;
}

static inline bool cache_entry_expired(const struct cache_entry *ce)
{
	struct timespec64 ts;

	ktime_get_coarse_real_ts64(&ts);
	return timespec64_compare(&ts, &ce->etime) >= 0;
}

static inline void free_tgts(struct cache_entry *ce)
{
	struct cache_dfs_tgt *t, *n;

	list_for_each_entry_safe(t, n, &ce->tlist, list) {
		list_del(&t->list);
		kfree(t->name);
		kfree(t);
	}
}

static inline void flush_cache_ent(struct cache_entry *ce)
{
	hlist_del_init(&ce->hlist);
	kfree(ce->path);
	free_tgts(ce);
	atomic_dec(&cache_count);
	kmem_cache_free(cache_slab, ce);
}

static void flush_cache_ents(void)
{
	int i;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];
		struct hlist_node *n;
		struct cache_entry *ce;

		hlist_for_each_entry_safe(ce, n, l, hlist) {
			if (!hlist_unhashed(&ce->hlist))
				flush_cache_ent(ce);
		}
	}
}

/*
 * dfs cache /proc file
 */
static int dfscache_proc_show(struct seq_file *m, void *v)
{
	int i;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	seq_puts(m, "DFS cache\n---------\n");

	down_read(&htable_rw_lock);
	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;

			seq_printf(m,
				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
				   ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
				   ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
				   IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
				   ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");

			list_for_each_entry(t, &ce->tlist, list) {
				seq_printf(m, "  %s%s\n",
					   t->name,
					   ce->tgthint == t ? " (target hint)" : "");
			}
		}
	}
	up_read(&htable_rw_lock);

	return 0;
}

static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
				   size_t count, loff_t *ppos)
{
	char c;
	int rc;

	rc = get_user(c, buffer);
	if (rc)
		return rc;

	if (c != '0')
		return -EINVAL;

	cifs_dbg(FYI, "clearing dfs cache\n");

	down_write(&htable_rw_lock);
	flush_cache_ents();
	up_write(&htable_rw_lock);

	return count;
}

static int dfscache_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, dfscache_proc_show, NULL);
}

const struct proc_ops dfscache_proc_ops = {
	.proc_open	= dfscache_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= dfscache_proc_write,
};

#ifdef CONFIG_CIFS_DEBUG2
static inline void dump_tgts(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t;

	cifs_dbg(FYI, "target list:\n");
	list_for_each_entry(t, &ce->tlist, list) {
		cifs_dbg(FYI, "  %s%s\n", t->name,
			 ce->tgthint == t ? " (target hint)" : "");
	}
}

static inline void dump_ce(const struct cache_entry *ce)
{
	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
		 ce->path,
		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
		 ce->etime.tv_nsec,
		 ce->hdr_flags, ce->ref_flags,
		 IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
		 ce->path_consumed,
		 cache_entry_expired(ce) ? "yes" : "no");
	dump_tgts(ce);
}

static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
{
	int i;

	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
	for (i = 0; i < numrefs; i++) {
		const struct dfs_info3_param *ref = &refs[i];

		cifs_dbg(FYI,
			 "\n"
			 "flags:         0x%x\n"
			 "path_consumed: %d\n"
			 "server_type:   0x%x\n"
			 "ref_flag:      0x%x\n"
			 "path_name:     %s\n"
			 "node_name:     %s\n"
			 "ttl:           %d (%dm)\n",
			 ref->flags, ref->path_consumed, ref->server_type,
			 ref->ref_flag, ref->path_name, ref->node_name,
			 ref->ttl, ref->ttl / 60);
	}
}
#else
#define dump_tgts(e)
#define dump_ce(e)
#define dump_refs(r, n)
#endif
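
/**
 * dfs_cache_init - Initialize DFS referral cache.
 *
 * Return zero if initialized successfully, otherwise non-zero.
 */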
int dfs_cache_init(void)
{
	int rc;
	int i;

	dfscache_wq = alloc_workqueue("cifs-dfscache", WQ_FREEZABLE | WQ_UNBOUND, 1);
	if (!dfscache_wq)
		return -ENOMEM;

	cache_slab = kmem_cache_create("cifs_dfs_cache",
				       sizeof(struct cache_entry), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!cache_slab) {
		rc = -ENOMEM;
		goto out_destroy_wq;
	}

	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
		INIT_HLIST_HEAD(&cache_htable[i]);

	atomic_set(&cache_count, 0);
	cache_cp = load_nls("utf8");
	if (!cache_cp)
		cache_cp = load_nls_default();

	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
	return 0;

out_destroy_wq:
	destroy_workqueue(dfscache_wq);
	return rc;
}

static int cache_entry_hash(const void *data, int size, unsigned int *hash)
{
	int i, clen;
	const unsigned char *s = data;
	wchar_t c;
	unsigned int h = 0;

	for (i = 0; i < size; i += clen) {
		clen = cache_cp->char2uni(&s[i], size - i, &c);
		if (unlikely(clen < 0)) {
			cifs_dbg(VFS, "%s: can't convert char\n", __func__);
			return clen;
		}
		c = cifs_toupper(c);
		h = jhash(&c, sizeof(c), h);
	}
	*hash = h % CACHE_HTABLE_SIZE;
	return 0;
}

/* Return target hint of a DFS cache entry */
static inline char *get_tgt_name(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t = ce->tgthint;

	return t ? t->name : ERR_PTR(-ENOENT);
}

/* Return expire time out of a new entry's TTL */
static inline struct timespec64 get_expire_time(int ttl)
{
	struct timespec64 ts = {
		.tv_sec = ttl,
		.tv_nsec = 0,
	};
	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);
	return timespec64_add(now, ts);
}

/* Allocate a new DFS target */
static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
{
	struct cache_dfs_tgt *t;

	t = kmalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		return ERR_PTR(-ENOMEM);
	t->name = kstrdup(name, GFP_ATOMIC);
	if (!t->name) {
		kfree(t);
		return ERR_PTR(-ENOMEM);
	}
	t->path_consumed = path_consumed;
	INIT_LIST_HEAD(&t->list);
	return t;
}

/*
 * Copy DFS referral @refs to a cache entry @ce and, when a target hint name
 * @tgthint is given, move the matching target to the front of the list.
 */
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
			 struct cache_entry *ce, const char *tgthint)
{
	int i;

	ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);
	ce->etime = get_expire_time(ce->ttl);
	ce->srvtype = refs[0].server_type;
	ce->hdr_flags = refs[0].flags;
	ce->ref_flags = refs[0].ref_flag;
	ce->path_consumed = refs[0].path_consumed;

	for (i = 0; i < numrefs; i++) {
		struct cache_dfs_tgt *t;

		t = alloc_target(refs[i].node_name, refs[i].path_consumed);
		if (IS_ERR(t)) {
			free_tgts(ce);
			return PTR_ERR(t);
		}
		if (tgthint && !strcasecmp(t->name, tgthint)) {
			list_add(&t->list, &ce->tlist);
			tgthint = NULL;
		} else {
			list_add_tail(&t->list, &ce->tlist);
		}
		ce->numtgts++;
	}

	ce->tgthint = list_first_entry_or_null(&ce->tlist,
					       struct cache_dfs_tgt, list);

	return 0;
}

/* Allocate a new cache entry */
static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int numrefs)
{
	struct cache_entry *ce;
	int rc;

	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
	if (!ce)
		return ERR_PTR(-ENOMEM);

	ce->path = refs[0].path_name;
	refs[0].path_name = NULL;

	INIT_HLIST_NODE(&ce->hlist);
	INIT_LIST_HEAD(&ce->tlist);

	rc = copy_ref_data(refs, numrefs, ce, NULL);
	if (rc) {
		kfree(ce->path);
		kmem_cache_free(cache_slab, ce);
		ce = ERR_PTR(rc);
	}
	return ce;
}

static void remove_oldest_entry_locked(void)
{
	int i;
	struct cache_entry *ce;
	struct cache_entry *to_del = NULL;

	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;
			if (!to_del || timespec64_compare(&ce->etime,
							  &to_del->etime) < 0)
				to_del = ce;
		}
	}

	if (!to_del) {
		cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
		return;
	}

	cifs_dbg(FYI, "%s: removing entry\n", __func__);
	dump_ce(to_del);
	flush_cache_ent(to_del);
}

/* Add a new DFS cache entry */
static int add_cache_entry_locked(struct dfs_info3_param *refs, int numrefs)
{
	int rc;
	struct cache_entry *ce;
	unsigned int hash;

	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
		cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES);
		remove_oldest_entry_locked();
	}

	rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
	if (rc)
		return rc;

	ce = alloc_cache_entry(refs, numrefs);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	spin_lock(&cache_ttl_lock);
	if (!cache_ttl) {
		cache_ttl = ce->ttl;
		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	} else {
		cache_ttl = min_t(int, cache_ttl, ce->ttl);
		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	}
	spin_unlock(&cache_ttl_lock);

	hlist_add_head(&ce->hlist, &cache_htable[hash]);
	dump_ce(ce);

	atomic_inc(&cache_count);

	return 0;
}

/* Check if two DFS paths are equal.  @s1 and @s2 are expected to be in @cache_cp's charset */
static bool dfs_path_equal(const char *s1, int len1, const char *s2, int len2)
{
	int i, l1, l2;
	wchar_t c1, c2;

	if (len1 != len2)
		return false;

	for (i = 0; i < len1; i += l1) {
		l1 = cache_cp->char2uni(&s1[i], len1 - i, &c1);
		l2 = cache_cp->char2uni(&s2[i], len2 - i, &c2);
		if (unlikely(l1 < 0 && l2 < 0)) {
			if (s1[i] != s2[i])
				return false;
			l1 = 1;
			continue;
		}
		if (l1 != l2)
			return false;
		if (cifs_toupper(c1) != cifs_toupper(c2))
			return false;
	}
	return true;
}

static struct cache_entry *__lookup_cache_entry(const char *path, unsigned int hash, int len)
{
	struct cache_entry *ce;

	hlist_for_each_entry(ce, &cache_htable[hash], hlist) {
		if (dfs_path_equal(ce->path, strlen(ce->path), path, len)) {
			dump_ce(ce);
			return ce;
		}
	}
	return ERR_PTR(-ENOENT);
}

/*
 * Find a DFS cache entry in hash table and optionally check prefix path against
 * normalized @path.
 *
 * Use whole path components in the match.  Must be called with htable_rw_lock held.
 *
 * Return cached entry if successful.
 * Return ERR_PTR(-ENOENT) if the entry is not found.
 */
static struct cache_entry *lookup_cache_entry(const char *path)
{
	struct cache_entry *ce;
	int cnt = 0;
	const char *s = path, *e;
	char sep = *s;
	unsigned int hash;
	int rc;

	while ((s = strchr(s, sep)) && ++cnt < 3)
		s++;

	if (cnt < 3) {
		rc = cache_entry_hash(path, strlen(path), &hash);
		if (rc)
			return ERR_PTR(rc);
		return __lookup_cache_entry(path, hash, strlen(path));
	}

	/*
	 * Handle paths that have more than two path components and are a complete prefix of the DFS
	 * referral request path (@path).
	 *
	 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
	 */
	e = path + strlen(path) - 1;
	while (e > s) {
		int len;

		/* skip separators */
		while (e > s && *e == sep)
			e--;
		if (e == s)
			break;

		len = e + 1 - path;
		rc = cache_entry_hash(path, len, &hash);
		if (rc)
			return ERR_PTR(rc);
		ce = __lookup_cache_entry(path, hash, len);
		if (!IS_ERR(ce))
			return ce;

		/* backward until separator */
		while (e > s && *e != sep)
			e--;
	}
	return ERR_PTR(-ENOENT);
}
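
/**
 * dfs_cache_destroy - destroy DFS referral cache
 */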
void dfs_cache_destroy(void)
{
	cancel_delayed_work_sync(&refresh_task);
	unload_nls(cache_cp);
	free_mount_group_list();
	flush_cache_ents();
	kmem_cache_destroy(cache_slab);
	destroy_workqueue(dfscache_wq);

	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
}

/* Update a cache entry with the new referral in @refs */
static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs,
				     int numrefs)
{
	int rc;
	char *s, *th = NULL;

	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

	if (ce->tgthint) {
		s = ce->tgthint->name;
		th = kstrdup(s, GFP_ATOMIC);
		if (!th)
			return -ENOMEM;
	}

	free_tgts(ce);
	ce->numtgts = 0;

	rc = copy_ref_data(refs, numrefs, ce, th);

	kfree(th);

	return rc;
}

static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const char *path,
			    struct dfs_info3_param **refs, int *numrefs)
{
	int rc;
	int i;

	cifs_dbg(FYI, "%s: get a DFS referral for %s\n", __func__, path);

	*refs = NULL;
	*numrefs = 0;

	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
		return -EOPNOTSUPP;
	if (unlikely(!cache_cp))
		return -EINVAL;

	rc = ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs, cache_cp,
					     NO_MAP_UNI_RSVD);
	if (!rc) {
		struct dfs_info3_param *ref = *refs;

		for (i = 0; i < *numrefs; i++)
			convert_delimiter(ref[i].path_name, '\\');
	}
	return rc;
}

/*
 * Find, create or update a DFS cache entry.
 *
 * If the entry wasn't found, it will create a new one.  Or if it was found but
 * expired, then it will update the entry accordingly.
 */
static int cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, const char *path)
{
	int rc;
	struct cache_entry *ce;
	struct dfs_info3_param *refs = NULL;
	int numrefs = 0;
	bool newent = false;

	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);

	down_write(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (!IS_ERR(ce)) {
		if (!cache_entry_expired(ce)) {
			dump_ce(ce);
			up_write(&htable_rw_lock);
			return 0;
		}
	} else {
		newent = true;
	}

	/*
	 * Either the entry was not found, or it is expired.
	 * Request a new DFS referral in order to create or update a cache entry.
	 */
	rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
	if (rc)
		goto out_unlock;

	dump_refs(refs, numrefs);

	if (!newent) {
		rc = update_cache_entry_locked(ce, refs, numrefs);
		goto out_unlock;
	}

	rc = add_cache_entry_locked(refs, numrefs);

out_unlock:
	up_write(&htable_rw_lock);
	free_dfs_info_array(refs, numrefs);
	return rc;
}

/*
 * Set up a DFS referral from a given cache entry.
 *
 * Must be called with htable_rw_lock held.
 */
static int setup_referral(const char *path, struct cache_entry *ce,
			  struct dfs_info3_param *ref, const char *target)
{
	int rc;

	cifs_dbg(FYI, "%s: set up new ref\n", __func__);

	memset(ref, 0, sizeof(*ref));

	ref->path_name = kstrdup(path, GFP_ATOMIC);
	if (!ref->path_name)
		return -ENOMEM;

	ref->node_name = kstrdup(target, GFP_ATOMIC);
	if (!ref->node_name) {
		rc = -ENOMEM;
		goto err_free_path;
	}

	ref->path_consumed = ce->path_consumed;
	ref->ttl = ce->ttl;
	ref->server_type = ce->srvtype;
	ref->ref_flag = ce->ref_flags;
	ref->flags = ce->hdr_flags;

	return 0;

err_free_path:
	kfree(ref->path_name);
	ref->path_name = NULL;
	return rc;
}

/* Fetch DFS cache entry targets into @tl */
static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
{
	int rc;
	struct list_head *head = &tl->tl_list;
	struct cache_dfs_tgt *t;
	struct dfs_cache_tgt_iterator *it, *nit;

	memset(tl, 0, sizeof(*tl));
	INIT_LIST_HEAD(head);

	list_for_each_entry(t, &ce->tlist, list) {
		it = kzalloc(sizeof(*it), GFP_ATOMIC);
		if (!it) {
			rc = -ENOMEM;
			goto err_free_it;
		}

		it->it_name = kstrdup(t->name, GFP_ATOMIC);
		if (!it->it_name) {
			kfree(it);
			rc = -ENOMEM;
			goto err_free_it;
		}
		it->it_path_consumed = t->path_consumed;

		if (ce->tgthint == t)
			list_add(&it->it_list, head);
		else
			list_add_tail(&it->it_list, head);
	}

	tl->tl_numtgts = ce->numtgts;

	return 0;

err_free_it:
	list_for_each_entry_safe(it, nit, head, it_list) {
		list_del(&it->it_list);
		kfree(it->it_name);
		kfree(it);
	}
	return rc;
}
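
/**
 * dfs_cache_find - find a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral
 * for @path and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * These parameters are passed down to the get_dfs_refer() call if it
 * needs to be issued:
 * @xid: syscall xid
 * @ses: smb session to issue the request on
 * @cp: codepage
 * @remap: path character remapping type
 * @path: path to lookup in DFS referral cache.
 *
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return zero if the target was found, otherwise non-zero.
 */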
int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp,
		   int remap, const char *path, struct dfs_info3_param *ref,
		   struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	const char *npath;
	struct cache_entry *ce;

	npath = dfs_cache_canonical_path(path, cp, remap);
	if (IS_ERR(npath))
		return PTR_ERR(npath);

	rc = cache_refresh_path(xid, ses, npath);
	if (rc)
		goto out_free_path;

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath);
	if (IS_ERR(ce)) {
		up_read(&htable_rw_lock);
		rc = PTR_ERR(ce);
		goto out_free_path;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

	up_read(&htable_rw_lock);

out_free_path:
	kfree(npath);
	return rc;
}
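
/**
 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
 * the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: canonical DFS path to lookup in the DFS referral cache.
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return 0 if successful.
 * Return -ENOENT if the entry was not found.
 * Return non-zero for other errors.
 */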
int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
			 struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	struct cache_entry *ce;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

out_unlock:
	up_read(&htable_rw_lock);
	return rc;
}
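
/**
 * dfs_cache_update_tgthint - update target hint of a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral for @path
 * and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * @xid: syscall id
 * @ses: smb session
 * @cp: codepage
 * @remap: type of character remapping for paths
 * @path: path to lookup in DFS referral cache
 * @it: DFS target iterator
 *
 * Return zero if the target hint was updated successfully, otherwise non-zero.
 */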
int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
			     const struct nls_table *cp, int remap, const char *path,
			     const struct dfs_cache_tgt_iterator *it)
{
	int rc;
	const char *npath;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	npath = dfs_cache_canonical_path(path, cp, remap);
	if (IS_ERR(npath))
		return PTR_ERR(npath);

	cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);

	rc = cache_refresh_path(xid, ses, npath);
	if (rc)
		goto out_free_path;

	down_write(&htable_rw_lock);

	ce = lookup_cache_entry(npath);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	t = ce->tgthint;

	if (likely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			ce->tgthint = t;
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_write(&htable_rw_lock);
out_free_path:
	kfree(npath);
	return rc;
}
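
/**
 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
 * without sending any requests to the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: canonical DFS path to lookup in DFS referral cache.
 * @it: target iterator which contains the target hint to update the cache
 * entry with.
 *
 * Return zero if the target hint was updated successfully, otherwise non-zero.
 */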
int dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it)
{
	int rc;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	if (!it)
		return -EINVAL;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);

	down_write(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	rc = 0;
	t = ce->tgthint;

	if (unlikely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			ce->tgthint = t;
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_write(&htable_rw_lock);
	return rc;
}
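
/**
 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
 * target iterator (@it).
 *
 * @path: canonical DFS path to lookup in DFS referral cache.
 * @it: DFS target iterator.
 * @ref: DFS referral pointer to set up the gathered information.
 *
 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
 */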
int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
			       struct dfs_info3_param *ref)
{
	int rc;
	struct cache_entry *ce;

	if (!it || !ref)
		return -EINVAL;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);

	rc = setup_referral(path, ce, ref, it->it_name);

out_unlock:
	up_read(&htable_rw_lock);
	return rc;
}
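
/**
 * dfs_cache_add_refsrv_session - add SMB session of referral server
 *
 * @mount_id: mount group uuid to lookup.
 * @ses: reference counted SMB session of referral server.
 */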
void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses)
{
	struct mount_group *mg;

	if (WARN_ON_ONCE(!mount_id || uuid_is_null(mount_id) || !ses))
		return;

	mg = get_mount_group(mount_id);
	if (WARN_ON_ONCE(IS_ERR(mg)))
		return;

	spin_lock(&mg->lock);
	if (mg->num_sessions < ARRAY_SIZE(mg->sessions))
		mg->sessions[mg->num_sessions++] = ses;
	spin_unlock(&mg->lock);
	kref_put(&mg->refcount, mount_group_release);
}
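
/**
 * dfs_cache_put_refsrv_sessions - put all referral server sessions
 *
 * Put all SMB sessions from the given mount group id.
 *
 * @mount_id: mount group uuid to lookup.
 */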
void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id)
{
	struct mount_group *mg;

	if (!mount_id || uuid_is_null(mount_id))
		return;

	mutex_lock(&mount_group_list_lock);
	mg = find_mount_group_locked(mount_id);
	if (IS_ERR(mg)) {
		mutex_unlock(&mount_group_list_lock);
		return;
	}
	mutex_unlock(&mount_group_list_lock);
	kref_put(&mg->refcount, mount_group_release);
}

/* Extract share from DFS target and return a pointer to prefix path or NULL */
static const char *parse_target_share(const char *target, char **share)
{
	const char *s, *seps = "/\\";
	size_t len;

	s = strpbrk(target + 1, seps);
	if (!s)
		return ERR_PTR(-EINVAL);

	len = strcspn(s + 1, seps);
	if (!len)
		return ERR_PTR(-EINVAL);
	s += len;

	len = s - target + 1;
	*share = kstrndup(target, len, GFP_KERNEL);
	if (!*share)
		return ERR_PTR(-ENOMEM);

	s = target + len;
	return s + strspn(s, seps);
}
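
/**
 * dfs_cache_get_tgt_share - parse a DFS target
 *
 * @path: DFS full path
 * @it: DFS target iterator.
 * @share: tree name.
 * @prefix: prefix path.
 *
 * Return zero if target was parsed correctly, otherwise non-zero.
 */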
int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
			    char **prefix)
{
	char sep;
	char *target_share;
	char *ppath = NULL;
	const char *target_ppath, *dfsref_ppath;
	size_t target_pplen, dfsref_pplen;
	size_t len, c;

	if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
		return -EINVAL;

	sep = it->it_name[0];
	if (sep != '\\' && sep != '/')
		return -EINVAL;

	target_ppath = parse_target_share(it->it_name, &target_share);
	if (IS_ERR(target_ppath))
		return PTR_ERR(target_ppath);

	/* point to prefix in DFS referral path */
	dfsref_ppath = path + it->it_path_consumed;
	dfsref_ppath += strspn(dfsref_ppath, "/\\");

	target_pplen = strlen(target_ppath);
	dfsref_pplen = strlen(dfsref_ppath);

	/* merge prefix paths from DFS referral path and target node */
	if (target_pplen || dfsref_pplen) {
		len = target_pplen + dfsref_pplen + 2;
		ppath = kzalloc(len, GFP_KERNEL);
		if (!ppath) {
			kfree(target_share);
			return -ENOMEM;
		}
		c = strscpy(ppath, target_ppath, len);
		if (c && dfsref_pplen)
			ppath[c] = sep;
		strlcat(ppath, dfsref_ppath, len);
	}
	*share = target_share;
	*prefix = ppath;
	return 0;
}

static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2)
{
	char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
	const char *host;
	size_t hostlen;
	char *ip = NULL;
	struct sockaddr sa;
	bool match;
	int rc;

	if (strcasecmp(s1, s2))
		return false;

	/*
	 * Resolve share's hostname and check if server address matches.  Otherwise just ignore it
	 * as we could not have upcall to resolve hostname or failed to convert ip address.
	 */
	match = true;
	extract_unc_hostname(s1, &host, &hostlen);
	scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);

	rc = dns_resolve_server_name_to_ip(unc, &ip, NULL);
	if (rc < 0) {
		cifs_dbg(FYI, "%s: could not resolve %.*s. assuming server address matches.\n",
			 __func__, (int)hostlen, host);
		return true;
	}

	if (!cifs_convert_address(&sa, ip, strlen(ip))) {
		cifs_dbg(VFS, "%s: failed to convert address \'%s\'. skip address matching.\n",
			 __func__, ip);
	} else {
		cifs_server_lock(server);
		match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, &sa);
		cifs_server_unlock(server);
	}

	kfree(ip);
	return match;
}

/*
 * Mark dfs tcon for reconnecting when the currently connected tcon does not
 * match any of the new target shares in @refs.
 */
static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cache_tgt_list *tl,
					 const struct dfs_info3_param *refs, int numrefs)
{
	struct dfs_cache_tgt_iterator *it;
	int i;

	for (it = dfs_cache_get_tgt_iterator(tl); it; it = dfs_cache_get_next_tgt(tl, it)) {
		for (i = 0; i < numrefs; i++) {
			if (target_share_equal(tcon->ses->server, dfs_cache_get_tgt_name(it),
					       refs[i].node_name))
				return;
		}
	}

	cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
	cifs_signal_cifsd_for_reconnect(tcon->ses->server, true);
}

/* Refresh dfs referral of tcon and mark it for reconnect if needed */
static int __refresh_tcon(const char *path, struct cifs_ses **sessions, struct cifs_tcon *tcon,
			  bool force_refresh)
{
	struct cifs_ses *ses;
	struct cache_entry *ce;
	struct dfs_info3_param *refs = NULL;
	int numrefs = 0;
	bool needs_refresh = false;
	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
	int rc = 0;
	unsigned int xid;

	ses = find_ipc_from_server_path(sessions, path);
	if (IS_ERR(ses)) {
		cifs_dbg(FYI, "%s: could not find ipc session\n", __func__);
		return PTR_ERR(ses);
	}

	down_read(&htable_rw_lock);
	ce = lookup_cache_entry(path);
	needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
	if (!IS_ERR(ce)) {
		rc = get_targets(ce, &tl);
		if (rc)
			cifs_dbg(FYI, "%s: could not get dfs targets: %d\n", __func__, rc);
	}
	up_read(&htable_rw_lock);

	if (!needs_refresh) {
		rc = 0;
		goto out;
	}

	xid = get_xid();
	rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
	free_xid(xid);

	/* Create or update a cache entry with the new referral */
	if (!rc) {
		dump_refs(refs, numrefs);

		down_write(&htable_rw_lock);
		ce = lookup_cache_entry(path);
		if (IS_ERR(ce))
			add_cache_entry_locked(refs, numrefs);
		else if (force_refresh || cache_entry_expired(ce))
			update_cache_entry_locked(ce, refs, numrefs);
		up_write(&htable_rw_lock);

		mark_for_reconnect_if_needed(tcon, &tl, refs, numrefs);
	}

out:
	dfs_cache_free_tgts(&tl);
	free_dfs_info_array(refs, numrefs);
	return rc;
}

static int refresh_tcon(struct cifs_ses **sessions, struct cifs_tcon *tcon, bool force_refresh)
{
	struct TCP_Server_Info *server = tcon->ses->server;

	mutex_lock(&server->refpath_lock);
	if (server->origin_fullpath) {
		if (server->leaf_fullpath && strcasecmp(server->leaf_fullpath,
							server->origin_fullpath))
			__refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, force_refresh);
		__refresh_tcon(server->origin_fullpath + 1, sessions, tcon, force_refresh);
	}
	mutex_unlock(&server->refpath_lock);

	return 0;
}
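
/**
 * dfs_cache_remount_fs - remount a DFS share
 *
 * Reconfigure dfs mount by getting a new DFS referral from server and
 * marking the superblock for reconnect if needed.
 *
 * @cifs_sb: cifs superblock.
 *
 * Return zero if remounted, otherwise non-zero.
 */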
int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
{
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct mount_group *mg;
	struct cifs_ses *sessions[CACHE_MAX_ENTRIES + 1] = {NULL};
	int rc;

	if (!cifs_sb || !cifs_sb->master_tlink)
		return -EINVAL;

	tcon = cifs_sb_master_tcon(cifs_sb);
	server = tcon->ses->server;

	if (!server->origin_fullpath) {
		cifs_dbg(FYI, "%s: not a dfs mount\n", __func__);
		return 0;
	}

	if (uuid_is_null(&cifs_sb->dfs_mount_id)) {
		cifs_dbg(FYI, "%s: no dfs mount group id\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&mount_group_list_lock);
	mg = find_mount_group_locked(&cifs_sb->dfs_mount_id);
	if (IS_ERR(mg)) {
		mutex_unlock(&mount_group_list_lock);
		cifs_dbg(FYI, "%s: no ipc session for refreshing referral\n", __func__);
		return PTR_ERR(mg);
	}
	kref_get(&mg->refcount);
	mutex_unlock(&mount_group_list_lock);

	spin_lock(&mg->lock);
	memcpy(&sessions, mg->sessions, mg->num_sessions * sizeof(mg->sessions[0]));
	spin_unlock(&mg->lock);

	/*
	 * After reconnecting to a different server, unique ids won't match anymore, so we disable
	 * serverino. This prevents dentry revalidation from thinking the dentries are stale (ESTALE).
	 */
	cifs_autodisable_serverino(cifs_sb);
	/*
	 * Force the use of prefix path to support failover on DFS paths that resolve to targets
	 * that have different prefix paths.
	 */
	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
	rc = refresh_tcon(sessions, tcon, true);

	kref_put(&mg->refcount, mount_group_release);
	return rc;
}

/*
 * Refresh all active dfs mounts regardless of whether they are in cache or not.
 * (cache can be cleared)
 */
static void refresh_mounts(struct cifs_ses **sessions)
{
	struct TCP_Server_Info *server;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon, *ntcon;
	struct list_head tcons;

	INIT_LIST_HEAD(&tcons);

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
		spin_lock(&server->srv_lock);
		if (!server->is_dfs_conn) {
			spin_unlock(&server->srv_lock);
			continue;
		}
		spin_unlock(&server->srv_lock);

		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
				spin_lock(&tcon->tc_lock);
				if (!tcon->ipc && !tcon->need_reconnect) {
					tcon->tc_count++;
					list_add_tail(&tcon->ulist, &tcons);
				}
				spin_unlock(&tcon->tc_lock);
			}
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);

	list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
		struct TCP_Server_Info *server = tcon->ses->server;

		list_del_init(&tcon->ulist);

		mutex_lock(&server->refpath_lock);
		if (server->origin_fullpath) {
			if (server->leaf_fullpath && strcasecmp(server->leaf_fullpath,
								server->origin_fullpath))
				__refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, false);
			__refresh_tcon(server->origin_fullpath + 1, sessions, tcon, false);
		}
		mutex_unlock(&server->refpath_lock);

		cifs_put_tcon(tcon);
	}
}

static void refresh_cache(struct cifs_ses **sessions)
{
	int i;
	struct cifs_ses *ses;
	unsigned int xid;
	char *ref_paths[CACHE_MAX_ENTRIES];
	int count = 0;
	struct cache_entry *ce;

	/*
	 * Refresh all cached entries.  Get all new referrals outside critical section to avoid
	 * starvation while performing SMB2 IOCTL on broken or slow connections.
	 *
	 * The cache entries may cover more paths than the active mounts
	 * (e.g. domain-based DFS referrals or multi tier DFS setups).
	 */
	down_read(&htable_rw_lock);
	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (count == ARRAY_SIZE(ref_paths))
				goto out_unlock;
			if (hlist_unhashed(&ce->hlist) || !cache_entry_expired(ce) ||
			    IS_ERR(find_ipc_from_server_path(sessions, ce->path)))
				continue;
			ref_paths[count++] = kstrdup(ce->path, GFP_ATOMIC);
		}
	}

out_unlock:
	up_read(&htable_rw_lock);

	for (i = 0; i < count; i++) {
		char *path = ref_paths[i];
		struct dfs_info3_param *refs = NULL;
		int numrefs = 0;
		int rc = 0;

		if (!path)
			continue;

		ses = find_ipc_from_server_path(sessions, path);
		if (IS_ERR(ses))
			goto next_referral;

		xid = get_xid();
		rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
		free_xid(xid);

		if (!rc) {
			down_write(&htable_rw_lock);
			ce = lookup_cache_entry(path);
			/*
			 * We need to re-check it because other tasks might have it deleted or
			 * updated.
			 */
			if (!IS_ERR(ce) && cache_entry_expired(ce))
				update_cache_entry_locked(ce, refs, numrefs);
			up_write(&htable_rw_lock);
		}

next_referral:
		kfree(path);
		free_dfs_info_array(refs, numrefs);
	}
}

/*
 * Worker that will refresh DFS cache and active mounts based on lowest TTL value
 * from a DFS referral.
 */
static void refresh_cache_worker(struct work_struct *work)
{
	struct list_head mglist;
	struct mount_group *mg, *tmp_mg;
	struct cifs_ses *sessions[CACHE_MAX_ENTRIES + 1] = {NULL};
	int max_sessions = ARRAY_SIZE(sessions) - 1;
	int i = 0, count;

	INIT_LIST_HEAD(&mglist);

	/* Take a reference on all mount groups */
	mutex_lock(&mount_group_list_lock);
	list_for_each_entry(mg, &mount_group_list, list) {
		kref_get(&mg->refcount);
		list_add(&mg->refresh_list, &mglist);
	}
	mutex_unlock(&mount_group_list_lock);

	/* Fill in local array with a NULL-terminated list of all referral server sessions */
	list_for_each_entry(mg, &mglist, refresh_list) {
		if (i >= max_sessions)
			break;

		spin_lock(&mg->lock);
		if (i + mg->num_sessions > max_sessions)
			count = max_sessions - i;
		else
			count = mg->num_sessions;
		memcpy(&sessions[i], mg->sessions, count * sizeof(mg->sessions[0]));
		spin_unlock(&mg->lock);
		i += count;
	}

	if (sessions[0]) {
		/* Refresh all active mounts and cached entries */
		refresh_mounts(sessions);
		refresh_cache(sessions);
	}

	list_for_each_entry_safe(mg, tmp_mg, &mglist, refresh_list) {
		list_del_init(&mg->refresh_list);
		kref_put(&mg->refcount, mount_group_release);
	}

	spin_lock(&cache_ttl_lock);
	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	spin_unlock(&cache_ttl_lock);
}