/* AFS cell and server record management. */

#include <linux/slab.h>
#include <linux/key.h>
#include <linux/ctype.h>
#include <linux/dns_resolver.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/namei.h>
#include <keys/rxrpc-type.h>
#include "internal.h"

static unsigned __read_mostly afs_cell_gc_delay = 10;
static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;
static atomic_t cell_debug_id;

static void afs_queue_cell_manager(struct afs_net *);
static void afs_manage_cell_work(struct work_struct *);
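
/*
 * Drop the count of outstanding cell-management work and wake up anyone
 * waiting for it to reach zero (e.g. during namespace shutdown).
 */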
static void afs_dec_cells_outstanding(struct afs_net *net)
{
	if (atomic_dec_and_test(&net->cells_outstanding))
		wake_up_var(&net->cells_outstanding);
}
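
/*
 * Set the cell management timer to fire after the given delay (in seconds),
 * unless it is already due to fire sooner.  If the namespace is being torn
 * down, kick the cell manager directly instead.
 */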
static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
{
	if (net->live) {
		atomic_inc(&net->cells_outstanding);
		if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
			afs_dec_cells_outstanding(net);
	} else {
		afs_queue_cell_manager(net);
	}
}
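
/*
 * Look up a cell record and take an activation reference on it.  The caller
 * must hold net->cells_lock at least for reading.  If no name is given, the
 * workstation ("ws") cell is returned.
 */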
static struct afs_cell *afs_find_cell_locked(struct afs_net *net,
					     const char *name, unsigned int namesz,
					     enum afs_cell_trace reason)
{
	struct afs_cell *cell = NULL;
	struct rb_node *p;
	int n;

	_enter("%*.*s", namesz, namesz, name);

	if (name && namesz == 0)
		return ERR_PTR(-EINVAL);
	if (namesz > AFS_MAXCELLNAME)
		return ERR_PTR(-ENAMETOOLONG);

	if (!name) {
		cell = net->ws_cell;
		if (!cell)
			return ERR_PTR(-EDESTADDRREQ);
		goto found;
	}

	p = net->cells.rb_node;
	while (p) {
		cell = rb_entry(p, struct afs_cell, net_node);

		n = strncasecmp(cell->name, name,
				min_t(size_t, cell->name_len, namesz));
		if (n == 0)
			n = cell->name_len - namesz;
		if (n < 0)
			p = p->rb_left;
		else if (n > 0)
			p = p->rb_right;
		else
			goto found;
	}

	return ERR_PTR(-ENOENT);

found:
	return afs_use_cell(cell, reason);
}
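
/*
 * Look up a cell record and take an activation reference on it.
 */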
struct afs_cell *afs_find_cell(struct afs_net *net,
			       const char *name, unsigned int namesz,
			       enum afs_cell_trace reason)
{
	struct afs_cell *cell;

	down_read(&net->cells_lock);
	cell = afs_find_cell_locked(net, name, namesz, reason);
	up_read(&net->cells_lock);
	return cell;
}
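
/*
 * Set up a cell record, filling in its name and parsing the VL server
 * address list if one was supplied.
 */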
static struct afs_cell *afs_alloc_cell(struct afs_net *net,
				       const char *name, unsigned int namelen,
				       const char *addresses)
{
	struct afs_vlserver_list *vllist;
	struct afs_cell *cell;
	int i, ret;

	ASSERT(name);
	if (namelen == 0)
		return ERR_PTR(-EINVAL);
	if (namelen > AFS_MAXCELLNAME) {
		_leave(" = -ENAMETOOLONG");
		return ERR_PTR(-ENAMETOOLONG);
	}

	/* Prohibit cell names that contain unprintable chars, '/' and '@' or
	 * that begin with a dot.
	 */
	if (name[0] == '.')
		return ERR_PTR(-EINVAL);
	for (i = 0; i < namelen; i++) {
		char ch = name[i];
		if (!isprint(ch) || ch == '/' || ch == '@')
			return ERR_PTR(-EINVAL);
	}

	_enter("%*.*s,%s", namelen, namelen, name, addresses);

	cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
	if (!cell) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	cell->name = kmalloc(namelen + 1, GFP_KERNEL);
	if (!cell->name) {
		kfree(cell);
		return ERR_PTR(-ENOMEM);
	}

	cell->net = net;
	cell->name_len = namelen;
	for (i = 0; i < namelen; i++)
		cell->name[i] = tolower(name[i]);
	cell->name[i] = 0;

	refcount_set(&cell->ref, 1);
	atomic_set(&cell->active, 0);
	INIT_WORK(&cell->manager, afs_manage_cell_work);
	cell->volumes = RB_ROOT;
	INIT_HLIST_HEAD(&cell->proc_volumes);
	seqlock_init(&cell->volume_lock);
	cell->fs_servers = RB_ROOT;
	seqlock_init(&cell->fs_lock);
	INIT_LIST_HEAD(&cell->fs_open_mmaps);
	init_rwsem(&cell->fs_open_mmaps_lock);
	rwlock_init(&cell->vl_servers_lock);
	cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);

	/* Provide a VL server list, filling it in if we were given a list of
	 * addresses to use.
	 */
	if (addresses) {
		vllist = afs_parse_text_addrs(net,
					      addresses, strlen(addresses), ':',
					      VL_SERVICE, AFS_VL_PORT);
		if (IS_ERR(vllist)) {
			ret = PTR_ERR(vllist);
			goto parse_failed;
		}

		vllist->source = DNS_RECORD_FROM_CONFIG;
		vllist->status = DNS_LOOKUP_NOT_DONE;
		cell->dns_expiry = TIME64_MAX;
	} else {
		ret = -ENOMEM;
		vllist = afs_alloc_vlserver_list(0);
		if (!vllist)
			goto error;
		vllist->source = DNS_RECORD_UNAVAILABLE;
		vllist->status = DNS_LOOKUP_NOT_DONE;
		cell->dns_expiry = ktime_get_real_seconds();
	}

	rcu_assign_pointer(cell->vl_servers, vllist);

	cell->dns_source = vllist->source;
	cell->dns_status = vllist->status;
	smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */
	atomic_inc(&net->cells_outstanding);
	cell->debug_id = atomic_inc_return(&cell_debug_id);
	trace_afs_cell(cell->debug_id, 1, 0, afs_cell_trace_alloc);

	_leave(" = %p", cell);
	return cell;

parse_failed:
	if (ret == -EINVAL)
		printk(KERN_ERR "kAFS: bad VL server IP address\n");
error:
	kfree(cell->name);
	kfree(cell);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
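
/*
 * afs_lookup_cell - Look up or create a cell record.
 * @net: The network namespace.
 * @name: The name of the cell.
 * @namesz: The strlen of the cell name.
 * @vllist: A colon-separated list of numeric VL server addresses or NULL.
 * @excl: T if an error should be given if the cell name already exists.
 *
 * Look up a cell record by name and, if one isn't found, create a candidate,
 * queue it for the cell manager and wait for it to become active (or be
 * removed).  Returns -EEXIST if @excl is set and the cell already exists.
 */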
struct afs_cell *afs_lookup_cell(struct afs_net *net,
				 const char *name, unsigned int namesz,
				 const char *vllist, bool excl)
{
	struct afs_cell *cell, *candidate, *cursor;
	struct rb_node *parent, **pp;
	enum afs_cell_state state;
	int ret, n;

	_enter("%s,%s", name, vllist);

	if (!excl) {
		cell = afs_find_cell(net, name, namesz, afs_cell_trace_use_lookup);
		if (!IS_ERR(cell))
			goto wait_for_cell;
	}

	/* Assume we're not going to find the cell in the tree and allocate a
	 * candidate record before taking the lock.
	 */
	candidate = afs_alloc_cell(net, name, namesz, vllist);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	/* Find the insertion point and check to see if someone else added a
	 * cell whilst we were allocating.
	 */
	down_write(&net->cells_lock);

	pp = &net->cells.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent, struct afs_cell, net_node);

		n = strncasecmp(cursor->name, name,
				min_t(size_t, cursor->name_len, namesz));
		if (n == 0)
			n = cursor->name_len - namesz;
		if (n < 0)
			pp = &(*pp)->rb_left;
		else if (n > 0)
			pp = &(*pp)->rb_right;
		else
			goto cell_already_exists;
	}

	cell = candidate;
	candidate = NULL;
	atomic_set(&cell->active, 2);
	trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 2, afs_cell_trace_insert);
	rb_link_node_rcu(&cell->net_node, parent, pp);
	rb_insert_color(&cell->net_node, &net->cells);
	up_write(&net->cells_lock);

	afs_queue_cell(cell, afs_cell_trace_get_queue_new);

wait_for_cell:
	trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), atomic_read(&cell->active),
		       afs_cell_trace_wait);
	_debug("wait_for_cell");
	wait_var_event(&cell->state,
		       ({
			       state = smp_load_acquire(&cell->state); /* vs error */
			       state == AFS_CELL_ACTIVE || state == AFS_CELL_REMOVED;
		       }));

	/* Check the state obtained from the wait check. */
	if (state == AFS_CELL_REMOVED) {
		ret = cell->error;
		goto error;
	}

	_leave(" = %p [cell]", cell);
	return cell;

cell_already_exists:
	_debug("cell exists");
	cell = cursor;
	if (excl) {
		ret = -EEXIST;
	} else {
		afs_use_cell(cursor, afs_cell_trace_use_lookup);
		ret = 0;
	}
	up_write(&net->cells_lock);
	if (candidate)
		afs_put_cell(candidate, afs_cell_trace_put_candidate);
	if (ret == 0)
		goto wait_for_cell;
	goto error_noput;
error:
	afs_unuse_cell(net, cell, afs_cell_trace_unuse_lookup);
error_noput:
	_leave(" = %d [error]", ret);
	return ERR_PTR(ret);
}
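
/*
 * Set the root ("workstation") cell, as specified by a "name[:vl-addrs]"
 * string such as the rootcell module parameter.
 */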
int afs_cell_init(struct afs_net *net, const char *rootcell)
{
	struct afs_cell *old_root, *new_root;
	const char *cp, *vllist;
	size_t len;

	_enter("");

	if (!rootcell) {
		/* No root cell was specified. */
		_leave(" = 0 [no root]");
		return 0;
	}

	cp = strchr(rootcell, ':');
	if (!cp) {
		_debug("kAFS: no VL server IP addresses specified");
		vllist = NULL;
		len = strlen(rootcell);
	} else {
		vllist = cp + 1;
		len = cp - rootcell;
	}

	/* Allocate a cell record for the root cell. */
	new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
	if (IS_ERR(new_root)) {
		_leave(" = %ld", PTR_ERR(new_root));
		return PTR_ERR(new_root);
	}

	if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
		afs_use_cell(new_root, afs_cell_trace_use_pin);

	/* Install the new cell. */
	down_write(&net->cells_lock);
	afs_see_cell(new_root, afs_cell_trace_see_ws);
	old_root = net->ws_cell;
	net->ws_cell = new_root;
	up_write(&net->cells_lock);

	afs_unuse_cell(net, old_root, afs_cell_trace_unuse_ws);
	_leave(" = 0");
	return 0;
}
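
/*
 * Update a cell's VL server address list from the DNS.
 */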
static int afs_update_cell(struct afs_cell *cell)
{
	struct afs_vlserver_list *vllist, *old = NULL, *p;
	unsigned int min_ttl = READ_ONCE(afs_cell_min_ttl);
	unsigned int max_ttl = READ_ONCE(afs_cell_max_ttl);
	time64_t now, expiry = 0;
	int ret = 0;

	_enter("%s", cell->name);

	vllist = afs_dns_query(cell, &expiry);
	if (IS_ERR(vllist)) {
		ret = PTR_ERR(vllist);

		_debug("%s: fail %d", cell->name, ret);
		if (ret == -ENOMEM)
			goto out_wake;

		/* Fall back to an empty server list, mapping the DNS error to
		 * a lookup status.  Note that ret must still hold the DNS
		 * error when the switch below is evaluated, so only set
		 * -ENOMEM if the allocation itself fails.
		 */
		vllist = afs_alloc_vlserver_list(0);
		if (!vllist) {
			ret = -ENOMEM;
			goto out_wake;
		}

		switch (ret) {
		case -ENODATA:
		case -EDESTADDRREQ:
			vllist->status = DNS_LOOKUP_GOT_NOT_FOUND;
			break;
		case -EAGAIN:
		case -ECONNREFUSED:
			vllist->status = DNS_LOOKUP_GOT_TEMP_FAILURE;
			break;
		default:
			vllist->status = DNS_LOOKUP_GOT_LOCAL_FAILURE;
			break;
		}
	}

	_debug("%s: got list %d %d", cell->name, vllist->source, vllist->status);
	cell->dns_status = vllist->status;

	now = ktime_get_real_seconds();
	if (min_ttl > max_ttl)
		max_ttl = min_ttl;
	if (expiry < now + min_ttl)
		expiry = now + min_ttl;
	else if (expiry > now + max_ttl)
		expiry = now + max_ttl;

	_debug("%s: status %d", cell->name, vllist->status);
	if (vllist->source == DNS_RECORD_UNAVAILABLE) {
		switch (vllist->status) {
		case DNS_LOOKUP_GOT_NOT_FOUND:
			/* The DNS said that the cell does not exist or there
			 * weren't any addresses to be had.
			 */
			cell->dns_expiry = expiry;
			break;

		case DNS_LOOKUP_BAD:
		case DNS_LOOKUP_GOT_LOCAL_FAILURE:
		case DNS_LOOKUP_GOT_TEMP_FAILURE:
		case DNS_LOOKUP_GOT_NS_FAILURE:
		default:
			cell->dns_expiry = now + 10;
			break;
		}
	} else {
		cell->dns_expiry = expiry;
	}

	/* Replace the VL server list if the new record has servers or the old
	 * record doesn't.
	 */
	write_lock(&cell->vl_servers_lock);
	p = rcu_dereference_protected(cell->vl_servers, true);
	if (vllist->nr_servers > 0 || p->nr_servers == 0) {
		rcu_assign_pointer(cell->vl_servers, vllist);
		cell->dns_source = vllist->source;
		old = p;
	}
	write_unlock(&cell->vl_servers_lock);
	afs_put_vlserverlist(cell->net, old);

out_wake:
	smp_store_release(&cell->dns_lookup_count,
			  cell->dns_lookup_count + 1); /* vs source/status */
	wake_up_var(&cell->dns_lookup_count);
	_leave(" = %d", ret);
	return ret;
}
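
/*
 * Destroy a cell record, freeing its resources once the RCU grace period has
 * passed.
 */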
static void afs_cell_destroy(struct rcu_head *rcu)
{
	struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);
	struct afs_net *net = cell->net;
	int r;

	_enter("%p{%s}", cell, cell->name);

	r = refcount_read(&cell->ref);
	ASSERTCMP(r, ==, 0);
	trace_afs_cell(cell->debug_id, r, atomic_read(&cell->active), afs_cell_trace_free);

	afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers));
	afs_unuse_cell(net, cell->alias_of, afs_cell_trace_unuse_alias);
	key_put(cell->anonymous_key);
	kfree(cell->name);
	kfree(cell);

	afs_dec_cells_outstanding(net);
	_leave(" [destroyed]");
}
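
/*
 * Queue the cell manager work item, accounting for it in cells_outstanding.
 */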
static void afs_queue_cell_manager(struct afs_net *net)
{
	int outstanding = atomic_inc_return(&net->cells_outstanding);

	_enter("%d", outstanding);

	if (!queue_work(afs_wq, &net->cells_manager))
		afs_dec_cells_outstanding(net);
}
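
/*
 * Cell management timer.  We have an increment on cells_outstanding that we
 * need to pass along to the work item.
 */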
void afs_cells_timer(struct timer_list *timer)
{
	struct afs_net *net = container_of(timer, struct afs_net, cells_timer);

	_enter("");
	if (!queue_work(afs_wq, &net->cells_manager))
		afs_dec_cells_outstanding(net);
}
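
/*
 * Get a reference on a cell record.
 */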
struct afs_cell *afs_get_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	int r;

	__refcount_inc(&cell->ref, &r);
	trace_afs_cell(cell->debug_id, r + 1, atomic_read(&cell->active), reason);
	return cell;
}
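
/*
 * Drop a reference on a cell record, destroying it via RCU when the last
 * reference goes away.
 */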
void afs_put_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	if (cell) {
		unsigned int debug_id = cell->debug_id;
		unsigned int a;
		bool zero;
		int r;

		a = atomic_read(&cell->active);
		zero = __refcount_dec_and_test(&cell->ref, &r);
		trace_afs_cell(debug_id, r - 1, a, reason);
		if (zero) {
			a = atomic_read(&cell->active);
			WARN(a != 0, "Cell active count %u > 0\n", a);
			call_rcu(&cell->rcu, afs_cell_destroy);
		}
	}
}
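
/*
 * Note a cell becoming more active.
 */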
struct afs_cell *afs_use_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	int r, a;

	r = refcount_read(&cell->ref);
	WARN_ON(r == 0);
	a = atomic_inc_return(&cell->active);
	trace_afs_cell(cell->debug_id, r, a, reason);
	return cell;
}
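
/*
 * Record a cell becoming less active.  When the active counter reaches 1, the
 * cell is scheduled for garbage collection, but it may get reactivated before
 * then.
 */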
void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell, enum afs_cell_trace reason)
{
	unsigned int debug_id;
	time64_t now, expire_delay;
	int r, a;

	if (!cell)
		return;

	_enter("%s", cell->name);

	now = ktime_get_real_seconds();
	cell->last_inactive = now;
	expire_delay = 0;
	if (cell->vl_servers->nr_servers)
		expire_delay = afs_cell_gc_delay;

	debug_id = cell->debug_id;
	r = refcount_read(&cell->ref);
	a = atomic_dec_return(&cell->active);
	trace_afs_cell(debug_id, r, a, reason);
	WARN_ON(a == 0);
	if (a == 1)
		/* 'cell' may now be garbage collected. */
		afs_set_cell_timer(net, expire_delay);
}
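
/*
 * Note that a cell has been seen (for tracing purposes).
 */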
void afs_see_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	int r, a;

	r = refcount_read(&cell->ref);
	a = atomic_read(&cell->active);
	trace_afs_cell(cell->debug_id, r, a, reason);
}
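
/*
 * Queue a cell for management, giving the workqueue a ref to hold.
 */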
void afs_queue_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	afs_get_cell(cell, reason);
	if (!queue_work(afs_wq, &cell->manager))
		afs_put_cell(cell, afs_cell_trace_put_queue_fail);
}
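
/*
 * Allocate a key to use as a placeholder for anonymous user security.
 */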
static int afs_alloc_anon_key(struct afs_cell *cell)
{
	struct key *key;
	char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp;

	/* Create a key to represent an anonymous user. */
	memcpy(keyname, "afs@", 4);
	dp = keyname + 4;
	cp = cell->name;
	do {
		*dp++ = tolower(*cp);
	} while (*cp++);

	key = rxrpc_get_null_key(keyname);
	if (IS_ERR(key))
		return PTR_ERR(key);

	cell->anonymous_key = key;

	_debug("anon key %p{%x}",
	       cell->anonymous_key, key_serial(cell->anonymous_key));
	return 0;
}
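
/*
 * Activate a cell: set up its anonymous key and /proc files and add it to the
 * dynamic root if there is one.
 */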
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{
	struct hlist_node **p;
	struct afs_cell *pcell;
	int ret;

	if (!cell->anonymous_key) {
		ret = afs_alloc_anon_key(cell);
		if (ret < 0)
			return ret;
	}

	ret = afs_proc_cell_setup(cell);
	if (ret < 0)
		return ret;

	/* Insert the cell into the name-sorted list used for /proc and the
	 * dynamic root directory.
	 */
	mutex_lock(&net->proc_cells_lock);
	for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
		pcell = hlist_entry(*p, struct afs_cell, proc_link);
		if (strcmp(cell->name, pcell->name) < 0)
			break;
	}

	cell->proc_link.pprev = p;
	cell->proc_link.next = *p;
	rcu_assign_pointer(*p, &cell->proc_link.next);
	if (cell->proc_link.next)
		cell->proc_link.next->pprev = &cell->proc_link.next;

	afs_dynroot_mkdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);
	return 0;
}
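
/*
 * Deactivate a cell: remove its /proc files and withdraw it from the dynamic
 * root if there is one.
 */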
static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
{
	_enter("%s", cell->name);

	afs_proc_cell_remove(cell);

	mutex_lock(&net->proc_cells_lock);
	hlist_del_rcu(&cell->proc_link);
	afs_dynroot_rmdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);

	_leave("");
}
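
/*
 * Manage a cell record, initialising and destroying it, maintaining its DNS
 * records.
 */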
static void afs_manage_cell(struct afs_cell *cell)
{
	struct afs_net *net = cell->net;
	int ret, active;

	_enter("%s", cell->name);

again:
	_debug("state %u", cell->state);
	switch (cell->state) {
	case AFS_CELL_INACTIVE:
	case AFS_CELL_FAILED:
		down_write(&net->cells_lock);
		active = 1;
		if (atomic_try_cmpxchg_relaxed(&cell->active, &active, 0)) {
			rb_erase(&cell->net_node, &net->cells);
			trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 0,
				       afs_cell_trace_unuse_delete);
			smp_store_release(&cell->state, AFS_CELL_REMOVED);
		}
		up_write(&net->cells_lock);
		if (cell->state == AFS_CELL_REMOVED) {
			wake_up_var(&cell->state);
			goto final_destruction;
		}
		if (cell->state == AFS_CELL_FAILED)
			goto done;
		smp_store_release(&cell->state, AFS_CELL_UNSET);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_UNSET:
		smp_store_release(&cell->state, AFS_CELL_ACTIVATING);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_ACTIVATING:
		ret = afs_activate_cell(net, cell);
		if (ret < 0)
			goto activation_failed;

		smp_store_release(&cell->state, AFS_CELL_ACTIVE);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_ACTIVE:
		if (atomic_read(&cell->active) > 1) {
			if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
				ret = afs_update_cell(cell);
				if (ret < 0)
					cell->error = ret;
			}
			goto done;
		}
		smp_store_release(&cell->state, AFS_CELL_DEACTIVATING);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_DEACTIVATING:
		if (atomic_read(&cell->active) > 1)
			goto reverse_deactivation;
		afs_deactivate_cell(net, cell);
		smp_store_release(&cell->state, AFS_CELL_INACTIVE);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_REMOVED:
		goto done;

	default:
		break;
	}
	_debug("bad state %u", cell->state);
	BUG();

activation_failed:
	cell->error = ret;
	afs_deactivate_cell(net, cell);

	smp_store_release(&cell->state, AFS_CELL_FAILED); /* vs error */
	wake_up_var(&cell->state);
	goto again;

reverse_deactivation:
	smp_store_release(&cell->state, AFS_CELL_ACTIVE);
	wake_up_var(&cell->state);
	_leave(" [deact->act]");
	return;

done:
	_leave(" [done %u]", cell->state);
	return;

final_destruction:
	/* The root volume is pinning the cell. */
	afs_put_volume(cell->net, cell->root_volume, afs_volume_trace_put_cell_root);
	cell->root_volume = NULL;
	afs_put_cell(cell, afs_cell_trace_put_destroy);
}
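
/*
 * Work item wrapper for cell management; drops the ref taken when the work
 * was queued.
 */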
static void afs_manage_cell_work(struct work_struct *work)
{
	struct afs_cell *cell = container_of(work, struct afs_cell, manager);

	afs_manage_cell(cell);
	afs_put_cell(cell, afs_cell_trace_put_queue_work);
}
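
/*
 * Manage the records of cells known to a network namespace.  This includes
 * updating the DNS records and garbage collecting unused cells that were
 * automatically added.
 *
 * Note that we were given an increment on net->cells_outstanding by whoever
 * queued us that we need to deal with before returning.
 */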
void afs_manage_cells(struct work_struct *work)
{
	struct afs_net *net = container_of(work, struct afs_net, cells_manager);
	struct rb_node *cursor;
	time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
	bool purging = !net->live;

	_enter("");

	/* Trawl the cell database looking for cells that have expired from
	 * lack of use and cells whose DNS results have expired and dispatch
	 * their managers.
	 */
	down_read(&net->cells_lock);

	for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
		struct afs_cell *cell =
			rb_entry(cursor, struct afs_cell, net_node);
		unsigned active;
		bool sched_cell = false;

		active = atomic_read(&cell->active);
		trace_afs_cell(cell->debug_id, refcount_read(&cell->ref),
			       active, afs_cell_trace_manage);

		ASSERTCMP(active, >=, 1);

		if (purging) {
			if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags)) {
				active = atomic_dec_return(&cell->active);
				trace_afs_cell(cell->debug_id, refcount_read(&cell->ref),
					       active, afs_cell_trace_unuse_pin);
			}
		}

		if (active == 1) {
			struct afs_vlserver_list *vllist;
			time64_t expire_at = cell->last_inactive;

			read_lock(&cell->vl_servers_lock);
			vllist = rcu_dereference_protected(
				cell->vl_servers,
				lockdep_is_held(&cell->vl_servers_lock));
			if (vllist->nr_servers > 0)
				expire_at += afs_cell_gc_delay;
			read_unlock(&cell->vl_servers_lock);
			if (purging || expire_at <= now)
				sched_cell = true;
			else if (expire_at < next_manage)
				next_manage = expire_at;
		}

		if (!purging) {
			if (test_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags))
				sched_cell = true;
		}

		if (sched_cell)
			afs_queue_cell(cell, afs_cell_trace_get_queue_manage);
	}

	up_read(&net->cells_lock);

	/* Update the timer on the way out.  We have to pass an increment on
	 * cells_outstanding in the namespace that we are in to the timer or
	 * the work scheduler.
	 */
	if (!purging && next_manage < TIME64_MAX) {
		now = ktime_get_real_seconds();

		if (next_manage - now <= 0) {
			if (queue_work(afs_wq, &net->cells_manager))
				atomic_inc(&net->cells_outstanding);
		} else {
			afs_set_cell_timer(net, next_manage - now);
		}
	}

	afs_dec_cells_outstanding(net);
	_leave(" [%d]", atomic_read(&net->cells_outstanding));
}
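
/*
 * Purge the in-memory cell database on namespace shutdown.
 */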
void afs_cell_purge(struct afs_net *net)
{
	struct afs_cell *ws;

	_enter("");

	down_write(&net->cells_lock);
	ws = net->ws_cell;
	net->ws_cell = NULL;
	up_write(&net->cells_lock);
	afs_unuse_cell(net, ws, afs_cell_trace_unuse_ws);

	_debug("del timer");
	if (del_timer_sync(&net->cells_timer))
		atomic_dec(&net->cells_outstanding);

	_debug("kick mgr");
	afs_queue_cell_manager(net);

	_debug("wait");
	wait_var_event(&net->cells_outstanding,
		       !atomic_read(&net->cells_outstanding));
	_leave("");
}