/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 */
0011 #include <linux/types.h>
0012 #include <linux/fs.h>
0013 #include <linux/file.h>
0014 #include <linux/slab.h>
0015 #include <linux/signal.h>
0016 #include <linux/sched.h>
0017 #include <linux/kmod.h>
0018 #include <linux/list.h>
0019 #include <linux/module.h>
0020 #include <linux/ctype.h>
0021 #include <linux/string_helpers.h>
0022 #include <linux/uaccess.h>
0023 #include <linux/poll.h>
0024 #include <linux/seq_file.h>
0025 #include <linux/proc_fs.h>
0026 #include <linux/net.h>
0027 #include <linux/workqueue.h>
0028 #include <linux/mutex.h>
0029 #include <linux/pagemap.h>
0030 #include <asm/ioctls.h>
0031 #include <linux/sunrpc/types.h>
0032 #include <linux/sunrpc/cache.h>
0033 #include <linux/sunrpc/stats.h>
0034 #include <linux/sunrpc/rpc_pipe_fs.h>
0035 #include <trace/events/sunrpc.h>
0036
0037 #include "netns.h"
0038 #include "fail.h"
0039
0040 #define RPCDBG_FACILITY RPCDBG_CACHE
0041
0042 static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
0043 static void cache_revisit_request(struct cache_head *item);
0044
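/*
 * Initialise a freshly allocated cache_head: clear the flags, take the
 * initial reference, and give it a short provisional expiry so that an
 * entry that never gets filled in is cleaned up promptly.
 */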
0045 static void cache_init(struct cache_head *h, struct cache_detail *detail)
0046 {
0047 time64_t now = seconds_since_boot();
0048 INIT_HLIST_NODE(&h->cache_list);
0049 h->flags = 0;
0050 kref_init(&h->ref);
0051 h->expiry_time = now + CACHE_NEW_EXPIRY;
	if (now <= detail->flush_time)
		/* ensure it isn't already expired */
		now = detail->flush_time + 1;
0055 h->last_refresh = now;
0056 }
0057
0058 static void cache_fresh_unlocked(struct cache_head *head,
0059 struct cache_detail *detail);
0060
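/*
 * Look up @key in hash chain @hash of @detail under RCU.  Returns a
 * referenced cache_head on success, or NULL if no live (non-expired)
 * matching entry exists.
 */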
0061 static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
0062 struct cache_head *key,
0063 int hash)
0064 {
0065 struct hlist_head *head = &detail->hash_table[hash];
0066 struct cache_head *tmp;
0067
0068 rcu_read_lock();
0069 hlist_for_each_entry_rcu(tmp, head, cache_list) {
0070 if (!detail->match(tmp, key))
0071 continue;
0072 if (test_bit(CACHE_VALID, &tmp->flags) &&
0073 cache_is_expired(detail, tmp))
0074 continue;
0075 tmp = cache_get_rcu(tmp);
0076 rcu_read_unlock();
0077 return tmp;
0078 }
0079 rcu_read_unlock();
0080 return NULL;
0081 }
0082
0083 static void sunrpc_begin_cache_remove_entry(struct cache_head *ch,
0084 struct cache_detail *cd)
0085 {
	/* Must be called under cd->hash_lock */
	hlist_del_init_rcu(&ch->cache_list);
	set_bit(CACHE_CLEANED, &ch->flags);
	cd->entries--;
0090 }
0091
0092 static void sunrpc_end_cache_remove_entry(struct cache_head *ch,
0093 struct cache_detail *cd)
0094 {
0095 cache_fresh_unlocked(ch, cd);
0096 cache_put(ch, cd);
0097 }
0098
0099 static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
0100 struct cache_head *key,
0101 int hash)
0102 {
0103 struct cache_head *new, *tmp, *freeme = NULL;
0104 struct hlist_head *head = &detail->hash_table[hash];
0105
0106 new = detail->alloc();
0107 if (!new)
0108 return NULL;
	/*
	 * Fully initialise 'new' before it becomes visible anywhere, as a
	 * lookup race below may force us to cache_put() it straight away.
	 */
0113 cache_init(new, detail);
0114 detail->init(new, key);
0115
0116 spin_lock(&detail->hash_lock);
0117
	/* check if entry appeared while we slept */
0119 hlist_for_each_entry_rcu(tmp, head, cache_list,
0120 lockdep_is_held(&detail->hash_lock)) {
0121 if (!detail->match(tmp, key))
0122 continue;
0123 if (test_bit(CACHE_VALID, &tmp->flags) &&
0124 cache_is_expired(detail, tmp)) {
0125 sunrpc_begin_cache_remove_entry(tmp, detail);
0126 trace_cache_entry_expired(detail, tmp);
0127 freeme = tmp;
0128 break;
0129 }
0130 cache_get(tmp);
0131 spin_unlock(&detail->hash_lock);
0132 cache_put(new, detail);
0133 return tmp;
0134 }
0135
0136 hlist_add_head_rcu(&new->cache_list, head);
0137 detail->entries++;
0138 cache_get(new);
0139 spin_unlock(&detail->hash_lock);
0140
0141 if (freeme)
0142 sunrpc_end_cache_remove_entry(freeme, detail);
0143 return new;
0144 }
0145
0146 struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
0147 struct cache_head *key, int hash)
0148 {
0149 struct cache_head *ret;
0150
0151 ret = sunrpc_cache_find_rcu(detail, key, hash);
0152 if (ret)
0153 return ret;
0154
0155 return sunrpc_cache_add_entry(detail, key, hash);
0156 }
0157 EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);
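/*
 * Illustrative only: a cache implementation typically embeds a
 * struct cache_head in its own entry type and wraps the lookup,
 * roughly like the (hypothetical) sketch below.
 *
 *	struct my_entry {
 *		struct cache_head h;	// embedded cache_head
 *		// ... key and content fields ...
 *	};
 *
 *	static struct my_entry *my_lookup(struct cache_detail *cd,
 *					  struct my_entry *key, int hash)
 *	{
 *		struct cache_head *ch;
 *
 *		ch = sunrpc_cache_lookup_rcu(cd, &key->h, hash);
 *		if (!ch)
 *			return NULL;
 *		return container_of(ch, struct my_entry, h);
 *	}
 */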
0158
0159 static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
0160
0161 static void cache_fresh_locked(struct cache_head *head, time64_t expiry,
0162 struct cache_detail *detail)
0163 {
0164 time64_t now = seconds_since_boot();
	if (now <= detail->flush_time)
		/* ensure it isn't already expired */
		now = detail->flush_time + 1;
0168 head->expiry_time = expiry;
0169 head->last_refresh = now;
0170 smp_wmb();
0171 set_bit(CACHE_VALID, &head->flags);
0172 }
0173
0174 static void cache_fresh_unlocked(struct cache_head *head,
0175 struct cache_detail *detail)
0176 {
0177 if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
0178 cache_revisit_request(head);
0179 cache_dequeue(detail, head);
0180 }
0181 }
0182
0183 static void cache_make_negative(struct cache_detail *detail,
0184 struct cache_head *h)
0185 {
0186 set_bit(CACHE_NEGATIVE, &h->flags);
0187 trace_cache_entry_make_negative(detail, h);
0188 }
0189
0190 static void cache_entry_update(struct cache_detail *detail,
0191 struct cache_head *h,
0192 struct cache_head *new)
0193 {
0194 if (!test_bit(CACHE_NEGATIVE, &new->flags)) {
0195 detail->update(h, new);
0196 trace_cache_entry_update(detail, h);
0197 } else {
0198 cache_make_negative(detail, h);
0199 }
0200 }
0201
0202 struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
0203 struct cache_head *new, struct cache_head *old, int hash)
0204 {
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it
	 */
	struct cache_head *tmp;
0209 struct cache_head *tmp;
0210
0211 if (!test_bit(CACHE_VALID, &old->flags)) {
0212 spin_lock(&detail->hash_lock);
0213 if (!test_bit(CACHE_VALID, &old->flags)) {
0214 cache_entry_update(detail, old, new);
0215 cache_fresh_locked(old, new->expiry_time, detail);
0216 spin_unlock(&detail->hash_lock);
0217 cache_fresh_unlocked(old, detail);
0218 return old;
0219 }
0220 spin_unlock(&detail->hash_lock);
0221 }
0222
0223 tmp = detail->alloc();
0224 if (!tmp) {
0225 cache_put(old, detail);
0226 return NULL;
0227 }
0228 cache_init(tmp, detail);
0229 detail->init(tmp, old);
0230
0231 spin_lock(&detail->hash_lock);
0232 cache_entry_update(detail, tmp, new);
0233 hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
0234 detail->entries++;
0235 cache_get(tmp);
0236 cache_fresh_locked(tmp, new->expiry_time, detail);
0237 cache_fresh_locked(old, 0, detail);
0238 spin_unlock(&detail->hash_lock);
0239 cache_fresh_unlocked(tmp, detail);
0240 cache_fresh_unlocked(old, detail);
0241 cache_put(old, detail);
0242 return tmp;
0243 }
0244 EXPORT_SYMBOL_GPL(sunrpc_cache_update);
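/*
 * Note: sunrpc_cache_update() consumes the caller's reference on 'old'
 * and returns a referenced entry (either 'old', updated in place, or a
 * freshly inserted replacement), or NULL on allocation failure.
 */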
0245
0246 static inline int cache_is_valid(struct cache_head *h)
0247 {
0248 if (!test_bit(CACHE_VALID, &h->flags))
0249 return -EAGAIN;
0250 else {
		/* entry is valid */
0252 if (test_bit(CACHE_NEGATIVE, &h->flags))
0253 return -ENOENT;
0254 else {
			/*
			 * In combination with the write barrier in
			 * cache_fresh_locked(), this ensures that anyone
			 * using the cache entry after this point sees the
			 * updated contents:
			 */
0261 smp_rmb();
0262 return 0;
0263 }
0264 }
0265 }
0266
0267 static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
0268 {
0269 int rv;
0270
0271 spin_lock(&detail->hash_lock);
0272 rv = cache_is_valid(h);
0273 if (rv == -EAGAIN) {
0274 cache_make_negative(detail, h);
0275 cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
0276 detail);
0277 rv = -ENOENT;
0278 }
0279 spin_unlock(&detail->hash_lock);
0280 cache_fresh_unlocked(h, detail);
0281 return rv;
0282 }
0283
/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 *	-EAGAIN if an upcall is pending and the request has been queued
 *	-ETIMEDOUT if the upcall failed, or the request could not be queued,
 *	           or the upcall completed but the item is still invalid
 *	           (implying that the cache item has been replaced with a
 *	           newer one).
 *	-ENOENT if the cache entry was negative
 */
0298 int cache_check(struct cache_detail *detail,
0299 struct cache_head *h, struct cache_req *rqstp)
0300 {
0301 int rv;
0302 time64_t refresh_age, age;
0303
	/* First decide return status as best we can */
0305 rv = cache_is_valid(h);
0306
	/* now see if we want to start an upcall */
0308 refresh_age = (h->expiry_time - h->last_refresh);
0309 age = seconds_since_boot() - h->last_refresh;
0310
0311 if (rqstp == NULL) {
0312 if (rv == -EAGAIN)
0313 rv = -ENOENT;
0314 } else if (rv == -EAGAIN ||
0315 (h->expiry_time != 0 && age > refresh_age/2)) {
0316 dprintk("RPC: Want update, refage=%lld, age=%lld\n",
0317 refresh_age, age);
0318 switch (detail->cache_upcall(detail, h)) {
0319 case -EINVAL:
0320 rv = try_to_negate_entry(detail, h);
0321 break;
0322 case -EAGAIN:
0323 cache_fresh_unlocked(h, detail);
0324 break;
0325 }
0326 }
0327
0328 if (rv == -EAGAIN) {
0329 if (!cache_defer_req(rqstp, h)) {
			/*
			 * Request was not deferred; handle it as best
			 * we can ourselves:
			 */
0334 rv = cache_is_valid(h);
0335 if (rv == -EAGAIN)
0336 rv = -ETIMEDOUT;
0337 }
0338 }
0339 if (rv)
0340 cache_put(h, detail);
0341 return rv;
0342 }
0343 EXPORT_SYMBOL_GPL(cache_check);
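/*
 * Illustrative only: a typical caller (e.g. a server-side auth routine)
 * holds a reference on the entry and interprets the result roughly as:
 *
 *	switch (cache_check(cd, &ent->h, &rqstp->rq_chandle)) {
 *	case 0:		// valid; reference retained, use then cache_put()
 *		break;
 *	case -EAGAIN:	// upcall queued, request deferred; retry later
 *		return SVC_DROP;
 *	case -ENOENT:	// negative entry (reference already dropped)
 *	default:	// -ETIMEDOUT etc. (reference already dropped)
 *		return SVC_DENIED;
 *	}
 *
 * The exact mapping to SVC_* results is up to the caller; 'ent' and the
 * switch above are only a sketch.
 */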
0344
/*
 * Caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and a current pointer
 * into that list (and into the hash table for that entry).
 *
 * Each time cache_clean is called it finds the next non-empty bucket
 * in the current table and walks the chain in that bucket looking for
 * entries that can be removed.
 *
 * An entry gets removed if:
 * - its expiry time is in the past, or
 * - its last_refresh time is before the flush_time for that cache.
 *
 * The 'nextcheck' field in cache_detail records the earliest time at
 * which rescanning the table could be useful: while scanning it is
 * lowered towards the smallest expiry time seen, and a table whose
 * nextcheck is still in the future is skipped entirely (unless its
 * flush_time is advanced, which resets nextcheck).
 */
0377 static LIST_HEAD(cache_list);
0378 static DEFINE_SPINLOCK(cache_list_lock);
0379 static struct cache_detail *current_detail;
0380 static int current_index;
0381
0382 static void do_cache_clean(struct work_struct *work);
0383 static struct delayed_work cache_cleaner;
0384
0385 void sunrpc_init_cache_detail(struct cache_detail *cd)
0386 {
0387 spin_lock_init(&cd->hash_lock);
0388 INIT_LIST_HEAD(&cd->queue);
0389 spin_lock(&cache_list_lock);
0390 cd->nextcheck = 0;
0391 cd->entries = 0;
0392 atomic_set(&cd->writers, 0);
0393 cd->last_close = 0;
0394 cd->last_warn = -1;
0395 list_add(&cd->others, &cache_list);
0396 spin_unlock(&cache_list_lock);
0397
	/* start the cleaning process */
0399 queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
0400 }
0401 EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);
0402
0403 void sunrpc_destroy_cache_detail(struct cache_detail *cd)
0404 {
0405 cache_purge(cd);
0406 spin_lock(&cache_list_lock);
0407 spin_lock(&cd->hash_lock);
0408 if (current_detail == cd)
0409 current_detail = NULL;
0410 list_del_init(&cd->others);
0411 spin_unlock(&cd->hash_lock);
0412 spin_unlock(&cache_list_lock);
0413 if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
0415 cancel_delayed_work_sync(&cache_cleaner);
0416 }
0417 }
0418 EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);
0419
/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
0426 static int cache_clean(void)
0427 {
0428 int rv = 0;
0429 struct list_head *next;
0430
0431 spin_lock(&cache_list_lock);
0432
	/* find a suitable table if we don't already have one */
0434 while (current_detail == NULL ||
0435 current_index >= current_detail->hash_size) {
0436 if (current_detail)
0437 next = current_detail->others.next;
0438 else
0439 next = cache_list.next;
0440 if (next == &cache_list) {
0441 current_detail = NULL;
0442 spin_unlock(&cache_list_lock);
0443 return -1;
0444 }
0445 current_detail = list_entry(next, struct cache_detail, others);
0446 if (current_detail->nextcheck > seconds_since_boot())
0447 current_index = current_detail->hash_size;
0448 else {
0449 current_index = 0;
0450 current_detail->nextcheck = seconds_since_boot()+30*60;
0451 }
0452 }
0453
	/* find a non-empty bucket in the table */
0455 while (current_detail &&
0456 current_index < current_detail->hash_size &&
	       hlist_empty(&current_detail->hash_table[current_index]))
0458 current_index++;
0459
	/* find a cleanable entry in the bucket and clean it, or set to next bucket */
0461
0462 if (current_detail && current_index < current_detail->hash_size) {
0463 struct cache_head *ch = NULL;
0464 struct cache_detail *d;
0465 struct hlist_head *head;
0466 struct hlist_node *tmp;
0467
		spin_lock(&current_detail->hash_lock);
0469
		/* Ok, now to clean this strand */
0471
		head = &current_detail->hash_table[current_index];
0473 hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
0474 if (current_detail->nextcheck > ch->expiry_time)
0475 current_detail->nextcheck = ch->expiry_time+1;
0476 if (!cache_is_expired(current_detail, ch))
0477 continue;
0478
0479 sunrpc_begin_cache_remove_entry(ch, current_detail);
0480 trace_cache_entry_expired(current_detail, ch);
0481 rv = 1;
0482 break;
0483 }
0484
		spin_unlock(&current_detail->hash_lock);
0486 d = current_detail;
0487 if (!ch)
0488 current_index ++;
0489 spin_unlock(&cache_list_lock);
0490 if (ch)
0491 sunrpc_end_cache_remove_entry(ch, d);
0492 } else
0493 spin_unlock(&cache_list_lock);
0494
0495 return rv;
0496 }
0497
/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
0501 static void do_cache_clean(struct work_struct *work)
0502 {
0503 int delay;
0504
0505 if (list_empty(&cache_list))
0506 return;
0507
0508 if (cache_clean() == -1)
0509 delay = round_jiffies_relative(30*HZ);
0510 else
0511 delay = 5;
0512
0513 queue_delayed_work(system_power_efficient_wq, &cache_cleaner, delay);
0514 }
0515
/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
0522 void cache_flush(void)
0523 {
0524 while (cache_clean() != -1)
0525 cond_resched();
0526 while (cache_clean() != -1)
0527 cond_resched();
0528 }
0529 EXPORT_SYMBOL_GPL(cache_flush);
0530
0531 void cache_purge(struct cache_detail *detail)
0532 {
0533 struct cache_head *ch = NULL;
0534 struct hlist_head *head = NULL;
0535 int i = 0;
0536
0537 spin_lock(&detail->hash_lock);
0538 if (!detail->entries) {
0539 spin_unlock(&detail->hash_lock);
0540 return;
0541 }
0542
0543 dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
0544 for (i = 0; i < detail->hash_size; i++) {
0545 head = &detail->hash_table[i];
0546 while (!hlist_empty(head)) {
0547 ch = hlist_entry(head->first, struct cache_head,
0548 cache_list);
0549 sunrpc_begin_cache_remove_entry(ch, detail);
0550 spin_unlock(&detail->hash_lock);
0551 sunrpc_end_cache_remove_entry(ch, detail);
0552 spin_lock(&detail->hash_lock);
0553 }
0554 }
0555 spin_unlock(&detail->hash_lock);
0556 }
0557 EXPORT_SYMBOL_GPL(cache_purge);
0558
/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * They are also linked into the list 'cache_defer_list'
 * with the newest at the front, so the total number of
 * deferred requests can be capped (DFR_MAX) by discarding
 * an entry from either end of that list.
 */
0575 #define DFR_HASHSIZE (PAGE_SIZE/sizeof(struct list_head))
0576 #define DFR_HASH(item) ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)
0577
0578 #define DFR_MAX 300
0579
0580 static DEFINE_SPINLOCK(cache_defer_lock);
0581 static LIST_HEAD(cache_defer_list);
0582 static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
0583 static int cache_defer_cnt;
0584
0585 static void __unhash_deferred_req(struct cache_deferred_req *dreq)
0586 {
0587 hlist_del_init(&dreq->hash);
0588 if (!list_empty(&dreq->recent)) {
0589 list_del_init(&dreq->recent);
0590 cache_defer_cnt--;
0591 }
0592 }
0593
0594 static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
0595 {
0596 int hash = DFR_HASH(item);
0597
0598 INIT_LIST_HEAD(&dreq->recent);
0599 hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
0600 }
0601
0602 static void setup_deferral(struct cache_deferred_req *dreq,
0603 struct cache_head *item,
0604 int count_me)
0605 {
0606
0607 dreq->item = item;
0608
0609 spin_lock(&cache_defer_lock);
0610
0611 __hash_deferred_req(dreq, item);
0612
0613 if (count_me) {
0614 cache_defer_cnt++;
0615 list_add(&dreq->recent, &cache_defer_list);
0616 }
0617
0618 spin_unlock(&cache_defer_lock);
0619
0620 }
0621
0622 struct thread_deferred_req {
0623 struct cache_deferred_req handle;
0624 struct completion completion;
0625 };
0626
0627 static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
0628 {
0629 struct thread_deferred_req *dr =
0630 container_of(dreq, struct thread_deferred_req, handle);
0631 complete(&dr->completion);
0632 }
0633
0634 static void cache_wait_req(struct cache_req *req, struct cache_head *item)
0635 {
0636 struct thread_deferred_req sleeper;
0637 struct cache_deferred_req *dreq = &sleeper.handle;
0638
0639 sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
0640 dreq->revisit = cache_restart_thread;
0641
0642 setup_deferral(dreq, item, 0);
0643
0644 if (!test_bit(CACHE_PENDING, &item->flags) ||
0645 wait_for_completion_interruptible_timeout(
0646 &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
		 * to clean up
		 */
0650 spin_lock(&cache_defer_lock);
0651 if (!hlist_unhashed(&sleeper.handle.hash)) {
0652 __unhash_deferred_req(&sleeper.handle);
0653 spin_unlock(&cache_defer_lock);
0654 } else {
			/* cache_revisit_request already removed
			 * this from the hash table, but hasn't
			 * called ->revisit yet.  It will very soon
			 * and we need to wait for it.
			 */
0660 spin_unlock(&cache_defer_lock);
0661 wait_for_completion(&sleeper.completion);
0662 }
0663 }
0664 }
0665
0666 static void cache_limit_defers(void)
0667 {
	/* Make sure we haven't exceeded the limit of allowed deferred
	 * requests.
	 */
0671 struct cache_deferred_req *discard = NULL;
0672
0673 if (cache_defer_cnt <= DFR_MAX)
0674 return;
0675
0676 spin_lock(&cache_defer_lock);
0677
	/* Consider removing either the first or the last */
0679 if (cache_defer_cnt > DFR_MAX) {
0680 if (prandom_u32() & 1)
0681 discard = list_entry(cache_defer_list.next,
0682 struct cache_deferred_req, recent);
0683 else
0684 discard = list_entry(cache_defer_list.prev,
0685 struct cache_deferred_req, recent);
0686 __unhash_deferred_req(discard);
0687 }
0688 spin_unlock(&cache_defer_lock);
0689 if (discard)
0690 discard->revisit(discard, 1);
0691 }
0692
0693 #if IS_ENABLED(CONFIG_FAIL_SUNRPC)
0694 static inline bool cache_defer_immediately(void)
0695 {
0696 return !fail_sunrpc.ignore_cache_wait &&
0697 should_fail(&fail_sunrpc.attr, 1);
0698 }
0699 #else
0700 static inline bool cache_defer_immediately(void)
0701 {
0702 return false;
0703 }
0704 #endif
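/*
 * With CONFIG_FAIL_SUNRPC, cache_defer_immediately() lets the fault
 * injection framework skip the synchronous wait in cache_defer_req()
 * and force the asynchronous deferral path, which is otherwise hard
 * to exercise.
 */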
0705
/* Return true if and only if a deferred request is queued. */
0707 static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
0708 {
0709 struct cache_deferred_req *dreq;
0710
0711 if (!cache_defer_immediately()) {
0712 cache_wait_req(req, item);
0713 if (!test_bit(CACHE_PENDING, &item->flags))
0714 return false;
0715 }
0716
0717 dreq = req->defer(req);
0718 if (dreq == NULL)
0719 return false;
0720 setup_deferral(dreq, item, 1);
0721 if (!test_bit(CACHE_PENDING, &item->flags))
		/* Bit could have been cleared before we managed to
		 * set up the deferral, so need to revisit just in case
		 */
0725 cache_revisit_request(item);
0726
0727 cache_limit_defers();
0728 return true;
0729 }
0730
0731 static void cache_revisit_request(struct cache_head *item)
0732 {
0733 struct cache_deferred_req *dreq;
0734 struct list_head pending;
0735 struct hlist_node *tmp;
0736 int hash = DFR_HASH(item);
0737
0738 INIT_LIST_HEAD(&pending);
0739 spin_lock(&cache_defer_lock);
0740
0741 hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
0742 if (dreq->item == item) {
0743 __unhash_deferred_req(dreq);
0744 list_add(&dreq->recent, &pending);
0745 }
0746
0747 spin_unlock(&cache_defer_lock);
0748
0749 while (!list_empty(&pending)) {
0750 dreq = list_entry(pending.next, struct cache_deferred_req, recent);
0751 list_del_init(&dreq->recent);
0752 dreq->revisit(dreq, 0);
0753 }
0754 }
0755
0756 void cache_clean_deferred(void *owner)
0757 {
0758 struct cache_deferred_req *dreq, *tmp;
0759 struct list_head pending;
0760
0761
0762 INIT_LIST_HEAD(&pending);
0763 spin_lock(&cache_defer_lock);
0764
0765 list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
0766 if (dreq->owner == owner) {
0767 __unhash_deferred_req(dreq);
0768 list_add(&dreq->recent, &pending);
0769 }
0770 }
0771 spin_unlock(&cache_defer_lock);
0772
0773 while (!list_empty(&pending)) {
0774 dreq = list_entry(pending.next, struct cache_deferred_req, recent);
0775 list_del_init(&dreq->recent);
0776 dreq->revisit(dreq, 1);
0777 }
0778 }
0779
/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by a linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up preceding readers.
 * New readers are added to the head.  If, on read, an item is found
 * which is no longer pending, it is removed from the list and freed.
 */
0796 static DEFINE_SPINLOCK(queue_lock);
0797
0798 struct cache_queue {
0799 struct list_head list;
0800 int reader;
0801 };
0802 struct cache_request {
0803 struct cache_queue q;
0804 struct cache_head *item;
0805 char * buf;
0806 int len;
0807 int readers;
0808 };
0809 struct cache_reader {
0810 struct cache_queue q;
0811 int offset;
0812 };
0813
0814 static int cache_request(struct cache_detail *detail,
0815 struct cache_request *crq)
0816 {
0817 char *bp = crq->buf;
0818 int len = PAGE_SIZE;
0819
0820 detail->cache_request(detail, crq->item, &bp, &len);
0821 if (len < 0)
0822 return -E2BIG;
0823 return PAGE_SIZE - len;
0824 }
0825
0826 static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
0827 loff_t *ppos, struct cache_detail *cd)
0828 {
0829 struct cache_reader *rp = filp->private_data;
0830 struct cache_request *rq;
0831 struct inode *inode = file_inode(filp);
0832 int err;
0833
0834 if (count == 0)
0835 return 0;
0836
	inode_lock(inode); /* protect against multiple concurrent
			    * readers on this file */
0839 again:
0840 spin_lock(&queue_lock);
0841
0842 while (rp->q.list.next != &cd->queue &&
0843 list_entry(rp->q.list.next, struct cache_queue, list)
0844 ->reader) {
0845 struct list_head *next = rp->q.list.next;
0846 list_move(&rp->q.list, next);
0847 }
0848 if (rp->q.list.next == &cd->queue) {
0849 spin_unlock(&queue_lock);
0850 inode_unlock(inode);
0851 WARN_ON_ONCE(rp->offset);
0852 return 0;
0853 }
0854 rq = container_of(rp->q.list.next, struct cache_request, q.list);
0855 WARN_ON_ONCE(rq->q.reader);
0856 if (rp->offset == 0)
0857 rq->readers++;
0858 spin_unlock(&queue_lock);
0859
0860 if (rq->len == 0) {
0861 err = cache_request(cd, rq);
0862 if (err < 0)
0863 goto out;
0864 rq->len = err;
0865 }
0866
0867 if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
0868 err = -EAGAIN;
0869 spin_lock(&queue_lock);
0870 list_move(&rp->q.list, &rq->q.list);
0871 spin_unlock(&queue_lock);
0872 } else {
0873 if (rp->offset + count > rq->len)
0874 count = rq->len - rp->offset;
0875 err = -EFAULT;
0876 if (copy_to_user(buf, rq->buf + rp->offset, count))
0877 goto out;
0878 rp->offset += count;
0879 if (rp->offset >= rq->len) {
0880 rp->offset = 0;
0881 spin_lock(&queue_lock);
0882 list_move(&rp->q.list, &rq->q.list);
0883 spin_unlock(&queue_lock);
0884 }
0885 err = 0;
0886 }
0887 out:
0888 if (rp->offset == 0) {
		/* drop our reader hold on rq, freeing it if no longer needed */
0890 spin_lock(&queue_lock);
0891 rq->readers--;
0892 if (rq->readers == 0 &&
0893 !test_bit(CACHE_PENDING, &rq->item->flags)) {
0894 list_del(&rq->q.list);
0895 spin_unlock(&queue_lock);
0896 cache_put(rq->item, cd);
0897 kfree(rq->buf);
0898 kfree(rq);
0899 } else
0900 spin_unlock(&queue_lock);
0901 }
0902 if (err == -EAGAIN)
0903 goto again;
0904 inode_unlock(inode);
0905 return err ? err : count;
0906 }
0907
0908 static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
0909 size_t count, struct cache_detail *cd)
0910 {
0911 ssize_t ret;
0912
0913 if (count == 0)
0914 return -EINVAL;
0915 if (copy_from_user(kaddr, buf, count))
0916 return -EFAULT;
0917 kaddr[count] = '\0';
0918 ret = cd->cache_parse(cd, kaddr, count);
0919 if (!ret)
0920 ret = count;
0921 return ret;
0922 }
0923
0924 static ssize_t cache_downcall(struct address_space *mapping,
0925 const char __user *buf,
0926 size_t count, struct cache_detail *cd)
0927 {
0928 char *write_buf;
0929 ssize_t ret = -ENOMEM;
0930
	if (count >= 32768) { /* 32k is max userland buffer, let's check anyway */
0932 ret = -EINVAL;
0933 goto out;
0934 }
0935
0936 write_buf = kvmalloc(count + 1, GFP_KERNEL);
0937 if (!write_buf)
0938 goto out;
0939
0940 ret = cache_do_downcall(write_buf, buf, count, cd);
0941 kvfree(write_buf);
0942 out:
0943 return ret;
0944 }
0945
0946 static ssize_t cache_write(struct file *filp, const char __user *buf,
0947 size_t count, loff_t *ppos,
0948 struct cache_detail *cd)
0949 {
0950 struct address_space *mapping = filp->f_mapping;
0951 struct inode *inode = file_inode(filp);
0952 ssize_t ret = -EINVAL;
0953
0954 if (!cd->cache_parse)
0955 goto out;
0956
0957 inode_lock(inode);
0958 ret = cache_downcall(mapping, buf, count, cd);
0959 inode_unlock(inode);
0960 out:
0961 return ret;
0962 }
0963
0964 static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
0965
0966 static __poll_t cache_poll(struct file *filp, poll_table *wait,
0967 struct cache_detail *cd)
0968 {
0969 __poll_t mask;
0970 struct cache_reader *rp = filp->private_data;
0971 struct cache_queue *cq;
0972
0973 poll_wait(filp, &queue_wait, wait);
0974
	/* always allow write */
0976 mask = EPOLLOUT | EPOLLWRNORM;
0977
0978 if (!rp)
0979 return mask;
0980
0981 spin_lock(&queue_lock);
0982
0983 for (cq= &rp->q; &cq->list != &cd->queue;
0984 cq = list_entry(cq->list.next, struct cache_queue, list))
0985 if (!cq->reader) {
0986 mask |= EPOLLIN | EPOLLRDNORM;
0987 break;
0988 }
0989 spin_unlock(&queue_lock);
0990 return mask;
0991 }
0992
0993 static int cache_ioctl(struct inode *ino, struct file *filp,
0994 unsigned int cmd, unsigned long arg,
0995 struct cache_detail *cd)
0996 {
0997 int len = 0;
0998 struct cache_reader *rp = filp->private_data;
0999 struct cache_queue *cq;
1000
1001 if (cmd != FIONREAD || !rp)
1002 return -EINVAL;
1003
1004 spin_lock(&queue_lock);
1005
	/* only find the length remaining in current request,
	 * or the length of the next pending request
	 */
1009 for (cq= &rp->q; &cq->list != &cd->queue;
1010 cq = list_entry(cq->list.next, struct cache_queue, list))
1011 if (!cq->reader) {
1012 struct cache_request *cr =
1013 container_of(cq, struct cache_request, q);
1014 len = cr->len - rp->offset;
1015 break;
1016 }
1017 spin_unlock(&queue_lock);
1018
1019 return put_user(len, (int __user *)arg);
1020 }
1021
1022 static int cache_open(struct inode *inode, struct file *filp,
1023 struct cache_detail *cd)
1024 {
1025 struct cache_reader *rp = NULL;
1026
1027 if (!cd || !try_module_get(cd->owner))
1028 return -EACCES;
1029 nonseekable_open(inode, filp);
1030 if (filp->f_mode & FMODE_READ) {
1031 rp = kmalloc(sizeof(*rp), GFP_KERNEL);
1032 if (!rp) {
1033 module_put(cd->owner);
1034 return -ENOMEM;
1035 }
1036 rp->offset = 0;
1037 rp->q.reader = 1;
1038
1039 spin_lock(&queue_lock);
1040 list_add(&rp->q.list, &cd->queue);
1041 spin_unlock(&queue_lock);
1042 }
1043 if (filp->f_mode & FMODE_WRITE)
1044 atomic_inc(&cd->writers);
1045 filp->private_data = rp;
1046 return 0;
1047 }
1048
1049 static int cache_release(struct inode *inode, struct file *filp,
1050 struct cache_detail *cd)
1051 {
1052 struct cache_reader *rp = filp->private_data;
1053
1054 if (rp) {
1055 spin_lock(&queue_lock);
1056 if (rp->offset) {
1057 struct cache_queue *cq;
1058 for (cq= &rp->q; &cq->list != &cd->queue;
1059 cq = list_entry(cq->list.next, struct cache_queue, list))
1060 if (!cq->reader) {
1061 container_of(cq, struct cache_request, q)
1062 ->readers--;
1063 break;
1064 }
1065 rp->offset = 0;
1066 }
1067 list_del(&rp->q.list);
1068 spin_unlock(&queue_lock);
1069
1070 filp->private_data = NULL;
1071 kfree(rp);
1072
1073 }
1074 if (filp->f_mode & FMODE_WRITE) {
1075 atomic_dec(&cd->writers);
1076 cd->last_close = seconds_since_boot();
1077 }
1078 module_put(cd->owner);
1079 return 0;
1080 }
1081
1082
1083
1084 static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
1085 {
1086 struct cache_queue *cq, *tmp;
1087 struct cache_request *cr;
1088 struct list_head dequeued;
1089
1090 INIT_LIST_HEAD(&dequeued);
1091 spin_lock(&queue_lock);
1092 list_for_each_entry_safe(cq, tmp, &detail->queue, list)
1093 if (!cq->reader) {
1094 cr = container_of(cq, struct cache_request, q);
1095 if (cr->item != ch)
1096 continue;
1097 if (test_bit(CACHE_PENDING, &ch->flags))
				/* Lost a race and it is pending again */
1099 break;
1100 if (cr->readers != 0)
1101 continue;
1102 list_move(&cr->q.list, &dequeued);
1103 }
1104 spin_unlock(&queue_lock);
1105 while (!list_empty(&dequeued)) {
1106 cr = list_entry(dequeued.next, struct cache_request, q.list);
1107 list_del(&cr->q.list);
1108 cache_put(cr->item, detail);
1109 kfree(cr->buf);
1110 kfree(cr);
1111 }
1112 }
1113
/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space, tab, newline and slosh
 * with slosh, or hexified with a leading \x.
 * Record is terminated with newline.
 */
1123 void qword_add(char **bpp, int *lp, char *str)
1124 {
1125 char *bp = *bpp;
1126 int len = *lp;
1127 int ret;
1128
1129 if (len < 0) return;
1130
1131 ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
1132 if (ret >= len) {
1133 bp += len;
1134 len = -1;
1135 } else {
1136 bp += ret;
1137 len -= ret;
1138 *bp++ = ' ';
1139 len--;
1140 }
1141 *bpp = bp;
1142 *lp = len;
1143 }
1144 EXPORT_SYMBOL_GPL(qword_add);
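/*
 * Illustrative only: a ->cache_request callback typically builds one
 * upcall line from these helpers, e.g. (hypothetical fields):
 *
 *	static void my_cache_request(struct cache_detail *cd,
 *				     struct cache_head *h,
 *				     char **bpp, int *blen)
 *	{
 *		struct my_entry *ent = container_of(h, struct my_entry, h);
 *
 *		qword_add(bpp, blen, ent->name);		// quoted text field
 *		qword_addhex(bpp, blen, ent->raw, ent->rawlen);	// binary field
 *		(*bpp)[-1] = '\n';				// terminate the record
 *	}
 *
 * A negative *blen after these calls means the PAGE_SIZE buffer
 * overflowed, and cache_request() above will return -E2BIG.
 */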
1145
1146 void qword_addhex(char **bpp, int *lp, char *buf, int blen)
1147 {
1148 char *bp = *bpp;
1149 int len = *lp;
1150
1151 if (len < 0) return;
1152
1153 if (len > 2) {
1154 *bp++ = '\\';
1155 *bp++ = 'x';
1156 len -= 2;
1157 while (blen && len >= 2) {
1158 bp = hex_byte_pack(bp, *buf++);
1159 len -= 2;
1160 blen--;
1161 }
1162 }
1163 if (blen || len<1) len = -1;
1164 else {
1165 *bp++ = ' ';
1166 len--;
1167 }
1168 *bpp = bp;
1169 *lp = len;
1170 }
1171 EXPORT_SYMBOL_GPL(qword_addhex);
1172
1173 static void warn_no_listener(struct cache_detail *detail)
1174 {
1175 if (detail->last_warn != detail->last_close) {
1176 detail->last_warn = detail->last_close;
1177 if (detail->warn_no_listener)
1178 detail->warn_no_listener(detail, detail->last_close != 0);
1179 }
1180 }
1181
1182 static bool cache_listeners_exist(struct cache_detail *detail)
1183 {
1184 if (atomic_read(&detail->writers))
1185 return true;
1186 if (detail->last_close == 0)
		/* never opened */
1188 return false;
1189 if (detail->last_close < seconds_since_boot() - 30)
		/*
		 * Allow a grace period after the last listener closed
		 * the channel, in case a userspace daemon is being
		 * restarted; after ~30 seconds with no listener we
		 * give up and treat the cache as unattended.
		 */
1195 return false;
1196 return true;
1197 }
1198
/*
 * Register an upcall request with user-space: allocate a one-page
 * buffer and a cache_request, and queue them on the cache_detail's
 * channel for the userspace daemon to read().
 */
1205 static int cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
1206 {
1207 char *buf;
1208 struct cache_request *crq;
1209 int ret = 0;
1210
1211 if (test_bit(CACHE_CLEANED, &h->flags))
		/* Too late to make an upcall */
1213 return -EAGAIN;
1214
1215 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1216 if (!buf)
1217 return -EAGAIN;
1218
1219 crq = kmalloc(sizeof (*crq), GFP_KERNEL);
1220 if (!crq) {
1221 kfree(buf);
1222 return -EAGAIN;
1223 }
1224
1225 crq->q.reader = 0;
1226 crq->buf = buf;
1227 crq->len = 0;
1228 crq->readers = 0;
1229 spin_lock(&queue_lock);
1230 if (test_bit(CACHE_PENDING, &h->flags)) {
1231 crq->item = cache_get(h);
1232 list_add_tail(&crq->q.list, &detail->queue);
1233 trace_cache_entry_upcall(detail, h);
1234 } else
		/* Lost a race, no query needed */
1236 ret = -EAGAIN;
1237 spin_unlock(&queue_lock);
1238 wake_up(&queue_wait);
1239 if (ret == -EAGAIN) {
1240 kfree(buf);
1241 kfree(crq);
1242 }
1243 return ret;
1244 }
1245
1246 int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
1247 {
1248 if (test_and_set_bit(CACHE_PENDING, &h->flags))
1249 return 0;
1250 return cache_pipe_upcall(detail, h);
1251 }
1252 EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
1253
1254 int sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail,
1255 struct cache_head *h)
1256 {
1257 if (!cache_listeners_exist(detail)) {
1258 warn_no_listener(detail);
1259 trace_cache_entry_no_listener(detail, h);
1260 return -EINVAL;
1261 }
1262 return sunrpc_cache_pipe_upcall(detail, h);
1263 }
1264 EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall_timeout);
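/*
 * A cache_detail's ->cache_upcall usually points at one of the two
 * helpers above: sunrpc_cache_pipe_upcall() always queues an upcall,
 * while sunrpc_cache_pipe_upcall_timeout() first fails with -EINVAL
 * (which cache_check() turns into a negative entry) when no userspace
 * listener appears to be attached to the channel.
 */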
1265
/*
 * Parse a message from user-space and pass it
 * to an appropriate cache.
 * Messages are, like requests, separated into fields by
 * blanks and dequoted as \xHEXSTRING or embedded \nnn octal.
 */
1278 int qword_get(char **bpp, char *dest, int bufsize)
1279 {
	/* return bytes copied, or -1 on error */
1281 char *bp = *bpp;
1282 int len = 0;
1283
1284 while (*bp == ' ') bp++;
1285
1286 if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
1288 bp += 2;
1289 while (len < bufsize - 1) {
1290 int h, l;
1291
1292 h = hex_to_bin(bp[0]);
1293 if (h < 0)
1294 break;
1295
1296 l = hex_to_bin(bp[1]);
1297 if (l < 0)
1298 break;
1299
1300 *dest++ = (h << 4) | l;
1301 bp += 2;
1302 len++;
1303 }
1304 } else {
		/* text with \nnn octal quoting */
1306 while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
1307 if (*bp == '\\' &&
1308 isodigit(bp[1]) && (bp[1] <= '3') &&
1309 isodigit(bp[2]) &&
1310 isodigit(bp[3])) {
1311 int byte = (*++bp -'0');
1312 bp++;
1313 byte = (byte << 3) | (*bp++ - '0');
1314 byte = (byte << 3) | (*bp++ - '0');
1315 *dest++ = byte;
1316 len++;
1317 } else {
1318 *dest++ = *bp++;
1319 len++;
1320 }
1321 }
1322 }
1323
1324 if (*bp != ' ' && *bp != '\n' && *bp != '\0')
1325 return -1;
1326 while (*bp == ' ') bp++;
1327 *bpp = bp;
1328 *dest = '\0';
1329 return len;
1330 }
1331 EXPORT_SYMBOL_GPL(qword_get);
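/*
 * Illustrative only: a ->cache_parse implementation usually pulls the
 * fields back out with qword_get(), then looks up and updates the
 * matching entry, e.g. (hypothetical):
 *
 *	static int my_cache_parse(struct cache_detail *cd,
 *				  char *mesg, int mlen)
 *	{
 *		char name[64];
 *
 *		if (mesg[mlen - 1] != '\n')
 *			return -EINVAL;
 *		if (qword_get(&mesg, name, sizeof(name)) <= 0)
 *			return -EINVAL;
 *		// ... parse remaining fields, compute an expiry time,
 *		// and call sunrpc_cache_update() on the matching entry ...
 *		return 0;
 *	}
 */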
1332
/*
 * support /proc/net/rpc/$CACHENAME/content as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */
1341 static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
1342 {
1343 loff_t n = *pos;
1344 unsigned int hash, entry;
1345 struct cache_head *ch;
1346 struct cache_detail *cd = m->private;
1347
1348 if (!n--)
1349 return SEQ_START_TOKEN;
1350 hash = n >> 32;
1351 entry = n & ((1LL<<32) - 1);
1352
1353 hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
1354 if (!entry--)
1355 return ch;
1356 n &= ~((1LL<<32) - 1);
1357 do {
1358 hash++;
1359 n += 1LL<<32;
1360 } while(hash < cd->hash_size &&
1361 hlist_empty(&cd->hash_table[hash]));
1362 if (hash >= cd->hash_size)
1363 return NULL;
1364 *pos = n+1;
1365 return hlist_entry_safe(rcu_dereference_raw(
1366 hlist_first_rcu(&cd->hash_table[hash])),
1367 struct cache_head, cache_list);
1368 }
1369
1370 static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
1371 {
1372 struct cache_head *ch = p;
1373 int hash = (*pos >> 32);
1374 struct cache_detail *cd = m->private;
1375
1376 if (p == SEQ_START_TOKEN)
1377 hash = 0;
1378 else if (ch->cache_list.next == NULL) {
1379 hash++;
1380 *pos += 1LL<<32;
1381 } else {
1382 ++*pos;
1383 return hlist_entry_safe(rcu_dereference_raw(
1384 hlist_next_rcu(&ch->cache_list)),
1385 struct cache_head, cache_list);
1386 }
1387 *pos &= ~((1LL<<32) - 1);
1388 while (hash < cd->hash_size &&
1389 hlist_empty(&cd->hash_table[hash])) {
1390 hash++;
1391 *pos += 1LL<<32;
1392 }
1393 if (hash >= cd->hash_size)
1394 return NULL;
1395 ++*pos;
1396 return hlist_entry_safe(rcu_dereference_raw(
1397 hlist_first_rcu(&cd->hash_table[hash])),
1398 struct cache_head, cache_list);
1399 }
1400
1401 void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
1402 __acquires(RCU)
1403 {
1404 rcu_read_lock();
1405 return __cache_seq_start(m, pos);
1406 }
1407 EXPORT_SYMBOL_GPL(cache_seq_start_rcu);
1408
1409 void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
1410 {
1411 return cache_seq_next(file, p, pos);
1412 }
1413 EXPORT_SYMBOL_GPL(cache_seq_next_rcu);
1414
1415 void cache_seq_stop_rcu(struct seq_file *m, void *p)
1416 __releases(RCU)
1417 {
1418 rcu_read_unlock();
1419 }
1420 EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);
1421
1422 static int c_show(struct seq_file *m, void *p)
1423 {
1424 struct cache_head *cp = p;
1425 struct cache_detail *cd = m->private;
1426
1427 if (p == SEQ_START_TOKEN)
1428 return cd->cache_show(m, cd, NULL);
1429
1430 ifdebug(CACHE)
1431 seq_printf(m, "# expiry=%lld refcnt=%d flags=%lx\n",
1432 convert_to_wallclock(cp->expiry_time),
1433 kref_read(&cp->ref), cp->flags);
1434 cache_get(cp);
1435 if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
1437 seq_puts(m, "# ");
1438 else {
1439 if (cache_is_expired(cd, cp))
1440 seq_puts(m, "# ");
1441 cache_put(cp, cd);
1442 }
1443
1444 return cd->cache_show(m, cd, cp);
1445 }
1446
1447 static const struct seq_operations cache_content_op = {
1448 .start = cache_seq_start_rcu,
1449 .next = cache_seq_next_rcu,
1450 .stop = cache_seq_stop_rcu,
1451 .show = c_show,
1452 };
1453
1454 static int content_open(struct inode *inode, struct file *file,
1455 struct cache_detail *cd)
1456 {
1457 struct seq_file *seq;
1458 int err;
1459
1460 if (!cd || !try_module_get(cd->owner))
1461 return -EACCES;
1462
1463 err = seq_open(file, &cache_content_op);
1464 if (err) {
1465 module_put(cd->owner);
1466 return err;
1467 }
1468
1469 seq = file->private_data;
1470 seq->private = cd;
1471 return 0;
1472 }
1473
1474 static int content_release(struct inode *inode, struct file *file,
1475 struct cache_detail *cd)
1476 {
1477 int ret = seq_release(inode, file);
1478 module_put(cd->owner);
1479 return ret;
1480 }
1481
1482 static int open_flush(struct inode *inode, struct file *file,
1483 struct cache_detail *cd)
1484 {
1485 if (!cd || !try_module_get(cd->owner))
1486 return -EACCES;
1487 return nonseekable_open(inode, file);
1488 }
1489
1490 static int release_flush(struct inode *inode, struct file *file,
1491 struct cache_detail *cd)
1492 {
1493 module_put(cd->owner);
1494 return 0;
1495 }
1496
1497 static ssize_t read_flush(struct file *file, char __user *buf,
1498 size_t count, loff_t *ppos,
1499 struct cache_detail *cd)
1500 {
1501 char tbuf[22];
1502 size_t len;
1503
1504 len = snprintf(tbuf, sizeof(tbuf), "%llu\n",
1505 convert_to_wallclock(cd->flush_time));
1506 return simple_read_from_buffer(buf, count, ppos, tbuf, len);
1507 }
1508
1509 static ssize_t write_flush(struct file *file, const char __user *buf,
1510 size_t count, loff_t *ppos,
1511 struct cache_detail *cd)
1512 {
1513 char tbuf[20];
1514 char *ep;
1515 time64_t now;
1516
1517 if (*ppos || count > sizeof(tbuf)-1)
1518 return -EINVAL;
1519 if (copy_from_user(tbuf, buf, count))
1520 return -EFAULT;
1521 tbuf[count] = 0;
1522 simple_strtoul(tbuf, &ep, 0);
1523 if (*ep && *ep != '\n')
1524 return -EINVAL;
1525
	/* Note that while we check that 'buf' holds a valid number,
	 * we always ignore the value and just flush everything.
	 * Making use of the number leads to races.
	 */
1530 now = seconds_since_boot();
	/* Always flush everything, so behave like cache_purge()
	 * Do this by advancing flush_time to the current time,
	 * or by one second if it has already reached the current time.
	 * Newly added cache entries will always have ->last_refresh greater
	 * than ->flush_time, so they don't get flushed prematurely.
	 */
1538 if (cd->flush_time >= now)
1539 now = cd->flush_time + 1;
1540
1541 cd->flush_time = now;
1542 cd->nextcheck = now;
1543 cache_flush();
1544
1545 if (cd->flush)
1546 cd->flush();
1547
1548 *ppos += count;
1549 return count;
1550 }
1551
1552 static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
1553 size_t count, loff_t *ppos)
1554 {
1555 struct cache_detail *cd = pde_data(file_inode(filp));
1556
1557 return cache_read(filp, buf, count, ppos, cd);
1558 }
1559
1560 static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
1561 size_t count, loff_t *ppos)
1562 {
1563 struct cache_detail *cd = pde_data(file_inode(filp));
1564
1565 return cache_write(filp, buf, count, ppos, cd);
1566 }
1567
1568 static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
1569 {
1570 struct cache_detail *cd = pde_data(file_inode(filp));
1571
1572 return cache_poll(filp, wait, cd);
1573 }
1574
1575 static long cache_ioctl_procfs(struct file *filp,
1576 unsigned int cmd, unsigned long arg)
1577 {
1578 struct inode *inode = file_inode(filp);
1579 struct cache_detail *cd = pde_data(inode);
1580
1581 return cache_ioctl(inode, filp, cmd, arg, cd);
1582 }
1583
1584 static int cache_open_procfs(struct inode *inode, struct file *filp)
1585 {
1586 struct cache_detail *cd = pde_data(inode);
1587
1588 return cache_open(inode, filp, cd);
1589 }
1590
1591 static int cache_release_procfs(struct inode *inode, struct file *filp)
1592 {
1593 struct cache_detail *cd = pde_data(inode);
1594
1595 return cache_release(inode, filp, cd);
1596 }
1597
1598 static const struct proc_ops cache_channel_proc_ops = {
1599 .proc_lseek = no_llseek,
1600 .proc_read = cache_read_procfs,
1601 .proc_write = cache_write_procfs,
1602 .proc_poll = cache_poll_procfs,
1603 .proc_ioctl = cache_ioctl_procfs,
1604 .proc_open = cache_open_procfs,
1605 .proc_release = cache_release_procfs,
1606 };
1607
1608 static int content_open_procfs(struct inode *inode, struct file *filp)
1609 {
1610 struct cache_detail *cd = pde_data(inode);
1611
1612 return content_open(inode, filp, cd);
1613 }
1614
1615 static int content_release_procfs(struct inode *inode, struct file *filp)
1616 {
1617 struct cache_detail *cd = pde_data(inode);
1618
1619 return content_release(inode, filp, cd);
1620 }
1621
1622 static const struct proc_ops content_proc_ops = {
1623 .proc_open = content_open_procfs,
1624 .proc_read = seq_read,
1625 .proc_lseek = seq_lseek,
1626 .proc_release = content_release_procfs,
1627 };
1628
1629 static int open_flush_procfs(struct inode *inode, struct file *filp)
1630 {
1631 struct cache_detail *cd = pde_data(inode);
1632
1633 return open_flush(inode, filp, cd);
1634 }
1635
1636 static int release_flush_procfs(struct inode *inode, struct file *filp)
1637 {
1638 struct cache_detail *cd = pde_data(inode);
1639
1640 return release_flush(inode, filp, cd);
1641 }
1642
1643 static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
1644 size_t count, loff_t *ppos)
1645 {
1646 struct cache_detail *cd = pde_data(file_inode(filp));
1647
1648 return read_flush(filp, buf, count, ppos, cd);
1649 }
1650
1651 static ssize_t write_flush_procfs(struct file *filp,
1652 const char __user *buf,
1653 size_t count, loff_t *ppos)
1654 {
1655 struct cache_detail *cd = pde_data(file_inode(filp));
1656
1657 return write_flush(filp, buf, count, ppos, cd);
1658 }
1659
1660 static const struct proc_ops cache_flush_proc_ops = {
1661 .proc_open = open_flush_procfs,
1662 .proc_read = read_flush_procfs,
1663 .proc_write = write_flush_procfs,
1664 .proc_release = release_flush_procfs,
1665 .proc_lseek = no_llseek,
1666 };
1667
1668 static void remove_cache_proc_entries(struct cache_detail *cd)
1669 {
1670 if (cd->procfs) {
1671 proc_remove(cd->procfs);
1672 cd->procfs = NULL;
1673 }
1674 }
1675
1676 #ifdef CONFIG_PROC_FS
1677 static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1678 {
1679 struct proc_dir_entry *p;
1680 struct sunrpc_net *sn;
1681
1682 sn = net_generic(net, sunrpc_net_id);
1683 cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
1684 if (cd->procfs == NULL)
1685 goto out_nomem;
1686
1687 p = proc_create_data("flush", S_IFREG | 0600,
1688 cd->procfs, &cache_flush_proc_ops, cd);
1689 if (p == NULL)
1690 goto out_nomem;
1691
1692 if (cd->cache_request || cd->cache_parse) {
1693 p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
1694 &cache_channel_proc_ops, cd);
1695 if (p == NULL)
1696 goto out_nomem;
1697 }
1698 if (cd->cache_show) {
1699 p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
1700 &content_proc_ops, cd);
1701 if (p == NULL)
1702 goto out_nomem;
1703 }
1704 return 0;
1705 out_nomem:
1706 remove_cache_proc_entries(cd);
1707 return -ENOMEM;
1708 }
1709 #else
1710 static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1711 {
1712 return 0;
1713 }
1714 #endif
1715
1716 void __init cache_initialize(void)
1717 {
1718 INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
1719 }
1720
1721 int cache_register_net(struct cache_detail *cd, struct net *net)
1722 {
1723 int ret;
1724
1725 sunrpc_init_cache_detail(cd);
1726 ret = create_cache_proc_entries(cd, net);
1727 if (ret)
1728 sunrpc_destroy_cache_detail(cd);
1729 return ret;
1730 }
1731 EXPORT_SYMBOL_GPL(cache_register_net);
1732
1733 void cache_unregister_net(struct cache_detail *cd, struct net *net)
1734 {
1735 remove_cache_proc_entries(cd);
1736 sunrpc_destroy_cache_detail(cd);
1737 }
1738 EXPORT_SYMBOL_GPL(cache_unregister_net);
1739
1740 struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
1741 {
1742 struct cache_detail *cd;
1743 int i;
1744
1745 cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
1746 if (cd == NULL)
1747 return ERR_PTR(-ENOMEM);
1748
1749 cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
1750 GFP_KERNEL);
1751 if (cd->hash_table == NULL) {
1752 kfree(cd);
1753 return ERR_PTR(-ENOMEM);
1754 }
1755
1756 for (i = 0; i < cd->hash_size; i++)
1757 INIT_HLIST_HEAD(&cd->hash_table[i]);
1758 cd->net = net;
1759 return cd;
1760 }
1761 EXPORT_SYMBOL_GPL(cache_create_net);
1762
1763 void cache_destroy_net(struct cache_detail *cd, struct net *net)
1764 {
1765 kfree(cd->hash_table);
1766 kfree(cd);
1767 }
1768 EXPORT_SYMBOL_GPL(cache_destroy_net);
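/*
 * Illustrative only: per-net caches are typically instantiated from a
 * template in a pernet init hook, roughly as below (hypothetical names):
 *
 *	static int __net_init my_cache_net_init(struct net *net)
 *	{
 *		struct cache_detail *cd;
 *		int err;
 *
 *		cd = cache_create_net(&my_cache_template, net);
 *		if (IS_ERR(cd))
 *			return PTR_ERR(cd);
 *		err = cache_register_net(cd, net);
 *		if (err) {
 *			cache_destroy_net(cd, net);
 *			return err;
 *		}
 *		// stash cd in this net's private data for later
 *		// cache_unregister_net()/cache_destroy_net()
 *		return 0;
 *	}
 */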
1769
1770 static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
1771 size_t count, loff_t *ppos)
1772 {
1773 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1774
1775 return cache_read(filp, buf, count, ppos, cd);
1776 }
1777
1778 static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
1779 size_t count, loff_t *ppos)
1780 {
1781 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1782
1783 return cache_write(filp, buf, count, ppos, cd);
1784 }
1785
1786 static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
1787 {
1788 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1789
1790 return cache_poll(filp, wait, cd);
1791 }
1792
1793 static long cache_ioctl_pipefs(struct file *filp,
1794 unsigned int cmd, unsigned long arg)
1795 {
1796 struct inode *inode = file_inode(filp);
1797 struct cache_detail *cd = RPC_I(inode)->private;
1798
1799 return cache_ioctl(inode, filp, cmd, arg, cd);
1800 }
1801
1802 static int cache_open_pipefs(struct inode *inode, struct file *filp)
1803 {
1804 struct cache_detail *cd = RPC_I(inode)->private;
1805
1806 return cache_open(inode, filp, cd);
1807 }
1808
1809 static int cache_release_pipefs(struct inode *inode, struct file *filp)
1810 {
1811 struct cache_detail *cd = RPC_I(inode)->private;
1812
1813 return cache_release(inode, filp, cd);
1814 }
1815
1816 const struct file_operations cache_file_operations_pipefs = {
1817 .owner = THIS_MODULE,
1818 .llseek = no_llseek,
1819 .read = cache_read_pipefs,
1820 .write = cache_write_pipefs,
1821 .poll = cache_poll_pipefs,
1822 .unlocked_ioctl = cache_ioctl_pipefs,
1823 .open = cache_open_pipefs,
1824 .release = cache_release_pipefs,
1825 };
1826
1827 static int content_open_pipefs(struct inode *inode, struct file *filp)
1828 {
1829 struct cache_detail *cd = RPC_I(inode)->private;
1830
1831 return content_open(inode, filp, cd);
1832 }
1833
1834 static int content_release_pipefs(struct inode *inode, struct file *filp)
1835 {
1836 struct cache_detail *cd = RPC_I(inode)->private;
1837
1838 return content_release(inode, filp, cd);
1839 }
1840
1841 const struct file_operations content_file_operations_pipefs = {
1842 .open = content_open_pipefs,
1843 .read = seq_read,
1844 .llseek = seq_lseek,
1845 .release = content_release_pipefs,
1846 };
1847
1848 static int open_flush_pipefs(struct inode *inode, struct file *filp)
1849 {
1850 struct cache_detail *cd = RPC_I(inode)->private;
1851
1852 return open_flush(inode, filp, cd);
1853 }
1854
1855 static int release_flush_pipefs(struct inode *inode, struct file *filp)
1856 {
1857 struct cache_detail *cd = RPC_I(inode)->private;
1858
1859 return release_flush(inode, filp, cd);
1860 }
1861
1862 static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
1863 size_t count, loff_t *ppos)
1864 {
1865 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1866
1867 return read_flush(filp, buf, count, ppos, cd);
1868 }
1869
1870 static ssize_t write_flush_pipefs(struct file *filp,
1871 const char __user *buf,
1872 size_t count, loff_t *ppos)
1873 {
1874 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1875
1876 return write_flush(filp, buf, count, ppos, cd);
1877 }
1878
1879 const struct file_operations cache_flush_operations_pipefs = {
1880 .open = open_flush_pipefs,
1881 .read = read_flush_pipefs,
1882 .write = write_flush_pipefs,
1883 .release = release_flush_pipefs,
1884 .llseek = no_llseek,
1885 };
1886
1887 int sunrpc_cache_register_pipefs(struct dentry *parent,
1888 const char *name, umode_t umode,
1889 struct cache_detail *cd)
1890 {
1891 struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
1892 if (IS_ERR(dir))
1893 return PTR_ERR(dir);
1894 cd->pipefs = dir;
1895 return 0;
1896 }
1897 EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
1898
1899 void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
1900 {
1901 if (cd->pipefs) {
1902 rpc_remove_cache_dir(cd->pipefs);
1903 cd->pipefs = NULL;
1904 }
1905 }
1906 EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
1907
1908 void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
1909 {
1910 spin_lock(&cd->hash_lock);
	if (!hlist_unhashed(&h->cache_list)) {
1912 sunrpc_begin_cache_remove_entry(h, cd);
1913 spin_unlock(&cd->hash_lock);
1914 sunrpc_end_cache_remove_entry(h, cd);
1915 } else
1916 spin_unlock(&cd->hash_lock);
1917 }
1918 EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);