/*
   lru_cache.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/string.h> /* for memset */
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/lru_cache.h>

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
              "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("lru_cache - Track sets of hot objects");
MODULE_LICENSE("GPL");

/* This is a developer's aid only.
 * It catches concurrent access (lack of locking on the user's part). */
#define PARANOIA_ENTRY() do {       \
    BUG_ON(!lc);            \
    BUG_ON(!lc->nr_elements);   \
    BUG_ON(test_and_set_bit(__LC_PARANOIA, &lc->flags)); \
} while (0)

#define RETURN(x...)     do { \
    clear_bit_unlock(__LC_PARANOIA, &lc->flags); \
    return x ; } while (0)

/* BUG() if e is not one of the elements tracked by lc */
#define PARANOIA_LC_ELEMENT(lc, e) do { \
    struct lru_cache *lc_ = (lc);   \
    struct lc_element *e_ = (e);    \
    unsigned i = e_->lc_index;  \
    BUG_ON(i >= lc_->nr_elements);  \
    BUG_ON(lc_->lc_element[i] != e_); } while (0)


/* We need to atomically
 *  - try to grab the lock (set LC_LOCKED)
 *  - only if there is no pending transaction
 *    (neither LC_DIRTY nor LC_STARVING is set)
 * Because of PARANOIA_ENTRY() above abusing lc->flags as well,
 * it is not sufficient to just say
 *  return 0 == cmpxchg(&lc->flags, 0, LC_LOCKED);
 */
int lc_try_lock(struct lru_cache *lc)
{
    unsigned long val;

    /* Spin until no-one is inside a PARANOIA_ENTRY()/RETURN() section. */
    do {
        val = cmpxchg(&lc->flags, 0, LC_LOCKED);
    } while (unlikely(val == LC_PARANOIA));
    return 0 == val;
#if 0
    /* Alternative approach, spin in case someone enters or leaves a
     * PARANOIA_ENTRY()/RETURN() section. */
    unsigned long old, new, val;
    do {
        old = lc->flags & LC_PARANOIA;
        new = old | LC_LOCKED;
        val = cmpxchg(&lc->flags, old, new);
    } while (unlikely(val == (old ^ LC_PARANOIA)));
    return old == val;
#endif
}
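
#if 0
/* Hedged usage sketch (illustration only, not compiled): how the two
 * locking entry points differ for a caller.  lc_unlock() and
 * lc_try_lock_for_transaction() are the counterparts declared in
 * <linux/lru_cache.h>; my_flush() is a hypothetical caller helper.
 */
static void example_locking(struct lru_cache *lc)
{
    /* Whole-set exclusion: succeeds only while the set is "clean",
     * i.e. neither LC_DIRTY nor LC_STARVING is set. */
    if (lc_try_lock(lc)) {
        my_flush(lc);   /* hypothetical housekeeping */
        lc_unlock(lc);  /* clears LC_DIRTY and LC_LOCKED */
    }

    /* Transaction exclusion: takes LC_LOCKED even while a change
     * (LC_DIRTY) is pending; pairs with lc_committed() below. */
    if (lc_try_lock_for_transaction(lc)) {
        lc_committed(lc);
        lc_unlock(lc);
    }
}
#endif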

/**
 * lc_create - prepares to track objects in an active set
 * @name: descriptive name only used in lc_seq_printf_stats and lc_seq_dump_details
 * @cache: kmem_cache to allocate the tracked objects from
 * @max_pending_changes: maximum changes to accumulate until a transaction is required
 * @e_count: number of elements allowed to be active simultaneously
 * @e_size: size of the tracked objects
 * @e_off: offset to the &struct lc_element member in a tracked object
 *
 * Returns a pointer to a newly initialized struct lru_cache on success,
 * or NULL on (allocation) failure.
 */
struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
        unsigned max_pending_changes,
        unsigned e_count, size_t e_size, size_t e_off)
{
    struct hlist_head *slot = NULL;
    struct lc_element **element = NULL;
    struct lru_cache *lc;
    struct lc_element *e;
    unsigned cache_obj_size = kmem_cache_size(cache);
    unsigned i;

    WARN_ON(cache_obj_size < e_size);
    if (cache_obj_size < e_size)
        return NULL;

    /* e_count too big; would probably fail the allocation below anyway.
     * For typical use cases, e_count should be a few thousand at most. */
    if (e_count > LC_MAX_ACTIVE)
        return NULL;

    slot = kcalloc(e_count, sizeof(struct hlist_head), GFP_KERNEL);
    if (!slot)
        goto out_fail;
    element = kcalloc(e_count, sizeof(struct lc_element *), GFP_KERNEL);
    if (!element)
        goto out_fail;

    lc = kzalloc(sizeof(*lc), GFP_KERNEL);
    if (!lc)
        goto out_fail;

    INIT_LIST_HEAD(&lc->in_use);
    INIT_LIST_HEAD(&lc->lru);
    INIT_LIST_HEAD(&lc->free);
    INIT_LIST_HEAD(&lc->to_be_changed);

    lc->name = name;
    lc->element_size = e_size;
    lc->element_off = e_off;
    lc->nr_elements = e_count;
    lc->max_pending_changes = max_pending_changes;
    lc->lc_cache = cache;
    lc->lc_element = element;
    lc->lc_slot = slot;

    /* preallocate all objects */
    for (i = 0; i < e_count; i++) {
        void *p = kmem_cache_alloc(cache, GFP_KERNEL);
        if (!p)
            break;
        memset(p, 0, lc->element_size);
        e = p + e_off;
        e->lc_index = i;
        e->lc_number = LC_FREE;
        e->lc_new_number = LC_FREE;
        list_add(&e->list, &lc->free);
        element[i] = e;
    }
    if (i == e_count)
        return lc;

    /* else: could not allocate all elements, give up */
    while (i) {
        void *p = element[--i];
        kmem_cache_free(cache, p - e_off);
    }
    kfree(lc);
out_fail:
    kfree(element);
    kfree(slot);
    return NULL;
}
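
#if 0
/* Hedged setup sketch; struct my_extent, my_cache and the numbers are
 * made up for illustration.  The tracked object embeds a
 * struct lc_element, and e_off tells lc_create() where to find it.
 */
struct my_extent {
    struct lc_element lce;      /* embedded tracking element */
    unsigned long my_private;   /* caller payload */
};

static struct lru_cache *example_create(struct kmem_cache *my_cache)
{
    /* my_cache must have an object size of at least
     * sizeof(struct my_extent), e.g. from KMEM_CACHE(my_extent, 0). */
    return lc_create("my_extents", my_cache,
             16,    /* max_pending_changes */
             64,    /* e_count: active elements */
             sizeof(struct my_extent),
             offsetof(struct my_extent, lce));
}
#endif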

static void lc_free_by_index(struct lru_cache *lc, unsigned i)
{
    void *p = lc->lc_element[i];
    WARN_ON(!p);
    if (p) {
        p -= lc->element_off;
        kmem_cache_free(lc->lc_cache, p);
    }
}

/**
 * lc_destroy - frees memory allocated by lc_create()
 * @lc: the lru cache to destroy
 */
void lc_destroy(struct lru_cache *lc)
{
    unsigned i;
    if (!lc)
        return;
    for (i = 0; i < lc->nr_elements; i++)
        lc_free_by_index(lc, i);
    kfree(lc->lc_element);
    kfree(lc->lc_slot);
    kfree(lc);
}

/**
 * lc_reset - does a full reset for @lc and the hash table slots.
 * @lc: the lru cache to operate on
 *
 * It is roughly the equivalent of re-allocating a fresh lru_cache object,
 * basically a shortcut for lc_destroy(lc); lc = lc_create(...);
 */
void lc_reset(struct lru_cache *lc)
{
    unsigned i;

    INIT_LIST_HEAD(&lc->in_use);
    INIT_LIST_HEAD(&lc->lru);
    INIT_LIST_HEAD(&lc->free);
    INIT_LIST_HEAD(&lc->to_be_changed);
    lc->used = 0;
    lc->hits = 0;
    lc->misses = 0;
    lc->starving = 0;
    lc->locked = 0;
    lc->changed = 0;
    lc->pending_changes = 0;
    lc->flags = 0;
    memset(lc->lc_slot, 0, sizeof(struct hlist_head) * lc->nr_elements);

    for (i = 0; i < lc->nr_elements; i++) {
        struct lc_element *e = lc->lc_element[i];
        void *p = e;
        p -= lc->element_off;
        memset(p, 0, lc->element_size);
        /* re-init it */
        e->lc_index = i;
        e->lc_number = LC_FREE;
        e->lc_new_number = LC_FREE;
        list_add(&e->list, &lc->free);
    }
}

/**
 * lc_seq_printf_stats - print stats about @lc into @seq
 * @seq: the seq_file to print into
 * @lc: the lru cache to print statistics of
 */
void lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc)
{
    /* NOTE:
     * total calls to lc_get are
     * (starving + hits + misses)
     * misses include the "locked" count (an update from another thread
     * was in progress) and "changed", when the miss in fact led to a
     * successful update of the cache.
     */
    seq_printf(seq, "\t%s: used:%u/%u hits:%lu misses:%lu starving:%lu locked:%lu changed:%lu\n",
           lc->name, lc->used, lc->nr_elements,
           lc->hits, lc->misses, lc->starving, lc->locked, lc->changed);
}

static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr)
{
    return lc->lc_slot + (enr % lc->nr_elements);
}


static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr,
        bool include_changing)
{
    struct lc_element *e;

    BUG_ON(!lc);
    BUG_ON(!lc->nr_elements);
    hlist_for_each_entry(e, lc_hash_slot(lc, enr), colision) {
        /* "about to be changed" elements, pending transaction commit,
         * are hashed by their "new number". "Normal" elements have
         * lc_number == lc_new_number. */
        if (e->lc_new_number != enr)
            continue;
        if (e->lc_new_number == e->lc_number || include_changing)
            return e;
        break;
    }
    return NULL;
}

/**
 * lc_find - find element by label, if present in the hash table
 * @lc: The lru_cache object
 * @enr: element number
 *
 * Returns the pointer to an element, if the element with the requested
 * "label" or element number is present in the hash table,
 * or NULL if not found. Does not change the refcnt.
 * Ignores elements that are "about to be used", i.e. not yet in the active
 * set, but still pending transaction commit.
 */
struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr)
{
    return __lc_find(lc, enr, 0);
}

/**
 * lc_is_used - find element by label
 * @lc: The lru_cache object
 * @enr: element number
 *
 * Returns true if the element with the requested "label" or element number
 * is present in the hash table and is used (refcnt > 0).
 * Also finds elements that are not _currently_ used but only "about to be
 * used", i.e. on the "to_be_changed" list, pending transaction commit.
 */
bool lc_is_used(struct lru_cache *lc, unsigned int enr)
{
    struct lc_element *e = __lc_find(lc, enr, 1);
    return e && e->refcnt;
}

/**
 * lc_del - removes an element from the cache
 * @lc: The lru_cache object
 * @e: The element to remove
 *
 * @e must be unused (refcnt == 0). Moves @e from "lru" to "free" list,
 * sets @e->lc_number to %LC_FREE.
 */
void lc_del(struct lru_cache *lc, struct lc_element *e)
{
    PARANOIA_ENTRY();
    PARANOIA_LC_ELEMENT(lc, e);
    BUG_ON(e->refcnt);

    e->lc_number = e->lc_new_number = LC_FREE;
    hlist_del_init(&e->colision);
    list_move(&e->list, &lc->free);
    RETURN();
}

static struct lc_element *lc_prepare_for_change(struct lru_cache *lc, unsigned new_number)
{
    struct list_head *n;
    struct lc_element *e;

    if (!list_empty(&lc->free))
        n = lc->free.next;
    else if (!list_empty(&lc->lru))
        n = lc->lru.prev;
    else
        return NULL;

    e = list_entry(n, struct lc_element, list);
    PARANOIA_LC_ELEMENT(lc, e);

    e->lc_new_number = new_number;
    if (!hlist_unhashed(&e->colision))
        __hlist_del(&e->colision);
    hlist_add_head(&e->colision, lc_hash_slot(lc, new_number));
    list_move(&e->list, &lc->to_be_changed);

    return e;
}

static int lc_unused_element_available(struct lru_cache *lc)
{
    if (!list_empty(&lc->free))
        return 1; /* something on the free list */
    if (!list_empty(&lc->lru))
        return 1; /* something to evict */

    return 0;
}

/* used as internal flags to __lc_get */
enum {
    LC_GET_MAY_CHANGE = 1,
    LC_GET_MAY_USE_UNCOMMITTED = 2,
};

static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, unsigned int flags)
{
    struct lc_element *e;

    PARANOIA_ENTRY();
    if (lc->flags & LC_STARVING) {
        ++lc->starving;
        RETURN(NULL);
    }

    e = __lc_find(lc, enr, 1);
    /* if lc_new_number != lc_number,
     * this enr is currently being pulled in already,
     * and will be available once the pending transaction
     * has been committed. */
    if (e) {
        if (e->lc_new_number != e->lc_number) {
            /* It has been found above, but on the "to_be_changed"
             * list, not yet committed.  Don't pull it in twice,
             * wait for the transaction, then try again...
             */
            if (!(flags & LC_GET_MAY_USE_UNCOMMITTED))
                RETURN(NULL);
            /* ... unless the caller is aware of the implications,
             * probably preparing a cumulative transaction. */
            ++e->refcnt;
            ++lc->hits;
            RETURN(e);
        }
        /* else: lc_new_number == lc_number; a real hit. */
        ++lc->hits;
        if (e->refcnt++ == 0)
            lc->used++;
        list_move(&e->list, &lc->in_use); /* Not evictable... */
        RETURN(e);
    }
    /* e == NULL */

    ++lc->misses;
    if (!(flags & LC_GET_MAY_CHANGE))
        RETURN(NULL);

    /* To avoid races with lc_try_lock(), first, mark us dirty
     * (using test_and_set_bit, as it implies memory barriers), ... */
    test_and_set_bit(__LC_DIRTY, &lc->flags);

    /* ... only then check if it is locked anyway. If lc_unlock clears
     * the dirty bit again, that's not a problem, we will come here again.
     */
    if (test_bit(__LC_LOCKED, &lc->flags)) {
        ++lc->locked;
        RETURN(NULL);
    }

    /* In case there is nothing available and we cannot kick out
     * the LRU element, we have to wait ...
     */
    if (!lc_unused_element_available(lc)) {
        __set_bit(__LC_STARVING, &lc->flags);
        RETURN(NULL);
    }

    /* It was not present in the active set.  We are going to recycle an
     * unused (or even "free") element, but we won't accumulate more than
     * max_pending_changes changes.  */
    if (lc->pending_changes >= lc->max_pending_changes)
        RETURN(NULL);

    e = lc_prepare_for_change(lc, enr);
    BUG_ON(!e);

    clear_bit(__LC_STARVING, &lc->flags);
    BUG_ON(++e->refcnt != 1);
    lc->used++;
    lc->pending_changes++;

    RETURN(e);
}

/**
 * lc_get - get element by label, maybe change the active set
 * @lc: the lru cache to operate on
 * @enr: the label to look up
 *
 * Finds an element in the cache, increases its usage count,
 * "touches" and returns it.
 *
 * In case the requested number is not present, it needs to be added to the
 * cache. Therefore it is possible that another element is evicted from
 * the cache. In either case, the user is notified, so they are able to e.g.
 * keep a persistent log of the cache changes, and therefore of the objects
 * in use.
 * Return values:
 *  NULL
 *     The cache was marked %LC_STARVING,
 *     or the requested label was not in the active set
 *     and a changing transaction is still pending (@lc was marked %LC_DIRTY).
 *     Or no unused or free element could be recycled (@lc will be marked as
 *     %LC_STARVING, blocking further lc_get() operations).
 *
 *  pointer to the element with the REQUESTED element number.
 *     In this case, it can be used right away
 *
 *  pointer to an UNUSED element with some different element number,
 *          where that different number may also be %LC_FREE.
 *
 *          In this case, the cache is marked %LC_DIRTY,
 *          so lc_try_lock() will no longer succeed.
 *          The returned element pointer is moved to the "to_be_changed" list,
 *          and registered with the new element number on the hash collision chains,
 *          so it is possible to pick it up from lc_is_used().
 *          Up to "max_pending_changes" (see lc_create()) can be accumulated.
 *          The user now should do whatever housekeeping is necessary,
 *          typically serialize on lc_try_lock_for_transaction(), then call
 *          lc_committed(lc) and lc_unlock(), to finish the change.
 *
 * NOTE: The user needs to check the lc_number on EACH use, so they
 *       recognize any cache set change.
 */
struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr)
{
    return __lc_get(lc, enr, LC_GET_MAY_CHANGE);
}
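
#if 0
/* Hedged sketch of the protocol documented above.  The spinlock and
 * the transaction write (my_lock, my_write_transaction()) are
 * hypothetical caller responsibilities; this library does no locking
 * or persistence of its own.
 */
static struct lc_element *example_get(struct lru_cache *lc,
                      spinlock_t *my_lock, unsigned int enr)
{
    struct lc_element *e;

    spin_lock(my_lock);
    e = lc_get(lc, enr);
    spin_unlock(my_lock);
    if (!e)     /* starving, dirty, or too many pending changes */
        return NULL;

    if (e->lc_number != enr) {
        /* A recycled element: @lc is now LC_DIRTY and @e sits on
         * the "to_be_changed" list until the change is committed. */
        if (lc_try_lock_for_transaction(lc)) {
            my_write_transaction(lc);   /* hypothetical */
            spin_lock(my_lock);
            lc_committed(lc);
            spin_unlock(my_lock);
            lc_unlock(lc);
        }
    }
    return e;   /* balance with lc_put() when done */
}
#endif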

/**
 * lc_get_cumulative - like lc_get; also finds to-be-changed elements
 * @lc: the lru cache to operate on
 * @enr: the label to look up
 *
 * Unlike lc_get(), this also returns the element for @enr if it belongs to
 * a pending transaction, so the return values are like for lc_get(),
 * plus:
 *
 * pointer to an element already on the "to_be_changed" list.
 *  In this case, the cache was already marked %LC_DIRTY.
 *
 * Caller needs to make sure that the pending transaction is completed,
 * before proceeding to actually use this element.
 */
struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr)
{
    return __lc_get(lc, enr, LC_GET_MAY_CHANGE|LC_GET_MAY_USE_UNCOMMITTED);
}

/**
 * lc_try_get - get element by label, if present; do not change the active set
 * @lc: the lru cache to operate on
 * @enr: the label to look up
 *
 * Finds an element in the cache, increases its usage count,
 * "touches" and returns it.
 *
 * Return values:
 *  NULL
 *     The cache was marked %LC_STARVING,
 *     or the requested label was not in the active set
 *
 *  pointer to the element with the REQUESTED element number.
 *     In this case, it can be used right away
 */
struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr)
{
    return __lc_get(lc, enr, 0);
}

/**
 * lc_committed - tell @lc that pending changes have been recorded
 * @lc: the lru cache to operate on
 *
 * The user is expected to serialize on an explicit
 * lc_try_lock_for_transaction() before the transaction is started,
 * and needs to lc_unlock() explicitly afterwards.
 */
void lc_committed(struct lru_cache *lc)
{
    struct lc_element *e, *tmp;

    PARANOIA_ENTRY();
    list_for_each_entry_safe(e, tmp, &lc->to_be_changed, list) {
        /* count number of changes, not number of transactions */
        ++lc->changed;
        e->lc_number = e->lc_new_number;
        list_move(&e->list, &lc->in_use);
    }
    lc->pending_changes = 0;
    RETURN();
}


/**
 * lc_put - give up refcnt of @e
 * @lc: the lru cache to operate on
 * @e: the element to put
 *
 * If refcnt reaches zero, the element is moved to the lru list,
 * and a %LC_STARVING (if set) is cleared.
 * Returns the new (post-decrement) refcnt.
 */
unsigned int lc_put(struct lru_cache *lc, struct lc_element *e)
{
    PARANOIA_ENTRY();
    PARANOIA_LC_ELEMENT(lc, e);
    BUG_ON(e->refcnt == 0);
    BUG_ON(e->lc_number != e->lc_new_number);
    if (--e->refcnt == 0) {
        /* move it to the front of LRU. */
        list_move(&e->list, &lc->lru);
        lc->used--;
        clear_bit_unlock(__LC_STARVING, &lc->flags);
    }
    RETURN(e->refcnt);
}
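
#if 0
/* Hedged sketch of the refcount pairing: every successful lc_get() or
 * lc_try_get() must eventually be balanced by lc_put().  my_lock is a
 * stand-in for whatever lock the caller serializes all lru_cache
 * operations with. */
static void example_use(struct lru_cache *lc, spinlock_t *my_lock,
            unsigned int enr)
{
    struct lc_element *e;

    spin_lock(my_lock);
    e = lc_try_get(lc, enr);    /* refcnt++ on hit, set unchanged */
    spin_unlock(my_lock);
    if (!e)
        return;

    /* ... use the object @e is embedded in ... */

    spin_lock(my_lock);
    lc_put(lc, e);          /* back onto lru once refcnt drops to 0 */
    spin_unlock(my_lock);
}
#endif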

/**
 * lc_element_by_index
 * @lc: the lru cache to operate on
 * @i: the index of the element to return
 */
struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i)
{
    BUG_ON(i >= lc->nr_elements);
    BUG_ON(lc->lc_element[i] == NULL);
    BUG_ON(lc->lc_element[i]->lc_index != i);
    return lc->lc_element[i];
}

/**
 * lc_index_of
 * @lc: the lru cache to operate on
 * @e: the element to query for its index position in lc->element
 */
unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e)
{
    PARANOIA_LC_ELEMENT(lc, e);
    return e->lc_index;
}

/**
 * lc_set - associate index with label
 * @lc: the lru cache to operate on
 * @enr: the label to set
 * @index: the element index to associate label with.
 *
 * Used to initialize the active set to some previously recorded state.
 */
void lc_set(struct lru_cache *lc, unsigned int enr, int index)
{
    struct lc_element *e;
    struct list_head *lh;

    if (index < 0 || index >= lc->nr_elements)
        return;

    e = lc_element_by_index(lc, index);
    BUG_ON(e->lc_number != e->lc_new_number);
    BUG_ON(e->refcnt != 0);

    e->lc_number = e->lc_new_number = enr;
    hlist_del_init(&e->colision);
    if (enr == LC_FREE)
        lh = &lc->free;
    else {
        hlist_add_head(&e->colision, lc_hash_slot(lc, enr));
        lh = &lc->lru;
    }
    list_move(&e->list, lh);
}
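
#if 0
/* Hedged sketch: rebuilding the active set from previously recorded
 * state, e.g. after reading back an on-disk activity log.  my_log and
 * its layout are made up for illustration. */
static void example_restore(struct lru_cache *lc,
                const unsigned int *my_log, unsigned int n)
{
    unsigned int i;

    for (i = 0; i < n && i < lc->nr_elements; i++)
        lc_set(lc, my_log[i], i);   /* bind label to slot i */
}
#endif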

/**
 * lc_seq_dump_details - dump a complete LRU cache to seq in textual form
 * @seq: the &struct seq_file pointer to seq_printf into
 * @lc: the lru cache to operate on
 * @utext: user supplied additional "heading" or other info
 * @detail: function pointer the user may provide to dump further details
 * of the object the lc_element is embedded in. May be NULL.
 * Note: a leading space ' ' and a trailing newline '\n' are implied.
 */
void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext,
         void (*detail) (struct seq_file *, struct lc_element *))
{
    unsigned int nr_elements = lc->nr_elements;
    struct lc_element *e;
    int i;

    seq_printf(seq, "\tnn: lc_number (new nr) refcnt %s\n ", utext);
    for (i = 0; i < nr_elements; i++) {
        e = lc_element_by_index(lc, i);
        if (e->lc_number != e->lc_new_number)
            seq_printf(seq, "\t%5d: %6d %8d %6d ",
                i, e->lc_number, e->lc_new_number, e->refcnt);
        else
            seq_printf(seq, "\t%5d: %6d %-8s %6d ",
                i, e->lc_number, "-\"-", e->refcnt);
        if (detail)
            detail(seq, e);
        seq_putc(seq, '\n');
    }
}

EXPORT_SYMBOL(lc_create);
EXPORT_SYMBOL(lc_reset);
EXPORT_SYMBOL(lc_destroy);
EXPORT_SYMBOL(lc_set);
EXPORT_SYMBOL(lc_del);
EXPORT_SYMBOL(lc_try_get);
EXPORT_SYMBOL(lc_find);
EXPORT_SYMBOL(lc_get);
EXPORT_SYMBOL(lc_put);
EXPORT_SYMBOL(lc_committed);
EXPORT_SYMBOL(lc_element_by_index);
EXPORT_SYMBOL(lc_index_of);
EXPORT_SYMBOL(lc_seq_printf_stats);
EXPORT_SYMBOL(lc_seq_dump_details);
EXPORT_SYMBOL(lc_try_lock);
EXPORT_SYMBOL(lc_is_used);
EXPORT_SYMBOL(lc_get_cumulative);