0001 /*
0002  * zswap.c - zswap driver file
0003  *
0004  * zswap is a backend for frontswap that takes pages that are in the process
0005  * of being swapped out and attempts to compress and store them in a
0006  * RAM-based memory pool.  This can result in a significant I/O reduction on
0007  * the swap device and, in the case where decompressing from RAM is faster
0008  * than reading from the swap device, can also improve workload performance.
0009  *
0010  * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
0011  *
0012  * This program is free software; you can redistribute it and/or
0013  * modify it under the terms of the GNU General Public License
0014  * as published by the Free Software Foundation; either version 2
0015  * of the License, or (at your option) any later version.
0016  *
0017  * This program is distributed in the hope that it will be useful,
0018  * but WITHOUT ANY WARRANTY; without even the implied warranty of
0019  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
0020  * GNU General Public License for more details.
0021 */
0022 
0023 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0024 
0025 #include <linux/module.h>
0026 #include <linux/cpu.h>
0027 #include <linux/highmem.h>
0028 #include <linux/slab.h>
0029 #include <linux/spinlock.h>
0030 #include <linux/types.h>
0031 #include <linux/atomic.h>
0032 #include <linux/frontswap.h>
0033 #include <linux/rbtree.h>
0034 #include <linux/swap.h>
0035 #include <linux/crypto.h>
0036 #include <linux/mempool.h>
0037 #include <linux/zpool.h>
0038 
0039 #include <linux/mm_types.h>
0040 #include <linux/page-flags.h>
0041 #include <linux/swapops.h>
0042 #include <linux/writeback.h>
0043 #include <linux/pagemap.h>
0044 
0045 /*********************************
0046 * statistics
0047 **********************************/
0048 /* Total bytes used by the compressed storage */
0049 static u64 zswap_pool_total_size;
0050 /* The number of compressed pages currently stored in zswap */
0051 static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
0052 
0053 /*
0054  * The statistics below are not protected from concurrent access for
0055  * performance reasons, so they may not be 100% accurate.  However,
0056  * they do provide useful information on roughly how many times a
0057  * certain event is occurring.
0058 */
0059 
0060 /* Pool limit was hit (see zswap_max_pool_percent) */
0061 static u64 zswap_pool_limit_hit;
0062 /* Pages written back when pool limit was reached */
0063 static u64 zswap_written_back_pages;
0064 /* Store failed due to a reclaim failure after pool limit was reached */
0065 static u64 zswap_reject_reclaim_fail;
0066 /* Compressed page was too big for the allocator to (optimally) store */
0067 static u64 zswap_reject_compress_poor;
0068 /* Store failed because underlying allocator could not get memory */
0069 static u64 zswap_reject_alloc_fail;
0070 /* Store failed because the entry metadata could not be allocated (rare) */
0071 static u64 zswap_reject_kmemcache_fail;
0072 /* Duplicate store was encountered (rare) */
0073 static u64 zswap_duplicate_entry;
0074 
0075 /*********************************
0076 * tunables
0077 **********************************/
0078 
0079 /* Enable/disable zswap (disabled by default) */
0080 static bool zswap_enabled;
0081 static int zswap_enabled_param_set(const char *,
0082                    const struct kernel_param *);
0083 static struct kernel_param_ops zswap_enabled_param_ops = {
0084     .set =      zswap_enabled_param_set,
0085     .get =      param_get_bool,
0086 };
0087 module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
0088 
0089 /* Crypto compressor to use */
0090 #define ZSWAP_COMPRESSOR_DEFAULT "lzo"
0091 static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
0092 static int zswap_compressor_param_set(const char *,
0093                       const struct kernel_param *);
0094 static struct kernel_param_ops zswap_compressor_param_ops = {
0095     .set =      zswap_compressor_param_set,
0096     .get =      param_get_charp,
0097     .free =     param_free_charp,
0098 };
0099 module_param_cb(compressor, &zswap_compressor_param_ops,
0100         &zswap_compressor, 0644);
0101 
0102 /* Compressed storage zpool to use */
0103 #define ZSWAP_ZPOOL_DEFAULT "zbud"
0104 static char *zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
0105 static int zswap_zpool_param_set(const char *, const struct kernel_param *);
0106 static struct kernel_param_ops zswap_zpool_param_ops = {
0107     .set =      zswap_zpool_param_set,
0108     .get =      param_get_charp,
0109     .free =     param_free_charp,
0110 };
0111 module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
0112 
0113 /* The maximum percentage of memory that the compressed pool can occupy */
0114 static unsigned int zswap_max_pool_percent = 20;
0115 module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
0116 
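     /*
      * All of these tunables use mode 0644, so (subject to the restrictions
      * in the param callbacks below) they can be set on the kernel command
      * line, e.g. zswap.enabled=1 zswap.compressor=lz4
      * zswap.max_pool_percent=20, or changed at runtime via
      * /sys/module/zswap/parameters/.
      */
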
0117 /*********************************
0118 * data structures
0119 **********************************/
0120 
0121 struct zswap_pool {
0122     struct zpool *zpool;
0123     struct crypto_comp * __percpu *tfm;
0124     struct kref kref;
0125     struct list_head list;
0126     struct work_struct work;
0127     struct hlist_node node;
0128     char tfm_name[CRYPTO_MAX_ALG_NAME];
0129 };
0130 
0131 /*
0132  * struct zswap_entry
0133  *
0134  * This structure contains the metadata for tracking a single compressed
0135  * page within zswap.
0136  *
0137  * rbnode - links the entry into red-black tree for the appropriate swap type
0138  * offset - the swap offset for the entry.  Index into the red-black tree.
0139  * refcount - the number of outstanding references to the entry. This is
0140  *            needed to protect against premature freeing of the entry by
0141  *            concurrent calls to load, invalidate, and writeback.  The lock
0142  *            for the zswap_tree structure that contains the entry must
0143  *            be held while changing the refcount.  Since the lock must
0144  *            be held, there is no reason to also make refcount atomic.
0145  * length - the length in bytes of the compressed page data.  Needed during
0146  *          decompression
0147  * pool - the zswap_pool the entry's data is in
0148  * handle - zpool allocation handle that stores the compressed page data
0149  */
0150 struct zswap_entry {
0151     struct rb_node rbnode;
0152     pgoff_t offset;
0153     int refcount;
0154     unsigned int length;
0155     struct zswap_pool *pool;
0156     unsigned long handle;
0157 };
0158 
0159 struct zswap_header {
0160     swp_entry_t swpentry;
0161 };
0162 
0163 /*
0164  * The tree lock in the zswap_tree struct protects a few things:
0165  * - the rbtree
0166  * - the refcount field of each entry in the tree
0167  */
0168 struct zswap_tree {
0169     struct rb_root rbroot;
0170     spinlock_t lock;
0171 };
0172 
0173 static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
0174 
0175 /* RCU-protected iteration */
0176 static LIST_HEAD(zswap_pools);
0177 /* protects zswap_pools list modification */
0178 static DEFINE_SPINLOCK(zswap_pools_lock);
0179 /* pool counter to provide unique names to zpool */
0180 static atomic_t zswap_pools_count = ATOMIC_INIT(0);
0181 
0182 /* used by param callback function */
0183 static bool zswap_init_started;
0184 
0185 /* fatal error during init */
0186 static bool zswap_init_failed;
0187 
0188 /*********************************
0189 * helpers and fwd declarations
0190 **********************************/
0191 
0192 #define zswap_pool_debug(msg, p)                \
0193     pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,     \
0194          zpool_get_type((p)->zpool))
0195 
0196 static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
0197 static int zswap_pool_get(struct zswap_pool *pool);
0198 static void zswap_pool_put(struct zswap_pool *pool);
0199 
0200 static const struct zpool_ops zswap_zpool_ops = {
0201     .evict = zswap_writeback_entry
0202 };
0203 
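     /* true once the compressed pool occupies more than max_pool_percent of total RAM */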
0204 static bool zswap_is_full(void)
0205 {
0206     return totalram_pages * zswap_max_pool_percent / 100 <
0207         DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
0208 }
0209 
0210 static void zswap_update_total_size(void)
0211 {
0212     struct zswap_pool *pool;
0213     u64 total = 0;
0214 
0215     rcu_read_lock();
0216 
0217     list_for_each_entry_rcu(pool, &zswap_pools, list)
0218         total += zpool_get_total_size(pool->zpool);
0219 
0220     rcu_read_unlock();
0221 
0222     zswap_pool_total_size = total;
0223 }
0224 
0225 /*********************************
0226 * zswap entry functions
0227 **********************************/
0228 static struct kmem_cache *zswap_entry_cache;
0229 
0230 static int __init zswap_entry_cache_create(void)
0231 {
0232     zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
0233     return zswap_entry_cache == NULL;
0234 }
0235 
0236 static void __init zswap_entry_cache_destroy(void)
0237 {
0238     kmem_cache_destroy(zswap_entry_cache);
0239 }
0240 
0241 static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
0242 {
0243     struct zswap_entry *entry;
0244     entry = kmem_cache_alloc(zswap_entry_cache, gfp);
0245     if (!entry)
0246         return NULL;
0247     entry->refcount = 1;
0248     RB_CLEAR_NODE(&entry->rbnode);
0249     return entry;
0250 }
0251 
0252 static void zswap_entry_cache_free(struct zswap_entry *entry)
0253 {
0254     kmem_cache_free(zswap_entry_cache, entry);
0255 }
0256 
0257 /*********************************
0258 * rbtree functions
0259 **********************************/
0260 static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
0261 {
0262     struct rb_node *node = root->rb_node;
0263     struct zswap_entry *entry;
0264 
0265     while (node) {
0266         entry = rb_entry(node, struct zswap_entry, rbnode);
0267         if (entry->offset > offset)
0268             node = node->rb_left;
0269         else if (entry->offset < offset)
0270             node = node->rb_right;
0271         else
0272             return entry;
0273     }
0274     return NULL;
0275 }
0276 
0277 /*
0278  * In the case that an entry with the same offset is found, a pointer to
0279  * the existing entry is stored in dupentry and the function returns -EEXIST
0280  */
0281 static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
0282             struct zswap_entry **dupentry)
0283 {
0284     struct rb_node **link = &root->rb_node, *parent = NULL;
0285     struct zswap_entry *myentry;
0286 
0287     while (*link) {
0288         parent = *link;
0289         myentry = rb_entry(parent, struct zswap_entry, rbnode);
0290         if (myentry->offset > entry->offset)
0291             link = &(*link)->rb_left;
0292         else if (myentry->offset < entry->offset)
0293             link = &(*link)->rb_right;
0294         else {
0295             *dupentry = myentry;
0296             return -EEXIST;
0297         }
0298     }
0299     rb_link_node(&entry->rbnode, parent, link);
0300     rb_insert_color(&entry->rbnode, root);
0301     return 0;
0302 }
0303 
0304 static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
0305 {
0306     if (!RB_EMPTY_NODE(&entry->rbnode)) {
0307         rb_erase(&entry->rbnode, root);
0308         RB_CLEAR_NODE(&entry->rbnode);
0309     }
0310 }
0311 
0312 /*
0313  * Carries out the common pattern of freeing an entry's zpool allocation,
0314  * freeing the entry itself, and decrementing the number of stored pages.
0315  */
0316 static void zswap_free_entry(struct zswap_entry *entry)
0317 {
0318     zpool_free(entry->pool->zpool, entry->handle);
0319     zswap_pool_put(entry->pool);
0320     zswap_entry_cache_free(entry);
0321     atomic_dec(&zswap_stored_pages);
0322     zswap_update_total_size();
0323 }
0324 
0325 /* caller must hold the tree lock */
0326 static void zswap_entry_get(struct zswap_entry *entry)
0327 {
0328     entry->refcount++;
0329 }
0330 
0331 /* caller must hold the tree lock
0332  * remove from the tree and free it, if nobody references the entry
0333  */
0334 static void zswap_entry_put(struct zswap_tree *tree,
0335             struct zswap_entry *entry)
0336 {
0337     int refcount = --entry->refcount;
0338 
0339     BUG_ON(refcount < 0);
0340     if (refcount == 0) {
0341         zswap_rb_erase(&tree->rbroot, entry);
0342         zswap_free_entry(entry);
0343     }
0344 }
0345 
0346 /* caller must hold the tree lock */
0347 static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
0348                 pgoff_t offset)
0349 {
0350     struct zswap_entry *entry;
0351 
0352     entry = zswap_rb_search(root, offset);
0353     if (entry)
0354         zswap_entry_get(entry);
0355 
0356     return entry;
0357 }
0358 
0359 /*********************************
0360 * per-cpu code
0361 **********************************/
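     /*
      * Per-cpu scratch buffer used as the compression destination; two pages
      * are allocated since "compressed" output can exceed PAGE_SIZE for
      * incompressible data.
      */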
0362 static DEFINE_PER_CPU(u8 *, zswap_dstmem);
0363 
0364 static int zswap_dstmem_prepare(unsigned int cpu)
0365 {
0366     u8 *dst;
0367 
0368     dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
0369     if (!dst) {
0370         pr_err("can't allocate compressor buffer\n");
0371         return -ENOMEM;
0372     }
0373     per_cpu(zswap_dstmem, cpu) = dst;
0374     return 0;
0375 }
0376 
0377 static int zswap_dstmem_dead(unsigned int cpu)
0378 {
0379     u8 *dst;
0380 
0381     dst = per_cpu(zswap_dstmem, cpu);
0382     kfree(dst);
0383     per_cpu(zswap_dstmem, cpu) = NULL;
0384 
0385     return 0;
0386 }
0387 
0388 static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
0389 {
0390     struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
0391     struct crypto_comp *tfm;
0392 
0393     if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
0394         return 0;
0395 
0396     tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
0397     if (IS_ERR_OR_NULL(tfm)) {
0398         pr_err("could not alloc crypto comp %s : %ld\n",
0399                pool->tfm_name, PTR_ERR(tfm));
0400         return -ENOMEM;
0401     }
0402     *per_cpu_ptr(pool->tfm, cpu) = tfm;
0403     return 0;
0404 }
0405 
0406 static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
0407 {
0408     struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
0409     struct crypto_comp *tfm;
0410 
0411     tfm = *per_cpu_ptr(pool->tfm, cpu);
0412     if (!IS_ERR_OR_NULL(tfm))
0413         crypto_free_comp(tfm);
0414     *per_cpu_ptr(pool->tfm, cpu) = NULL;
0415     return 0;
0416 }
0417 
0418 /*********************************
0419 * pool functions
0420 **********************************/
0421 
0422 static struct zswap_pool *__zswap_pool_current(void)
0423 {
0424     struct zswap_pool *pool;
0425 
0426     pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
0427     WARN_ON(!pool);
0428 
0429     return pool;
0430 }
0431 
0432 static struct zswap_pool *zswap_pool_current(void)
0433 {
0434     assert_spin_locked(&zswap_pools_lock);
0435 
0436     return __zswap_pool_current();
0437 }
0438 
0439 static struct zswap_pool *zswap_pool_current_get(void)
0440 {
0441     struct zswap_pool *pool;
0442 
0443     rcu_read_lock();
0444 
0445     pool = __zswap_pool_current();
0446     if (!pool || !zswap_pool_get(pool))
0447         pool = NULL;
0448 
0449     rcu_read_unlock();
0450 
0451     return pool;
0452 }
0453 
0454 static struct zswap_pool *zswap_pool_last_get(void)
0455 {
0456     struct zswap_pool *pool, *last = NULL;
0457 
0458     rcu_read_lock();
0459 
0460     list_for_each_entry_rcu(pool, &zswap_pools, list)
0461         last = pool;
0462     if (!WARN_ON(!last) && !zswap_pool_get(last))
0463         last = NULL;
0464 
0465     rcu_read_unlock();
0466 
0467     return last;
0468 }
0469 
0470 /* type and compressor must be null-terminated */
0471 static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
0472 {
0473     struct zswap_pool *pool;
0474 
0475     assert_spin_locked(&zswap_pools_lock);
0476 
0477     list_for_each_entry_rcu(pool, &zswap_pools, list) {
0478         if (strcmp(pool->tfm_name, compressor))
0479             continue;
0480         if (strcmp(zpool_get_type(pool->zpool), type))
0481             continue;
0482         /* if we can't get it, it's about to be destroyed */
0483         if (!zswap_pool_get(pool))
0484             continue;
0485         return pool;
0486     }
0487 
0488     return NULL;
0489 }
0490 
0491 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
0492 {
0493     struct zswap_pool *pool;
0494     char name[38]; /* 'zswap' + 32 char (max) num + \0 */
0495     gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
0496     int ret;
0497 
0498     pool = kzalloc(sizeof(*pool), GFP_KERNEL);
0499     if (!pool) {
0500         pr_err("pool alloc failed\n");
0501         return NULL;
0502     }
0503 
0504     /* unique name for each pool specifically required by zsmalloc */
0505     snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
0506 
0507     pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
0508     if (!pool->zpool) {
0509         pr_err("%s zpool not available\n", type);
0510         goto error;
0511     }
0512     pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));
0513 
0514     strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
0515     pool->tfm = alloc_percpu(struct crypto_comp *);
0516     if (!pool->tfm) {
0517         pr_err("percpu alloc failed\n");
0518         goto error;
0519     }
0520 
0521     ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
0522                        &pool->node);
0523     if (ret)
0524         goto error;
0525     pr_debug("using %s compressor\n", pool->tfm_name);
0526 
0527     /* being the current pool takes 1 ref; this func expects the
0528      * caller to always add the new pool as the current pool
0529      */
0530     kref_init(&pool->kref);
0531     INIT_LIST_HEAD(&pool->list);
0532 
0533     zswap_pool_debug("created", pool);
0534 
0535     return pool;
0536 
0537 error:
0538     free_percpu(pool->tfm);
0539     if (pool->zpool)
0540         zpool_destroy_pool(pool->zpool);
0541     kfree(pool);
0542     return NULL;
0543 }
0544 
0545 static __init struct zswap_pool *__zswap_pool_create_fallback(void)
0546 {
0547     if (!crypto_has_comp(zswap_compressor, 0, 0)) {
0548         if (!strcmp(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT)) {
0549             pr_err("default compressor %s not available\n",
0550                    zswap_compressor);
0551             return NULL;
0552         }
0553         pr_err("compressor %s not available, using default %s\n",
0554                zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT);
0555         param_free_charp(&zswap_compressor);
0556         zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
0557     }
0558     if (!zpool_has_pool(zswap_zpool_type)) {
0559         if (!strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
0560             pr_err("default zpool %s not available\n",
0561                    zswap_zpool_type);
0562             return NULL;
0563         }
0564         pr_err("zpool %s not available, using default %s\n",
0565                zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT);
0566         param_free_charp(&zswap_zpool_type);
0567         zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
0568     }
0569 
0570     return zswap_pool_create(zswap_zpool_type, zswap_compressor);
0571 }
0572 
0573 static void zswap_pool_destroy(struct zswap_pool *pool)
0574 {
0575     zswap_pool_debug("destroying", pool);
0576 
0577     cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
0578     free_percpu(pool->tfm);
0579     zpool_destroy_pool(pool->zpool);
0580     kfree(pool);
0581 }
0582 
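     /* returns 0 if the pool is already on its way to destruction, i.e. its refcount has hit zero */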
0583 static int __must_check zswap_pool_get(struct zswap_pool *pool)
0584 {
0585     return kref_get_unless_zero(&pool->kref);
0586 }
0587 
0588 static void __zswap_pool_release(struct work_struct *work)
0589 {
0590     struct zswap_pool *pool = container_of(work, typeof(*pool), work);
0591 
0592     synchronize_rcu();
0593 
0594     /* nobody should have been able to get a kref... */
0595     WARN_ON(kref_get_unless_zero(&pool->kref));
0596 
0597     /* pool is now off zswap_pools list and has no references. */
0598     zswap_pool_destroy(pool);
0599 }
0600 
0601 static void __zswap_pool_empty(struct kref *kref)
0602 {
0603     struct zswap_pool *pool;
0604 
0605     pool = container_of(kref, typeof(*pool), kref);
0606 
0607     spin_lock(&zswap_pools_lock);
0608 
0609     WARN_ON(pool == zswap_pool_current());
0610 
0611     list_del_rcu(&pool->list);
0612 
0613     INIT_WORK(&pool->work, __zswap_pool_release);
0614     schedule_work(&pool->work);
0615 
0616     spin_unlock(&zswap_pools_lock);
0617 }
0618 
0619 static void zswap_pool_put(struct zswap_pool *pool)
0620 {
0621     kref_put(&pool->kref, __zswap_pool_empty);
0622 }
0623 
0624 /*********************************
0625 * param callbacks
0626 **********************************/
0627 
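     /*
      * Common handler for runtime changes to either the zpool type or the
      * compressor: validate the new value, find or create a pool using the
      * resulting type/compressor pair, swap it in as the current pool, and
      * drop the reference held on the pool being replaced (or on the new
      * pool, if the switch failed).
      */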
0628 /* val must be a null-terminated string */
0629 static int __zswap_param_set(const char *val, const struct kernel_param *kp,
0630                  char *type, char *compressor)
0631 {
0632     struct zswap_pool *pool, *put_pool = NULL;
0633     char *s = strstrip((char *)val);
0634     int ret;
0635 
0636     if (zswap_init_failed) {
0637         pr_err("can't set param, initialization failed\n");
0638         return -ENODEV;
0639     }
0640 
0641     /* no change required */
0642     if (!strcmp(s, *(char **)kp->arg))
0643         return 0;
0644 
0645     /* if this is load-time (pre-init) param setting,
0646      * don't create a pool; that's done during init.
0647      */
0648     if (!zswap_init_started)
0649         return param_set_charp(s, kp);
0650 
0651     if (!type) {
0652         if (!zpool_has_pool(s)) {
0653             pr_err("zpool %s not available\n", s);
0654             return -ENOENT;
0655         }
0656         type = s;
0657     } else if (!compressor) {
0658         if (!crypto_has_comp(s, 0, 0)) {
0659             pr_err("compressor %s not available\n", s);
0660             return -ENOENT;
0661         }
0662         compressor = s;
0663     } else {
0664         WARN_ON(1);
0665         return -EINVAL;
0666     }
0667 
0668     spin_lock(&zswap_pools_lock);
0669 
0670     pool = zswap_pool_find_get(type, compressor);
0671     if (pool) {
0672         zswap_pool_debug("using existing", pool);
0673         list_del_rcu(&pool->list);
0674     } else {
0675         spin_unlock(&zswap_pools_lock);
0676         pool = zswap_pool_create(type, compressor);
0677         spin_lock(&zswap_pools_lock);
0678     }
0679 
0680     if (pool)
0681         ret = param_set_charp(s, kp);
0682     else
0683         ret = -EINVAL;
0684 
0685     if (!ret) {
0686         put_pool = zswap_pool_current();
0687         list_add_rcu(&pool->list, &zswap_pools);
0688     } else if (pool) {
0689         /* add the possibly pre-existing pool to the end of the pools
0690          * list; if it's new (and empty) then it'll be removed and
0691          * destroyed by the put after we drop the lock
0692          */
0693         list_add_tail_rcu(&pool->list, &zswap_pools);
0694         put_pool = pool;
0695     }
0696 
0697     spin_unlock(&zswap_pools_lock);
0698 
0699     /* drop the ref from either the old current pool,
0700      * or the new pool we failed to add
0701      */
0702     if (put_pool)
0703         zswap_pool_put(put_pool);
0704 
0705     return ret;
0706 }
0707 
0708 static int zswap_compressor_param_set(const char *val,
0709                       const struct kernel_param *kp)
0710 {
0711     return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
0712 }
0713 
0714 static int zswap_zpool_param_set(const char *val,
0715                  const struct kernel_param *kp)
0716 {
0717     return __zswap_param_set(val, kp, NULL, zswap_compressor);
0718 }
0719 
0720 static int zswap_enabled_param_set(const char *val,
0721                    const struct kernel_param *kp)
0722 {
0723     if (zswap_init_failed) {
0724         pr_err("can't enable, initialization failed\n");
0725         return -ENODEV;
0726     }
0727 
0728     return param_set_bool(val, kp);
0729 }
0730 
0731 /*********************************
0732 * writeback code
0733 **********************************/
0734 /* return enum for zswap_get_swap_cache_page */
0735 enum zswap_get_swap_ret {
0736     ZSWAP_SWAPCACHE_NEW,
0737     ZSWAP_SWAPCACHE_EXIST,
0738     ZSWAP_SWAPCACHE_FAIL,
0739 };
0740 
0741 /*
0742  * zswap_get_swap_cache_page
0743  *
0744  * This is an adaptation of read_swap_cache_async()
0745  *
0746  * This function tries to find a page with the given swap entry
0747  * in the swapper_space address space (the swap cache).  If the page
0748  * is found, it is returned in retpage.  Otherwise, a page is allocated,
0749  * added to the swap cache, and returned in retpage.
0750  *
0751  * On success, the swap cache page is returned in retpage
0752  * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
0753  * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
0754  *     the new page is added to swapcache and locked
0755  * Returns ZSWAP_SWAPCACHE_FAIL on error
0756  */
0757 static int zswap_get_swap_cache_page(swp_entry_t entry,
0758                 struct page **retpage)
0759 {
0760     bool page_was_allocated;
0761 
0762     *retpage = __read_swap_cache_async(entry, GFP_KERNEL,
0763             NULL, 0, &page_was_allocated);
0764     if (page_was_allocated)
0765         return ZSWAP_SWAPCACHE_NEW;
0766     if (!*retpage)
0767         return ZSWAP_SWAPCACHE_FAIL;
0768     return ZSWAP_SWAPCACHE_EXIST;
0769 }
0770 
0771 /*
0772  * Attempts to free an entry by adding a page to the swap cache,
0773  * decompressing the entry data into the page, and issuing a
0774  * bio write to write the page back to the swap device.
0775  *
0776  * This can be thought of as a "resumed writeback" of the page
0777  * to the swap device.  We are basically resuming the same swap
0778  * writeback path that was intercepted with the frontswap_store()
0779  * in the first place.  After the page has been decompressed into
0780  * the swap cache, the compressed version stored by zswap can be
0781  * freed.
0782  */
0783 static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
0784 {
0785     struct zswap_header *zhdr;
0786     swp_entry_t swpentry;
0787     struct zswap_tree *tree;
0788     pgoff_t offset;
0789     struct zswap_entry *entry;
0790     struct page *page;
0791     struct crypto_comp *tfm;
0792     u8 *src, *dst;
0793     unsigned int dlen;
0794     int ret;
0795     struct writeback_control wbc = {
0796         .sync_mode = WB_SYNC_NONE,
0797     };
0798 
0799     /* extract swpentry from data */
0800     zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
0801     swpentry = zhdr->swpentry; /* here */
0802     zpool_unmap_handle(pool, handle);
0803     tree = zswap_trees[swp_type(swpentry)];
0804     offset = swp_offset(swpentry);
0805 
0806     /* find and ref zswap entry */
0807     spin_lock(&tree->lock);
0808     entry = zswap_entry_find_get(&tree->rbroot, offset);
0809     if (!entry) {
0810         /* entry was invalidated */
0811         spin_unlock(&tree->lock);
0812         return 0;
0813     }
0814     spin_unlock(&tree->lock);
0815     BUG_ON(offset != entry->offset);
0816 
0817     /* try to allocate swap cache page */
0818     switch (zswap_get_swap_cache_page(swpentry, &page)) {
0819     case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
0820         ret = -ENOMEM;
0821         goto fail;
0822 
0823     case ZSWAP_SWAPCACHE_EXIST:
0824         /* page is already in the swap cache, ignore for now */
0825         put_page(page);
0826         ret = -EEXIST;
0827         goto fail;
0828 
0829     case ZSWAP_SWAPCACHE_NEW: /* page is locked */
0830         /* decompress */
0831         dlen = PAGE_SIZE;
0832         src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
0833                 ZPOOL_MM_RO) + sizeof(struct zswap_header);
0834         dst = kmap_atomic(page);
0835         tfm = *get_cpu_ptr(entry->pool->tfm);
0836         ret = crypto_comp_decompress(tfm, src, entry->length,
0837                          dst, &dlen);
0838         put_cpu_ptr(entry->pool->tfm);
0839         kunmap_atomic(dst);
0840         zpool_unmap_handle(entry->pool->zpool, entry->handle);
0841         BUG_ON(ret);
0842         BUG_ON(dlen != PAGE_SIZE);
0843 
0844         /* page is up to date */
0845         SetPageUptodate(page);
0846     }
0847 
0848     /* move it to the tail of the inactive list after end_writeback */
0849     SetPageReclaim(page);
0850 
0851     /* start writeback */
0852     __swap_writepage(page, &wbc, end_swap_bio_write);
0853     put_page(page);
0854     zswap_written_back_pages++;
0855 
0856     spin_lock(&tree->lock);
0857     /* drop local reference */
0858     zswap_entry_put(tree, entry);
0859 
0860     /*
0861     * There are two possible situations for the entry here:
0862     * (1) refcount is 1 (normal case): the entry is valid and on the tree
0863     * (2) refcount is 0: the entry has been freed and is not on the tree,
0864     *     because an invalidate happened during writeback;
0865     * search the tree and drop the tree reference if the entry is found
0866     */
0867     if (entry == zswap_rb_search(&tree->rbroot, offset))
0868         zswap_entry_put(tree, entry);
0869     spin_unlock(&tree->lock);
0870 
0871     goto end;
0872 
0873     /*
0874     * if we get here due to ZSWAP_SWAPCACHE_EXIST,
0875     * a load may be happening concurrently;
0876     * it is safe and okay to not free the entry.
0877     * if we do free the entry in the following put,
0878     * it is also okay to return !0
0879     */
0880 fail:
0881     spin_lock(&tree->lock);
0882     zswap_entry_put(tree, entry);
0883     spin_unlock(&tree->lock);
0884 
0885 end:
0886     return ret;
0887 }
0888 
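     /*
      * Asks the last (oldest) pool on the list to write back one stored page;
      * the zpool does this through the registered ->evict callback,
      * zswap_writeback_entry().
      */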
0889 static int zswap_shrink(void)
0890 {
0891     struct zswap_pool *pool;
0892     int ret;
0893 
0894     pool = zswap_pool_last_get();
0895     if (!pool)
0896         return -ENOENT;
0897 
0898     ret = zpool_shrink(pool->zpool, 1, NULL);
0899 
0900     zswap_pool_put(pool);
0901 
0902     return ret;
0903 }
0904 
0905 /*********************************
0906 * frontswap hooks
0907 **********************************/
0908 /* attempts to compress and store a single page */
0909 static int zswap_frontswap_store(unsigned type, pgoff_t offset,
0910                 struct page *page)
0911 {
0912     struct zswap_tree *tree = zswap_trees[type];
0913     struct zswap_entry *entry, *dupentry;
0914     struct crypto_comp *tfm;
0915     int ret;
0916     unsigned int dlen = PAGE_SIZE, len;
0917     unsigned long handle;
0918     char *buf;
0919     u8 *src, *dst;
0920     struct zswap_header *zhdr;
0921 
0922     if (!zswap_enabled || !tree) {
0923         ret = -ENODEV;
0924         goto reject;
0925     }
0926 
0927     /* reclaim space if needed */
0928     if (zswap_is_full()) {
0929         zswap_pool_limit_hit++;
0930         if (zswap_shrink()) {
0931             zswap_reject_reclaim_fail++;
0932             ret = -ENOMEM;
0933             goto reject;
0934         }
0935     }
0936 
0937     /* allocate entry */
0938     entry = zswap_entry_cache_alloc(GFP_KERNEL);
0939     if (!entry) {
0940         zswap_reject_kmemcache_fail++;
0941         ret = -ENOMEM;
0942         goto reject;
0943     }
0944 
0945     /* if entry is successfully added, it keeps the reference */
0946     entry->pool = zswap_pool_current_get();
0947     if (!entry->pool) {
0948         ret = -EINVAL;
0949         goto freepage;
0950     }
0951 
0952     /* compress */
0953     dst = get_cpu_var(zswap_dstmem);
0954     tfm = *get_cpu_ptr(entry->pool->tfm);
0955     src = kmap_atomic(page);
0956     ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
0957     kunmap_atomic(src);
0958     put_cpu_ptr(entry->pool->tfm);
0959     if (ret) {
0960         ret = -EINVAL;
0961         goto put_dstmem;
0962     }
0963 
0964     /* store */
0965     len = dlen + sizeof(struct zswap_header);
0966     ret = zpool_malloc(entry->pool->zpool, len,
0967                __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM,
0968                &handle);
0969     if (ret == -ENOSPC) {
0970         zswap_reject_compress_poor++;
0971         goto put_dstmem;
0972     }
0973     if (ret) {
0974         zswap_reject_alloc_fail++;
0975         goto put_dstmem;
0976     }
0977     zhdr = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
0978     zhdr->swpentry = swp_entry(type, offset);
0979     buf = (u8 *)(zhdr + 1);
0980     memcpy(buf, dst, dlen);
0981     zpool_unmap_handle(entry->pool->zpool, handle);
0982     put_cpu_var(zswap_dstmem);
0983 
0984     /* populate entry */
0985     entry->offset = offset;
0986     entry->handle = handle;
0987     entry->length = dlen;
0988 
0989     /* map */
0990     spin_lock(&tree->lock);
0991     do {
0992         ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
0993         if (ret == -EEXIST) {
0994             zswap_duplicate_entry++;
0995             /* remove from rbtree */
0996             zswap_rb_erase(&tree->rbroot, dupentry);
0997             zswap_entry_put(tree, dupentry);
0998         }
0999     } while (ret == -EEXIST);
1000     spin_unlock(&tree->lock);
1001 
1002     /* update stats */
1003     atomic_inc(&zswap_stored_pages);
1004     zswap_update_total_size();
1005 
1006     return 0;
1007 
1008 put_dstmem:
1009     put_cpu_var(zswap_dstmem);
1010     zswap_pool_put(entry->pool);
1011 freepage:
1012     zswap_entry_cache_free(entry);
1013 reject:
1014     return ret;
1015 }
1016 
1017 /*
1018  * returns 0 if the page was successfully decompressed
1019  * returns -1 if the entry was not found or on error
1020  */
1021 static int zswap_frontswap_load(unsigned type, pgoff_t offset,
1022                 struct page *page)
1023 {
1024     struct zswap_tree *tree = zswap_trees[type];
1025     struct zswap_entry *entry;
1026     struct crypto_comp *tfm;
1027     u8 *src, *dst;
1028     unsigned int dlen;
1029     int ret;
1030 
1031     /* find */
1032     spin_lock(&tree->lock);
1033     entry = zswap_entry_find_get(&tree->rbroot, offset);
1034     if (!entry) {
1035         /* entry was written back */
1036         spin_unlock(&tree->lock);
1037         return -1;
1038     }
1039     spin_unlock(&tree->lock);
1040 
1041     /* decompress */
1042     dlen = PAGE_SIZE;
1043     src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
1044             ZPOOL_MM_RO) + sizeof(struct zswap_header);
1045     dst = kmap_atomic(page);
1046     tfm = *get_cpu_ptr(entry->pool->tfm);
1047     ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
1048     put_cpu_ptr(entry->pool->tfm);
1049     kunmap_atomic(dst);
1050     zpool_unmap_handle(entry->pool->zpool, entry->handle);
1051     BUG_ON(ret);
1052 
1053     spin_lock(&tree->lock);
1054     zswap_entry_put(tree, entry);
1055     spin_unlock(&tree->lock);
1056 
1057     return 0;
1058 }
1059 
1060 /* frees an entry in zswap */
1061 static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
1062 {
1063     struct zswap_tree *tree = zswap_trees[type];
1064     struct zswap_entry *entry;
1065 
1066     /* find */
1067     spin_lock(&tree->lock);
1068     entry = zswap_rb_search(&tree->rbroot, offset);
1069     if (!entry) {
1070         /* entry was written back */
1071         spin_unlock(&tree->lock);
1072         return;
1073     }
1074 
1075     /* remove from rbtree */
1076     zswap_rb_erase(&tree->rbroot, entry);
1077 
1078     /* drop the initial reference from entry creation */
1079     zswap_entry_put(tree, entry);
1080 
1081     spin_unlock(&tree->lock);
1082 }
1083 
1084 /* frees all zswap entries for the given swap type */
1085 static void zswap_frontswap_invalidate_area(unsigned type)
1086 {
1087     struct zswap_tree *tree = zswap_trees[type];
1088     struct zswap_entry *entry, *n;
1089 
1090     if (!tree)
1091         return;
1092 
1093     /* walk the tree and free everything */
1094     spin_lock(&tree->lock);
1095     rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
1096         zswap_free_entry(entry);
1097     tree->rbroot = RB_ROOT;
1098     spin_unlock(&tree->lock);
1099     kfree(tree);
1100     zswap_trees[type] = NULL;
1101 }
1102 
1103 static void zswap_frontswap_init(unsigned type)
1104 {
1105     struct zswap_tree *tree;
1106 
1107     tree = kzalloc(sizeof(struct zswap_tree), GFP_KERNEL);
1108     if (!tree) {
1109         pr_err("alloc failed, zswap disabled for swap type %d\n", type);
1110         return;
1111     }
1112 
1113     tree->rbroot = RB_ROOT;
1114     spin_lock_init(&tree->lock);
1115     zswap_trees[type] = tree;
1116 }
1117 
1118 static struct frontswap_ops zswap_frontswap_ops = {
1119     .store = zswap_frontswap_store,
1120     .load = zswap_frontswap_load,
1121     .invalidate_page = zswap_frontswap_invalidate_page,
1122     .invalidate_area = zswap_frontswap_invalidate_area,
1123     .init = zswap_frontswap_init
1124 };
1125 
1126 /*********************************
1127 * debugfs functions
1128 **********************************/
1129 #ifdef CONFIG_DEBUG_FS
1130 #include <linux/debugfs.h>
1131 
1132 static struct dentry *zswap_debugfs_root;
1133 
1134 static int __init zswap_debugfs_init(void)
1135 {
1136     if (!debugfs_initialized())
1137         return -ENODEV;
1138 
1139     zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
1140     if (!zswap_debugfs_root)
1141         return -ENOMEM;
1142 
1143     debugfs_create_u64("pool_limit_hit", S_IRUGO,
1144             zswap_debugfs_root, &zswap_pool_limit_hit);
1145     debugfs_create_u64("reject_reclaim_fail", S_IRUGO,
1146             zswap_debugfs_root, &zswap_reject_reclaim_fail);
1147     debugfs_create_u64("reject_alloc_fail", S_IRUGO,
1148             zswap_debugfs_root, &zswap_reject_alloc_fail);
1149     debugfs_create_u64("reject_kmemcache_fail", S_IRUGO,
1150             zswap_debugfs_root, &zswap_reject_kmemcache_fail);
1151     debugfs_create_u64("reject_compress_poor", S_IRUGO,
1152             zswap_debugfs_root, &zswap_reject_compress_poor);
1153     debugfs_create_u64("written_back_pages", S_IRUGO,
1154             zswap_debugfs_root, &zswap_written_back_pages);
1155     debugfs_create_u64("duplicate_entry", S_IRUGO,
1156             zswap_debugfs_root, &zswap_duplicate_entry);
1157     debugfs_create_u64("pool_total_size", S_IRUGO,
1158             zswap_debugfs_root, &zswap_pool_total_size);
1159     debugfs_create_atomic_t("stored_pages", S_IRUGO,
1160             zswap_debugfs_root, &zswap_stored_pages);
1161 
1162     return 0;
1163 }
1164 
1165 static void __exit zswap_debugfs_exit(void)
1166 {
1167     debugfs_remove_recursive(zswap_debugfs_root);
1168 }
1169 #else
1170 static int __init zswap_debugfs_init(void)
1171 {
1172     return 0;
1173 }
1174 
1175 static void __exit zswap_debugfs_exit(void) { }
1176 #endif
1177 
1178 /*********************************
1179 * module init and exit
1180 **********************************/
1181 static int __init init_zswap(void)
1182 {
1183     struct zswap_pool *pool;
1184     int ret;
1185 
1186     zswap_init_started = true;
1187 
1188     if (zswap_entry_cache_create()) {
1189         pr_err("entry cache creation failed\n");
1190         goto cache_fail;
1191     }
1192 
1193     ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
1194                 zswap_dstmem_prepare, zswap_dstmem_dead);
1195     if (ret) {
1196         pr_err("dstmem alloc failed\n");
1197         goto dstmem_fail;
1198     }
1199 
1200     ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
1201                       "mm/zswap_pool:prepare",
1202                       zswap_cpu_comp_prepare,
1203                       zswap_cpu_comp_dead);
1204     if (ret)
1205         goto hp_fail;
1206 
1207     pool = __zswap_pool_create_fallback();
1208     if (!pool) {
1209         pr_err("pool creation failed\n");
1210         goto pool_fail;
1211     }
1212     pr_info("loaded using pool %s/%s\n", pool->tfm_name,
1213         zpool_get_type(pool->zpool));
1214 
1215     list_add(&pool->list, &zswap_pools);
1216 
1217     frontswap_register_ops(&zswap_frontswap_ops);
1218     if (zswap_debugfs_init())
1219         pr_warn("debugfs initialization failed\n");
1220     return 0;
1221 
1222 pool_fail:
1223     cpuhp_remove_state_nocalls(CPUHP_MM_ZSWP_POOL_PREPARE);
1224 hp_fail:
1225     cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
1226 dstmem_fail:
1227     zswap_entry_cache_destroy();
1228 cache_fail:
1229     /* if built-in, we aren't unloaded on failure; don't allow use */
1230     zswap_init_failed = true;
1231     zswap_enabled = false;
1232     return -ENOMEM;
1233 }
1234 /* must be late so crypto has time to come up */
1235 late_initcall(init_zswap);
1236 
1237 MODULE_LICENSE("GPL");
1238 MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
1239 MODULE_DESCRIPTION("Compressed cache for swap pages");