Back to home page

LXR

 
 

    


0001 /*
0002  * z3fold.c
0003  *
0004  * Author: Vitaly Wool <vitaly.wool@konsulko.com>
0005  * Copyright (C) 2016, Sony Mobile Communications Inc.
0006  *
0007  * This implementation is based on zbud written by Seth Jennings.
0008  *
 * z3fold is a special purpose allocator for storing compressed pages. It
0010  * can store up to three compressed pages per page which improves the
0011  * compression ratio of zbud while retaining its main concepts (e. g. always
0012  * storing an integral number of objects per page) and simplicity.
0013  * It still has simple and deterministic reclaim properties that make it
0014  * preferable to a higher density approach (with no requirement on integral
0015  * number of object per page) when reclaim is used.
0016  *
0017  * As in zbud, pages are divided into "chunks".  The size of the chunks is
0018  * fixed at compile time and is determined by NCHUNKS_ORDER below.
0019  *
0020  * z3fold doesn't export any API and is meant to be used via zpool API.
0021  */
0022 
0023 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0024 
0025 #include <linux/atomic.h>
0026 #include <linux/list.h>
0027 #include <linux/mm.h>
0028 #include <linux/module.h>
0029 #include <linux/preempt.h>
0030 #include <linux/slab.h>
0031 #include <linux/spinlock.h>
0032 #include <linux/zpool.h>
0033 
0034 /*****************
0035  * Structures
0036 *****************/
0037 /*
0038  * NCHUNKS_ORDER determines the internal allocation granularity, effectively
0039  * adjusting internal fragmentation.  It also determines the number of
0040  * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
0041  * allocation granularity will be in chunks of size PAGE_SIZE/64. As one chunk
0042  * in allocated page is occupied by z3fold header, NCHUNKS will be calculated
0043  * to 63 which shows the max number of free chunks in z3fold page, also there
0044  * will be 63 freelists per pool.
0045  */
0046 #define NCHUNKS_ORDER   6
0047 
0048 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
0049 #define CHUNK_SIZE  (1 << CHUNK_SHIFT)
0050 #define ZHDR_SIZE_ALIGNED CHUNK_SIZE
0051 #define NCHUNKS     ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
0052 
0053 #define BUDDY_MASK  ((1 << NCHUNKS_ORDER) - 1)
0054 
struct z3fold_pool;
/*
 * User-supplied callbacks, invoked during page reclaim.
 * evict() should try to free the object behind @handle and return 0 on
 * success (having called z3fold_free() on it), non-zero otherwise.
 */
struct z3fold_ops {
    int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};
0059 
/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @lock:   protects all pool fields and first|last_chunk fields of any
 *      z3fold page in the pool
 * @unbuddied:  array of lists tracking z3fold pages that contain 2- buddies;
 *      the lists each z3fold page is added to depends on the size of
 *      its free region.
 * @buddied:    list tracking the z3fold pages that contain 3 buddies;
 *      these z3fold pages are full
 * @lru:    list tracking the z3fold pages in LRU order by most recently
 *      added buddy.
 * @pages_nr:   number of z3fold pages in the pool.
 * @ops:    pointer to a structure of user defined operations specified at
 *      pool creation time.
 * @zpool:  zpool driver back-reference, set when the pool is created
 *      through the zpool API
 * @zpool_ops:  reference to the zpool user's eviction callbacks
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
0078 struct z3fold_pool {
0079     spinlock_t lock;
0080     struct list_head unbuddied[NCHUNKS];
0081     struct list_head buddied;
0082     struct list_head lru;
0083     u64 pages_nr;
0084     const struct z3fold_ops *ops;
0085     struct zpool *zpool;
0086     const struct zpool_ops *zpool_ops;
0087 };
0088 
/* Position of an object ("buddy") within a z3fold page */
enum buddy {
    HEADLESS = 0,   /* page holds a single object and no header */
    FIRST,          /* object placed right after the header chunk */
    MIDDLE,         /* object between the first and the last ones */
    LAST,           /* object packed against the end of the page */
    BUDDIES_MAX
};
0096 
/*
 * struct z3fold_header - z3fold page metadata occupying the first chunk of each
 *          z3fold page, except for HEADLESS pages
 * @buddy:  links the z3fold page into the relevant list in the pool
 * @first_chunks:   the size of the first buddy in chunks, 0 if free
 * @middle_chunks:  the size of the middle buddy in chunks, 0 if free
 * @last_chunks:    the size of the last buddy in chunks, 0 if free
 * @start_middle:   the chunk offset (from the page start) at which the
 *          middle buddy begins
 * @first_num:      the starting number (for the first handle)
 */
struct z3fold_header {
    struct list_head buddy;         /* link in the buddied/unbuddied lists */
    unsigned short first_chunks;    /* size of the first buddy, 0 if free */
    unsigned short middle_chunks;   /* size of the middle buddy, 0 if free */
    unsigned short last_chunks;     /* size of the last buddy, 0 if free */
    unsigned short start_middle;    /* chunk offset of the middle buddy from page start */
    unsigned short first_num:NCHUNKS_ORDER; /* rolling base for handle encoding */
};
0114 
0115 /*
0116  * Internal z3fold page flags
0117  */
/* Per-page state bits, kept in the struct page's private field */
enum z3fold_page_flags {
    UNDER_RECLAIM = 0,      /* page being reclaimed; z3fold_free() must not release it */
    PAGE_HEADLESS,          /* page stores one object and has no z3fold header */
    MIDDLE_CHUNK_MAPPED,    /* middle buddy is mapped; compaction must not move it */
};
0123 
0124 /*****************
0125  * Helpers
0126 *****************/
0127 
0128 /* Converts an allocation size in bytes to size in z3fold chunks */
0129 static int size_to_chunks(size_t size)
0130 {
0131     return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
0132 }
0133 
0134 #define for_each_unbuddied_list(_iter, _begin) \
0135     for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
0136 
0137 /* Initializes the z3fold header of a newly allocated z3fold page */
0138 static struct z3fold_header *init_z3fold_page(struct page *page)
0139 {
0140     struct z3fold_header *zhdr = page_address(page);
0141 
0142     INIT_LIST_HEAD(&page->lru);
0143     clear_bit(UNDER_RECLAIM, &page->private);
0144     clear_bit(PAGE_HEADLESS, &page->private);
0145     clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
0146 
0147     zhdr->first_chunks = 0;
0148     zhdr->middle_chunks = 0;
0149     zhdr->last_chunks = 0;
0150     zhdr->first_num = 0;
0151     zhdr->start_middle = 0;
0152     INIT_LIST_HEAD(&zhdr->buddy);
0153     return zhdr;
0154 }
0155 
0156 /* Resets the struct page fields and frees the page */
/* Locate the backing struct page from the in-page header and release it */
static void free_z3fold_page(struct z3fold_header *zhdr)
{
    struct page *page = virt_to_page(zhdr);

    __free_page(page);
}
0161 
0162 /*
0163  * Encodes the handle of a particular buddy within a z3fold page
0164  * Pool lock should be held as this function accesses first_num
0165  */
0166 static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
0167 {
0168     unsigned long handle;
0169 
0170     handle = (unsigned long)zhdr;
0171     if (bud != HEADLESS)
0172         handle += (bud + zhdr->first_num) & BUDDY_MASK;
0173     return handle;
0174 }
0175 
0176 /* Returns the z3fold page where a given handle is stored */
0177 static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
0178 {
0179     return (struct z3fold_header *)(handle & PAGE_MASK);
0180 }
0181 
0182 /* Returns buddy number */
0183 static enum buddy handle_to_buddy(unsigned long handle)
0184 {
0185     struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
0186     return (handle - zhdr->first_num) & BUDDY_MASK;
0187 }
0188 
0189 /*
0190  * Returns the number of free chunks in a z3fold page.
0191  * NB: can't be used with HEADLESS pages.
0192  */
static int num_free_chunks(struct z3fold_header *zhdr)
{
    int nfree;
    /*
     * If there is a middle object, pick up the bigger free space
     * either before or after it. Otherwise just subtract the number
     * of chunks occupied by the first and the last objects.
     */
    if (zhdr->middle_chunks != 0) {
        /* chunk 0 holds the header, so chunks 1..start_middle-1 are free */
        int nfree_before = zhdr->first_chunks ?
            0 : zhdr->start_middle - 1;
        /*
         * NOTE(review): start_middle is an offset from the page start
         * (see z3fold_map), while NCHUNKS already excludes the header
         * chunk — this looks like it undercounts the after-middle gap
         * by one chunk. Conservative (pages land on a smaller-index
         * unbuddied list), but worth confirming.
         */
        int nfree_after = zhdr->last_chunks ?
            0 : NCHUNKS - zhdr->start_middle - zhdr->middle_chunks;
        nfree = max(nfree_before, nfree_after);
    } else
        nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
    return nfree;
}
0211 
0212 /*****************
0213  * API Functions
0214 *****************/
0215 /**
0216  * z3fold_create_pool() - create a new z3fold pool
0217  * @gfp:    gfp flags when allocating the z3fold pool structure
0218  * @ops:    user-defined operations for the z3fold pool
0219  *
0220  * Return: pointer to the new z3fold pool or NULL if the metadata allocation
0221  * failed.
0222  */
0223 static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,
0224         const struct z3fold_ops *ops)
0225 {
0226     struct z3fold_pool *pool;
0227     int i;
0228 
0229     pool = kzalloc(sizeof(struct z3fold_pool), gfp);
0230     if (!pool)
0231         return NULL;
0232     spin_lock_init(&pool->lock);
0233     for_each_unbuddied_list(i, 0)
0234         INIT_LIST_HEAD(&pool->unbuddied[i]);
0235     INIT_LIST_HEAD(&pool->buddied);
0236     INIT_LIST_HEAD(&pool->lru);
0237     pool->pages_nr = 0;
0238     pool->ops = ops;
0239     return pool;
0240 }
0241 
0242 /**
0243  * z3fold_destroy_pool() - destroys an existing z3fold pool
0244  * @pool:   the z3fold pool to be destroyed
0245  *
0246  * The pool should be emptied before this function is called.
0247  */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
    /* Caller guarantees the pool is already empty; just drop the descriptor */
    kfree(pool);
}
0252 
0253 /* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
    struct page *page = virt_to_page(zhdr);
    void *beg = zhdr;


    /*
     * Only a lone middle buddy can be moved, and only while nobody has
     * it mapped (z3fold_map() hands out a direct pointer into the page).
     */
    if (!test_bit(MIDDLE_CHUNK_MAPPED, &page->private) &&
        zhdr->middle_chunks != 0 &&
        zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
        /* Slide the middle object down into the FIRST slot, right
         * after the header chunk. */
        memmove(beg + ZHDR_SIZE_ALIGNED,
            beg + (zhdr->start_middle << CHUNK_SHIFT),
            zhdr->middle_chunks << CHUNK_SHIFT);
        zhdr->first_chunks = zhdr->middle_chunks;
        zhdr->middle_chunks = 0;
        zhdr->start_middle = 0;
        /* Presumably bumped so a stale MIDDLE handle does not decode
         * to the relocated object (see handle_to_buddy) — confirm. */
        zhdr->first_num++;
        return 1;
    }
    return 0;
}
0274 
0275 /**
0276  * z3fold_alloc() - allocates a region of a given size
0277  * @pool:   z3fold pool from which to allocate
0278  * @size:   size in bytes of the desired allocation
0279  * @gfp:    gfp flags used if the pool needs to grow
0280  * @handle: handle of the new allocation
0281  *
0282  * This function will attempt to find a free region in the pool large enough to
0283  * satisfy the allocation request.  A search of the unbuddied lists is
0284  * performed first. If no suitable free region is found, then a new page is
0285  * allocated and added to the pool to satisfy the request.
0286  *
0287  * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
0288  * as z3fold pool pages.
0289  *
0290  * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
0291  * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
0292  * a new page.
0293  */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
            unsigned long *handle)
{
    int chunks = 0, i, freechunks;
    struct z3fold_header *zhdr = NULL;
    enum buddy bud;
    struct page *page;

    /* Highmem pages can't back the pool: headers are reached via
     * page_address() (see init_z3fold_page). */
    if (!size || (gfp & __GFP_HIGHMEM))
        return -EINVAL;

    if (size > PAGE_SIZE)
        return -ENOSPC;

    /* Requests too large to share a page with the header get a whole
     * ("headless") page to themselves. */
    if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
        bud = HEADLESS;
    else {
        chunks = size_to_chunks(size);
        spin_lock(&pool->lock);

        /* First, try to find an unbuddied z3fold page. */
        zhdr = NULL;
        for_each_unbuddied_list(i, chunks) {
            if (!list_empty(&pool->unbuddied[i])) {
                zhdr = list_first_entry(&pool->unbuddied[i],
                        struct z3fold_header, buddy);
                page = virt_to_page(zhdr);
                if (zhdr->first_chunks == 0) {
                    /* Prefer LAST when the middle object
                     * leaves enough room at the page end. */
                    if (zhdr->middle_chunks != 0 &&
                        chunks >= zhdr->start_middle)
                        bud = LAST;
                    else
                        bud = FIRST;
                } else if (zhdr->last_chunks == 0)
                    bud = LAST;
                else if (zhdr->middle_chunks == 0)
                    bud = MIDDLE;
                else {
                    /* A full page on an unbuddied list means
                     * the accounting is corrupted. */
                    pr_err("No free chunks in unbuddied\n");
                    WARN_ON(1);
                    continue;
                }
                /* Page gets re-filed below; lock stays held
                 * across the jump. */
                list_del(&zhdr->buddy);
                goto found;
            }
        }
        bud = FIRST;
        spin_unlock(&pool->lock);
    }

    /* Couldn't find unbuddied z3fold page, create new one */
    page = alloc_page(gfp);
    if (!page)
        return -ENOMEM;
    spin_lock(&pool->lock);
    pool->pages_nr++;
    zhdr = init_z3fold_page(page);

    if (bud == HEADLESS) {
        set_bit(PAGE_HEADLESS, &page->private);
        goto headless;
    }

found:
    if (bud == FIRST)
        zhdr->first_chunks = chunks;
    else if (bud == LAST)
        zhdr->last_chunks = chunks;
    else {
        zhdr->middle_chunks = chunks;
        /* Middle starts right after the first buddy; the +1 skips
         * the header chunk at index 0. */
        zhdr->start_middle = zhdr->first_chunks + 1;
    }

    if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
            zhdr->middle_chunks == 0) {
        /* Add to unbuddied list */
        freechunks = num_free_chunks(zhdr);
        list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
    } else {
        /* Add to buddied list */
        list_add(&zhdr->buddy, &pool->buddied);
    }

headless:
    /* Add/move z3fold page to beginning of LRU */
    if (!list_empty(&page->lru))
        list_del(&page->lru);

    list_add(&page->lru, &pool->lru);

    *handle = encode_handle(zhdr, bud);
    spin_unlock(&pool->lock);

    return 0;
}
0389 
/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:   pool in which the allocation resided
 * @handle: handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the UNDER_RECLAIM bit being set in the page's
 * private field, this function only sets the first|last_chunks to 0.  The
 * page is actually freed once both buddies are evicted (see
 * z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
    struct z3fold_header *zhdr;
    int freechunks;
    struct page *page;
    enum buddy bud;

    spin_lock(&pool->lock);
    zhdr = handle_to_z3fold_header(handle);
    page = virt_to_page(zhdr);

    if (test_bit(PAGE_HEADLESS, &page->private)) {
        /* HEADLESS page stored */
        bud = HEADLESS;
    } else {
        bud = handle_to_buddy(handle);

        /* Mark this buddy's slot free in the header */
        switch (bud) {
        case FIRST:
            zhdr->first_chunks = 0;
            break;
        case MIDDLE:
            zhdr->middle_chunks = 0;
            zhdr->start_middle = 0;
            break;
        case LAST:
            zhdr->last_chunks = 0;
            break;
        default:
            pr_err("%s: unknown bud %d\n", __func__, bud);
            WARN_ON(1);
            spin_unlock(&pool->lock);
            return;
        }
    }

    if (test_bit(UNDER_RECLAIM, &page->private)) {
        /* z3fold page is under reclaim, reclaim will free */
        spin_unlock(&pool->lock);
        return;
    }

    if (bud != HEADLESS) {
        /* Remove from existing buddy list */
        list_del(&zhdr->buddy);
    }

    if (bud == HEADLESS ||
        (zhdr->first_chunks == 0 && zhdr->middle_chunks == 0 &&
            zhdr->last_chunks == 0)) {
        /* z3fold page is empty, free */
        list_del(&page->lru);
        clear_bit(PAGE_HEADLESS, &page->private);
        free_z3fold_page(zhdr);
        pool->pages_nr--;
    } else {
        /* Try to collapse a lone middle buddy into the FIRST slot,
         * then re-file by the recomputed free-chunk count. */
        z3fold_compact_page(zhdr);
        /* Add to the unbuddied list */
        freechunks = num_free_chunks(zhdr);
        list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
    }

    spin_unlock(&pool->lock);
}
0464 
/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:   pool from which a page will attempt to be evicted
 * @retries:    number of pages on the LRU list for which eviction will
 *      be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the UNDER_RECLAIM bit being set in the underlying page's
 * private field.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
    int i, ret = 0, freechunks;
    struct z3fold_header *zhdr;
    struct page *page;
    unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

    spin_lock(&pool->lock);
    if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||
            retries == 0) {
        spin_unlock(&pool->lock);
        return -EINVAL;
    }
    for (i = 0; i < retries; i++) {
        /* Reclaim the least recently used page (LRU tail) */
        page = list_last_entry(&pool->lru, struct page, lru);
        list_del(&page->lru);

        /* Protect z3fold page against free */
        set_bit(UNDER_RECLAIM, &page->private);
        zhdr = page_address(page);
        if (!test_bit(PAGE_HEADLESS, &page->private)) {
            list_del(&zhdr->buddy);
            /*
             * We need encode the handles before unlocking, since
             * we can race with free that will set
             * (first|last)_chunks to 0
             */
            first_handle = 0;
            last_handle = 0;
            middle_handle = 0;
            if (zhdr->first_chunks)
                first_handle = encode_handle(zhdr, FIRST);
            if (zhdr->middle_chunks)
                middle_handle = encode_handle(zhdr, MIDDLE);
            if (zhdr->last_chunks)
                last_handle = encode_handle(zhdr, LAST);
        } else {
            first_handle = encode_handle(zhdr, HEADLESS);
            last_handle = middle_handle = 0;
        }

        /* Drop the pool lock: the eviction handler may call back
         * into z3fold (e.g. z3fold_free()). */
        spin_unlock(&pool->lock);

        /* Issue the eviction callback(s) */
        if (middle_handle) {
            ret = pool->ops->evict(pool, middle_handle);
            if (ret)
                goto next;
        }
        if (first_handle) {
            ret = pool->ops->evict(pool, first_handle);
            if (ret)
                goto next;
        }
        if (last_handle) {
            ret = pool->ops->evict(pool, last_handle);
            if (ret)
                goto next;
        }
next:
        spin_lock(&pool->lock);
        clear_bit(UNDER_RECLAIM, &page->private);
        if ((test_bit(PAGE_HEADLESS, &page->private) && ret == 0) ||
            (zhdr->first_chunks == 0 && zhdr->last_chunks == 0 &&
             zhdr->middle_chunks == 0)) {
            /*
             * All buddies are now free, free the z3fold page and
             * return success.
             */
            clear_bit(PAGE_HEADLESS, &page->private);
            free_z3fold_page(zhdr);
            pool->pages_nr--;
            spin_unlock(&pool->lock);
            return 0;
        }  else if (!test_bit(PAGE_HEADLESS, &page->private)) {
            if (zhdr->first_chunks != 0 &&
                zhdr->last_chunks != 0 &&
                zhdr->middle_chunks != 0) {
                /* Full, add to buddied list */
                list_add(&zhdr->buddy, &pool->buddied);
            } else {
                z3fold_compact_page(zhdr);
                /* add to unbuddied list */
                freechunks = num_free_chunks(zhdr);
                list_add(&zhdr->buddy,
                     &pool->unbuddied[freechunks]);
            }
        }

        /* add to beginning of LRU */
        list_add(&page->lru, &pool->lru);
    }
    spin_unlock(&pool->lock);
    return -EAGAIN;
}
0596 
0597 /**
0598  * z3fold_map() - maps the allocation associated with the given handle
0599  * @pool:   pool in which the allocation resides
0600  * @handle: handle associated with the allocation to be mapped
0601  *
0602  * Extracts the buddy number from handle and constructs the pointer to the
0603  * correct starting chunk within the page.
0604  *
0605  * Returns: a pointer to the mapped allocation
0606  */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
    struct z3fold_header *zhdr;
    struct page *page;
    void *addr;
    enum buddy buddy;

    spin_lock(&pool->lock);
    zhdr = handle_to_z3fold_header(handle);
    addr = zhdr;
    page = virt_to_page(zhdr);

    /* A headless page holds its object at the very start of the page */
    if (test_bit(PAGE_HEADLESS, &page->private))
        goto out;

    buddy = handle_to_buddy(handle);
    switch (buddy) {
    case FIRST:
        /* First object lives right after the header chunk */
        addr += ZHDR_SIZE_ALIGNED;
        break;
    case MIDDLE:
        addr += zhdr->start_middle << CHUNK_SHIFT;
        /* Pin the middle object so compaction won't move it while mapped */
        set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
        break;
    case LAST:
        /* Last object is packed against the end of the page */
        addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
        break;
    default:
        pr_err("unknown buddy id %d\n", buddy);
        WARN_ON(1);
        addr = NULL;
        break;
    }
out:
    spin_unlock(&pool->lock);
    return addr;
}
0644 
0645 /**
0646  * z3fold_unmap() - unmaps the allocation associated with the given handle
0647  * @pool:   pool in which the allocation resides
0648  * @handle: handle associated with the allocation to be unmapped
0649  */
0650 static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
0651 {
0652     struct z3fold_header *zhdr;
0653     struct page *page;
0654     enum buddy buddy;
0655 
0656     spin_lock(&pool->lock);
0657     zhdr = handle_to_z3fold_header(handle);
0658     page = virt_to_page(zhdr);
0659 
0660     if (test_bit(PAGE_HEADLESS, &page->private)) {
0661         spin_unlock(&pool->lock);
0662         return;
0663     }
0664 
0665     buddy = handle_to_buddy(handle);
0666     if (buddy == MIDDLE)
0667         clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
0668     spin_unlock(&pool->lock);
0669 }
0670 
0671 /**
0672  * z3fold_get_pool_size() - gets the z3fold pool size in pages
0673  * @pool:   pool whose size is being queried
0674  *
0675  * Returns: size in pages of the given pool.  The pool lock need not be
0676  * taken to access pages_nr.
0677  */
0678 static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
0679 {
0680     return pool->pages_nr;
0681 }
0682 
0683 /*****************
0684  * zpool
0685  ****************/
0686 
0687 static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
0688 {
0689     if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
0690         return pool->zpool_ops->evict(pool->zpool, handle);
0691     else
0692         return -ENOENT;
0693 }
0694 
/* Eviction callback table installed when the zpool user supplies ops */
static const struct z3fold_ops z3fold_zpool_ops = {
    .evict =    z3fold_zpool_evict
};
0698 
0699 static void *z3fold_zpool_create(const char *name, gfp_t gfp,
0700                    const struct zpool_ops *zpool_ops,
0701                    struct zpool *zpool)
0702 {
0703     struct z3fold_pool *pool;
0704 
0705     pool = z3fold_create_pool(gfp, zpool_ops ? &z3fold_zpool_ops : NULL);
0706     if (pool) {
0707         pool->zpool = zpool;
0708         pool->zpool_ops = zpool_ops;
0709     }
0710     return pool;
0711 }
0712 
/* zpool destroy hook: tear down the underlying z3fold pool */
static void z3fold_zpool_destroy(void *pool)
{
    z3fold_destroy_pool(pool);
}
0717 
0718 static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
0719             unsigned long *handle)
0720 {
0721     return z3fold_alloc(pool, size, gfp, handle);
0722 }
/* zpool free hook: delegate straight to z3fold_free() */
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
    z3fold_free(pool, handle);
}
0727 
0728 static int z3fold_zpool_shrink(void *pool, unsigned int pages,
0729             unsigned int *reclaimed)
0730 {
0731     unsigned int total = 0;
0732     int ret = -EINVAL;
0733 
0734     while (total < pages) {
0735         ret = z3fold_reclaim_page(pool, 8);
0736         if (ret < 0)
0737             break;
0738         total++;
0739     }
0740 
0741     if (reclaimed)
0742         *reclaimed = total;
0743 
0744     return ret;
0745 }
0746 
0747 static void *z3fold_zpool_map(void *pool, unsigned long handle,
0748             enum zpool_mapmode mm)
0749 {
0750     return z3fold_map(pool, handle);
0751 }
/* zpool unmap hook: delegate straight to z3fold_unmap() */
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
    z3fold_unmap(pool, handle);
}
0756 
0757 static u64 z3fold_zpool_total_size(void *pool)
0758 {
0759     return z3fold_get_pool_size(pool) * PAGE_SIZE;
0760 }
0761 
/* zpool driver glue: exposes z3fold through the generic zpool API */
static struct zpool_driver z3fold_zpool_driver = {
    .type =     "z3fold",
    .owner =    THIS_MODULE,
    .create =   z3fold_zpool_create,
    .destroy =  z3fold_zpool_destroy,
    .malloc =   z3fold_zpool_malloc,
    .free =     z3fold_zpool_free,
    .shrink =   z3fold_zpool_shrink,
    .map =      z3fold_zpool_map,
    .unmap =    z3fold_zpool_unmap,
    .total_size =   z3fold_zpool_total_size,
};
0774 
0775 MODULE_ALIAS("zpool-z3fold");
0776 
0777 static int __init init_z3fold(void)
0778 {
0779     /* Make sure the z3fold header will fit in one chunk */
0780     BUILD_BUG_ON(sizeof(struct z3fold_header) > ZHDR_SIZE_ALIGNED);
0781     zpool_register_driver(&z3fold_zpool_driver);
0782 
0783     return 0;
0784 }
0785 
0786 static void __exit exit_z3fold(void)
0787 {
0788     zpool_unregister_driver(&z3fold_zpool_driver);
0789 }
0790 
0791 module_init(init_z3fold);
0792 module_exit(exit_z3fold);
0793 
0794 MODULE_LICENSE("GPL");
0795 MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
0796 MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");