0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * mm/percpu.c - percpu memory allocator
0004  *
0005  * Copyright (C) 2009       SUSE Linux Products GmbH
0006  * Copyright (C) 2009       Tejun Heo <tj@kernel.org>
0007  *
0008  * Copyright (C) 2017       Facebook Inc.
0009  * Copyright (C) 2017       Dennis Zhou <dennis@kernel.org>
0010  *
0011  * The percpu allocator handles both static and dynamic areas.  Percpu
0012  * areas are allocated in chunks which are divided into units.  There is
0013  * a 1-to-1 mapping for units to possible cpus.  These units are grouped
0014  * based on NUMA properties of the machine.
0015  *
0016  *  c0                           c1                         c2
0017  *  -------------------          -------------------        ------------
0018  * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
0019  *  -------------------  ......  -------------------  ....  ------------
0020  *
0021  * Allocation is done by offsets into a unit's address space.  I.e., an
0022  * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
0023  * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
0024  * and even sparse.  Access is handled by configuring percpu base
0025  * registers according to the cpu to unit mappings and offsetting the
0026  * base address using pcpu_unit_size.
0027  *
0028  * There is special consideration for the first chunk which must handle
0029  * the static percpu variables in the kernel image as allocation services
0030  * are not online yet.  In short, the first chunk is structured like so:
0031  *
0032  *                  <Static | [Reserved] | Dynamic>
0033  *
0034  * The static data is copied from the original section managed by the
0035  * linker.  The reserved section, if non-zero, primarily manages static
0036  * percpu variables from kernel modules.  Finally, the dynamic section
0037  * takes care of normal allocations.
0038  *
0039  * The allocator organizes chunks into lists according to free size and
0040  * memcg-awareness.  To make a percpu allocation memcg-aware, the __GFP_ACCOUNT
0041  * flag should be passed.  All memcg-aware allocations share one set of
0042  * chunks, while unaccounted allocations and allocations performed by
0043  * processes belonging to the root memory cgroup use the second set.
0044  *
0045  * The allocator tries to allocate from the fullest chunk first. Each chunk
0046  * is managed by a bitmap with metadata blocks.  The allocation map is updated
0047  * on every allocation and free to reflect the current state while the boundary
0048  * map is only updated on allocation.  Each metadata block contains
0049  * information to help mitigate the need to iterate over large portions
0050  * of the bitmap.  The reverse mapping from page to chunk is stored in
0051  * the page's index.  Lastly, units are lazily backed and grow in unison.
0052  *
0053  * There is a unique conversion that goes on here between bytes and bits.
0054  * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
0055  * tracks the number of pages it is responsible for in nr_pages.  Helper
0056  * functions are used to convert between bytes, bits, and blocks.
0057  * All hints are managed in bits unless explicitly stated.
0058  *
0059  * To use this allocator, arch code should do the following:
0060  *
0061  * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
0062  *   regular address to percpu pointer and back if they need to be
0063  *   different from the default
0064  *
0065  * - use pcpu_setup_first_chunk() during percpu area initialization to
0066  *   setup the first chunk containing the kernel static percpu area
0067  */
0068 
0069 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0070 
0071 #include <linux/bitmap.h>
0072 #include <linux/cpumask.h>
0073 #include <linux/memblock.h>
0074 #include <linux/err.h>
0075 #include <linux/lcm.h>
0076 #include <linux/list.h>
0077 #include <linux/log2.h>
0078 #include <linux/mm.h>
0079 #include <linux/module.h>
0080 #include <linux/mutex.h>
0081 #include <linux/percpu.h>
0082 #include <linux/pfn.h>
0083 #include <linux/slab.h>
0084 #include <linux/spinlock.h>
0085 #include <linux/vmalloc.h>
0086 #include <linux/workqueue.h>
0087 #include <linux/kmemleak.h>
0088 #include <linux/sched.h>
0089 #include <linux/sched/mm.h>
0090 #include <linux/memcontrol.h>
0091 
0092 #include <asm/cacheflush.h>
0093 #include <asm/sections.h>
0094 #include <asm/tlbflush.h>
0095 #include <asm/io.h>
0096 
0097 #define CREATE_TRACE_POINTS
0098 #include <trace/events/percpu.h>
0099 
0100 #include "percpu-internal.h"
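
     /*
      * A minimal usage sketch of the interface this allocator backs.  The
      * function below is illustrative only and is not used anywhere in this
      * file: it allocates a dynamic percpu area, updates the local cpu's
      * copy, sums the copies of all possible cpus, and frees the area.
      */
     static int __maybe_unused pcpu_example_usage(void)
     {
         u64 __percpu *ctr;
         unsigned int cpu;
         u64 total = 0;

         ctr = alloc_percpu(u64);    /* one zeroed u64 in every cpu's unit */
         if (!ctr)
             return -ENOMEM;

         *get_cpu_ptr(ctr) += 1;     /* update this cpu's copy, preemption off */
         put_cpu_ptr(ctr);

         for_each_possible_cpu(cpu)  /* walk every unit of the allocation */
             total += *per_cpu_ptr(ctr, cpu);

         free_percpu(ctr);
         return total == 1 ? 0 : -EINVAL;
     }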
0101 
0102 /*
0103  * The slots are sorted by the size of the biggest contiguous free area.
0104  * 1-31 bytes share the same slot.
0105  */
0106 #define PCPU_SLOT_BASE_SHIFT        5
0107 /* chunks in slots below this are subject to being sidelined on failed alloc */
0108 #define PCPU_SLOT_FAIL_THRESHOLD    3
0109 
0110 #define PCPU_EMPTY_POP_PAGES_LOW    2
0111 #define PCPU_EMPTY_POP_PAGES_HIGH   4
0112 
0113 #ifdef CONFIG_SMP
0114 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
0115 #ifndef __addr_to_pcpu_ptr
0116 #define __addr_to_pcpu_ptr(addr)                    \
0117     (void __percpu *)((unsigned long)(addr) -           \
0118               (unsigned long)pcpu_base_addr +       \
0119               (unsigned long)__per_cpu_start)
0120 #endif
0121 #ifndef __pcpu_ptr_to_addr
0122 #define __pcpu_ptr_to_addr(ptr)                     \
0123     (void __force *)((unsigned long)(ptr) +             \
0124              (unsigned long)pcpu_base_addr -        \
0125              (unsigned long)__per_cpu_start)
0126 #endif
0127 #else   /* CONFIG_SMP */
0128 /* on UP, it's always identity mapped */
0129 #define __addr_to_pcpu_ptr(addr)    (void __percpu *)(addr)
0130 #define __pcpu_ptr_to_addr(ptr)     (void __force *)(ptr)
0131 #endif  /* CONFIG_SMP */
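
     /*
      * With the default definitions above, the two macros are exact
      * inverses: __addr_to_pcpu_ptr() subtracts the constant delta between
      * pcpu_base_addr and the linker-provided __per_cpu_start, and
      * __pcpu_ptr_to_addr() adds the same delta back, so converting an
      * address to a percpu pointer and back yields the original address.
      */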
0132 
0133 static int pcpu_unit_pages __ro_after_init;
0134 static int pcpu_unit_size __ro_after_init;
0135 static int pcpu_nr_units __ro_after_init;
0136 static int pcpu_atom_size __ro_after_init;
0137 int pcpu_nr_slots __ro_after_init;
0138 static int pcpu_free_slot __ro_after_init;
0139 int pcpu_sidelined_slot __ro_after_init;
0140 int pcpu_to_depopulate_slot __ro_after_init;
0141 static size_t pcpu_chunk_struct_size __ro_after_init;
0142 
0143 /* cpus with the lowest and highest unit addresses */
0144 static unsigned int pcpu_low_unit_cpu __ro_after_init;
0145 static unsigned int pcpu_high_unit_cpu __ro_after_init;
0146 
0147 /* the address of the first chunk which starts with the kernel static area */
0148 void *pcpu_base_addr __ro_after_init;
0149 
0150 static const int *pcpu_unit_map __ro_after_init;        /* cpu -> unit */
0151 const unsigned long *pcpu_unit_offsets __ro_after_init; /* cpu -> unit offset */
0152 
0153 /* group information, used for vm allocation */
0154 static int pcpu_nr_groups __ro_after_init;
0155 static const unsigned long *pcpu_group_offsets __ro_after_init;
0156 static const size_t *pcpu_group_sizes __ro_after_init;
0157 
0158 /*
0159  * The first chunk which always exists.  Note that unlike other
0160  * chunks, this one can be allocated and mapped in several different
0161  * ways and thus often doesn't live in the vmalloc area.
0162  */
0163 struct pcpu_chunk *pcpu_first_chunk __ro_after_init;
0164 
0165 /*
0166  * Optional reserved chunk.  This chunk reserves part of the first
0167  * chunk and serves it for reserved allocations.  When the reserved
0168  * region doesn't exist, the following variable is NULL.
0169  */
0170 struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
0171 
0172 DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
0173 static DEFINE_MUTEX(pcpu_alloc_mutex);  /* chunk create/destroy, [de]pop, map ext */
0174 
0175 struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */
0176 
0177 /* chunks which need their map areas extended, protected by pcpu_lock */
0178 static LIST_HEAD(pcpu_map_extend_chunks);
0179 
0180 /*
0181  * The number of empty populated pages, protected by pcpu_lock.
0182  * The reserved chunk doesn't contribute to the count.
0183  */
0184 int pcpu_nr_empty_pop_pages;
0185 
0186 /*
0187  * The number of populated pages in use by the allocator, protected by
0188  * pcpu_lock.  This number is kept per unit per chunk (i.e. when a page gets
0189  * allocated/deallocated, it is allocated/deallocated in all units of a chunk
0190  * and increments/decrements this count by 1).
0191  */
0192 static unsigned long pcpu_nr_populated;
0193 
0194 /*
0195  * Balance work is used to populate or destroy chunks asynchronously.  We
0196  * try to keep the number of populated free pages between
0197  * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations, and to keep at
0198  * most one empty chunk around.
0199  */
0200 static void pcpu_balance_workfn(struct work_struct *work);
0201 static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
0202 static bool pcpu_async_enabled __read_mostly;
0203 static bool pcpu_atomic_alloc_failed;
0204 
0205 static void pcpu_schedule_balance_work(void)
0206 {
0207     if (pcpu_async_enabled)
0208         schedule_work(&pcpu_balance_work);
0209 }
0210 
0211 /**
0212  * pcpu_addr_in_chunk - check if the address is served from this chunk
0213  * @chunk: chunk of interest
0214  * @addr: percpu address
0215  *
0216  * RETURNS:
0217  * True if the address is served from this chunk.
0218  */
0219 static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
0220 {
0221     void *start_addr, *end_addr;
0222 
0223     if (!chunk)
0224         return false;
0225 
0226     start_addr = chunk->base_addr + chunk->start_offset;
0227     end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
0228            chunk->end_offset;
0229 
0230     return addr >= start_addr && addr < end_addr;
0231 }
0232 
0233 static int __pcpu_size_to_slot(int size)
0234 {
0235     int highbit = fls(size);    /* size is in bytes */
0236     return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
0237 }
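
     /*
      * For example, with PCPU_SLOT_BASE_SHIFT == 5 a chunk whose largest
      * contiguous free area is 1024 bytes has fls(1024) == 11 and lands in
      * slot 11 - 5 + 2 == 8, while a 64-byte area (fls(64) == 7) lands in
      * slot 4.
      */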
0238 
0239 static int pcpu_size_to_slot(int size)
0240 {
0241     if (size == pcpu_unit_size)
0242         return pcpu_free_slot;
0243     return __pcpu_size_to_slot(size);
0244 }
0245 
0246 static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
0247 {
0248     const struct pcpu_block_md *chunk_md = &chunk->chunk_md;
0249 
0250     if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
0251         chunk_md->contig_hint == 0)
0252         return 0;
0253 
0254     return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
0255 }
0256 
0257 /* set the pointer to a chunk in a page struct */
0258 static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
0259 {
0260     page->index = (unsigned long)pcpu;
0261 }
0262 
0263 /* obtain pointer to a chunk from a page struct */
0264 static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
0265 {
0266     return (struct pcpu_chunk *)page->index;
0267 }
0268 
0269 static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
0270 {
0271     return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
0272 }
0273 
0274 static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
0275 {
0276     return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
0277 }
0278 
0279 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
0280                      unsigned int cpu, int page_idx)
0281 {
0282     return (unsigned long)chunk->base_addr +
0283            pcpu_unit_page_offset(cpu, page_idx);
0284 }
0285 
0286 /*
0287  * The following are helper functions to help access bitmaps and convert
0288  * between bitmap offsets and address offsets.
0289  */
0290 static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
0291 {
0292     return chunk->alloc_map +
0293            (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
0294 }
0295 
0296 static unsigned long pcpu_off_to_block_index(int off)
0297 {
0298     return off / PCPU_BITMAP_BLOCK_BITS;
0299 }
0300 
0301 static unsigned long pcpu_off_to_block_off(int off)
0302 {
0303     return off & (PCPU_BITMAP_BLOCK_BITS - 1);
0304 }
0305 
0306 static unsigned long pcpu_block_off_to_off(int index, int off)
0307 {
0308     return index * PCPU_BITMAP_BLOCK_BITS + off;
0309 }
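
     /*
      * Worked example, assuming 4 KiB pages and a 4-byte PCPU_MIN_ALLOC_SIZE
      * so that PCPU_BITMAP_BLOCK_BITS == 1024: chunk offset 2500 maps to
      * block index 2 with an in-block offset of 452, and
      * pcpu_block_off_to_off(2, 452) recovers the original 2500.
      */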
0310 
0311 /**
0312  * pcpu_check_block_hint - check against the contig hint
0313  * @block: block of interest
0314  * @bits: size of allocation
0315  * @align: alignment of area (max PAGE_SIZE)
0316  *
0317  * Check to see if the allocation can fit in the block's contig hint.
0318  * Note, a chunk uses the same hints as a block so this can also check against
0319  * the chunk's contig hint.
0320  */
0321 static bool pcpu_check_block_hint(struct pcpu_block_md *block, int bits,
0322                   size_t align)
0323 {
0324     int bit_off = ALIGN(block->contig_hint_start, align) -
0325         block->contig_hint_start;
0326 
0327     return bit_off + bits <= block->contig_hint;
0328 }
0329 
0330 /*
0331  * pcpu_next_hint - determine which hint to use
0332  * @block: block of interest
0333  * @alloc_bits: size of allocation
0334  *
0335  * This determines if we should scan based on the scan_hint or first_free.
0336  * In general, we want to scan from first_free to fulfill allocations by
0337  * first fit.  However, if we know a scan_hint at position scan_hint_start
0338  * cannot fulfill an allocation, we can begin scanning from there knowing
0339  * the contig_hint will be our fallback.
0340  */
0341 static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
0342 {
0343     /*
0344      * The three conditions below determine if we can skip past the
0345      * scan_hint.  First, does the scan hint exist.  Second, is the
0346      * contig_hint after the scan_hint (possibly not true iff
0347      * contig_hint == scan_hint).  Third, is the allocation request
0348      * larger than the scan_hint.
0349      */
0350     if (block->scan_hint &&
0351         block->contig_hint_start > block->scan_hint_start &&
0352         alloc_bits > block->scan_hint)
0353         return block->scan_hint_start + block->scan_hint;
0354 
0355     return block->first_free;
0356 }
0357 
0358 /**
0359  * pcpu_next_md_free_region - finds the next hint free area
0360  * @chunk: chunk of interest
0361  * @bit_off: chunk offset
0362  * @bits: size of free area
0363  *
0364  * Helper function for pcpu_for_each_md_free_region.  It checks
0365  * block->contig_hint and performs aggregation across blocks to find the
0366  * next hint.  It modifies bit_off and bits in-place to be consumed in the
0367  * loop.
0368  */
0369 static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
0370                      int *bits)
0371 {
0372     int i = pcpu_off_to_block_index(*bit_off);
0373     int block_off = pcpu_off_to_block_off(*bit_off);
0374     struct pcpu_block_md *block;
0375 
0376     *bits = 0;
0377     for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
0378          block++, i++) {
0379         /* handles contig area across blocks */
0380         if (*bits) {
0381             *bits += block->left_free;
0382             if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
0383                 continue;
0384             return;
0385         }
0386 
0387         /*
0388          * This checks three things.  First is there a contig_hint to
0389          * check.  Second, have we checked this hint before by
0390          * comparing the block_off.  Third, is this the same as the
0391          * right contig hint.  In the last case, it spills over into
0392          * the next block and should be handled by the contig area
0393          * across blocks code.
0394          */
0395         *bits = block->contig_hint;
0396         if (*bits && block->contig_hint_start >= block_off &&
0397             *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
0398             *bit_off = pcpu_block_off_to_off(i,
0399                     block->contig_hint_start);
0400             return;
0401         }
0402         /* reset to satisfy the second predicate above */
0403         block_off = 0;
0404 
0405         *bits = block->right_free;
0406         *bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
0407     }
0408 }
0409 
0410 /**
0411  * pcpu_next_fit_region - finds fit areas for a given allocation request
0412  * @chunk: chunk of interest
0413  * @alloc_bits: size of allocation
0414  * @align: alignment of area (max PAGE_SIZE)
0415  * @bit_off: chunk offset
0416  * @bits: size of free area
0417  *
0418  * Finds the next free region that is viable for use with a given size and
0419  * alignment.  This only returns if there is a valid area to be used for this
0420  * allocation.  If the allocation request fits within the block,
0421  * block->first_free is returned so the request can be attempted in the
0422  * space before the contig hint.
0423  */
0424 static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
0425                  int align, int *bit_off, int *bits)
0426 {
0427     int i = pcpu_off_to_block_index(*bit_off);
0428     int block_off = pcpu_off_to_block_off(*bit_off);
0429     struct pcpu_block_md *block;
0430 
0431     *bits = 0;
0432     for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
0433          block++, i++) {
0434         /* handles contig area across blocks */
0435         if (*bits) {
0436             *bits += block->left_free;
0437             if (*bits >= alloc_bits)
0438                 return;
0439             if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
0440                 continue;
0441         }
0442 
0443         /* check block->contig_hint */
0444         *bits = ALIGN(block->contig_hint_start, align) -
0445             block->contig_hint_start;
0446         /*
0447          * This uses the block offset to determine if this has been
0448          * checked in the prior iteration.
0449          */
0450         if (block->contig_hint &&
0451             block->contig_hint_start >= block_off &&
0452             block->contig_hint >= *bits + alloc_bits) {
0453             int start = pcpu_next_hint(block, alloc_bits);
0454 
0455             *bits += alloc_bits + block->contig_hint_start -
0456                  start;
0457             *bit_off = pcpu_block_off_to_off(i, start);
0458             return;
0459         }
0460         /* reset to satisfy the second predicate above */
0461         block_off = 0;
0462 
0463         *bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
0464                  align);
0465         *bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
0466         *bit_off = pcpu_block_off_to_off(i, *bit_off);
0467         if (*bits >= alloc_bits)
0468             return;
0469     }
0470 
0471     /* no valid offsets were found - fail condition */
0472     *bit_off = pcpu_chunk_map_bits(chunk);
0473 }
0474 
0475 /*
0476  * Metadata free area iterators.  These perform aggregation of free areas
0477  * based on the metadata blocks and return the offset @bit_off and size in
0478  * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
0479  * a fit is found for the allocation request.
0480  */
0481 #define pcpu_for_each_md_free_region(chunk, bit_off, bits)      \
0482     for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));    \
0483          (bit_off) < pcpu_chunk_map_bits((chunk));          \
0484          (bit_off) += (bits) + 1,                   \
0485          pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))
0486 
0487 #define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
0488     for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
0489                   &(bits));                   \
0490          (bit_off) < pcpu_chunk_map_bits((chunk));                \
0491          (bit_off) += (bits),                         \
0492          pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
0493                   &(bits)))
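
     /*
      * pcpu_for_each_md_free_region() is used by pcpu_chunk_refresh_hint()
      * below to rebuild the chunk-level hints, while
      * pcpu_for_each_fit_region() is used by pcpu_find_block_fit() to locate
      * a candidate offset for an allocation request.
      */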
0494 
0495 /**
0496  * pcpu_mem_zalloc - allocate memory
0497  * @size: bytes to allocate
0498  * @gfp: allocation flags
0499  *
0500  * Allocate @size bytes.  If @size is no larger than PAGE_SIZE,
0501  * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
0502  * This is to facilitate passing through whitelisted flags.  The
0503  * returned memory is always zeroed.
0504  *
0505  * RETURNS:
0506  * Pointer to the allocated area on success, NULL on failure.
0507  */
0508 static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
0509 {
0510     if (WARN_ON_ONCE(!slab_is_available()))
0511         return NULL;
0512 
0513     if (size <= PAGE_SIZE)
0514         return kzalloc(size, gfp);
0515     else
0516         return __vmalloc(size, gfp | __GFP_ZERO);
0517 }
0518 
0519 /**
0520  * pcpu_mem_free - free memory
0521  * @ptr: memory to free
0522  *
0523  * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
0524  */
0525 static void pcpu_mem_free(void *ptr)
0526 {
0527     kvfree(ptr);
0528 }
0529 
0530 static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
0531                   bool move_front)
0532 {
0533     if (chunk != pcpu_reserved_chunk) {
0534         if (move_front)
0535             list_move(&chunk->list, &pcpu_chunk_lists[slot]);
0536         else
0537             list_move_tail(&chunk->list, &pcpu_chunk_lists[slot]);
0538     }
0539 }
0540 
0541 static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
0542 {
0543     __pcpu_chunk_move(chunk, slot, true);
0544 }
0545 
0546 /**
0547  * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
0548  * @chunk: chunk of interest
0549  * @oslot: the previous slot it was on
0550  *
0551  * This function is called after an allocation or free changed @chunk.
0552  * New slot according to the changed state is determined and @chunk is
0553  * moved to the slot.  Note that the reserved chunk is never put on
0554  * chunk slots.
0555  *
0556  * CONTEXT:
0557  * pcpu_lock.
0558  */
0559 static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
0560 {
0561     int nslot = pcpu_chunk_slot(chunk);
0562 
0563     /* leave isolated chunks in-place */
0564     if (chunk->isolated)
0565         return;
0566 
0567     if (oslot != nslot)
0568         __pcpu_chunk_move(chunk, nslot, oslot < nslot);
0569 }
0570 
0571 static void pcpu_isolate_chunk(struct pcpu_chunk *chunk)
0572 {
0573     lockdep_assert_held(&pcpu_lock);
0574 
0575     if (!chunk->isolated) {
0576         chunk->isolated = true;
0577         pcpu_nr_empty_pop_pages -= chunk->nr_empty_pop_pages;
0578     }
0579     list_move(&chunk->list, &pcpu_chunk_lists[pcpu_to_depopulate_slot]);
0580 }
0581 
0582 static void pcpu_reintegrate_chunk(struct pcpu_chunk *chunk)
0583 {
0584     lockdep_assert_held(&pcpu_lock);
0585 
0586     if (chunk->isolated) {
0587         chunk->isolated = false;
0588         pcpu_nr_empty_pop_pages += chunk->nr_empty_pop_pages;
0589         pcpu_chunk_relocate(chunk, -1);
0590     }
0591 }
0592 
0593 /*
0594  * pcpu_update_empty_pages - update empty page counters
0595  * @chunk: chunk of interest
0596  * @nr: nr of empty pages
0597  *
0598  * This is used to keep track of the empty pages based on the premise that
0599  * an md_block covers a page.  The hint update functions recognize if a block
0600  * is made full or broken to calculate deltas for keeping track of free pages.
0601  */
0602 static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
0603 {
0604     chunk->nr_empty_pop_pages += nr;
0605     if (chunk != pcpu_reserved_chunk && !chunk->isolated)
0606         pcpu_nr_empty_pop_pages += nr;
0607 }
0608 
0609 /*
0610  * pcpu_region_overlap - determines if two regions overlap
0611  * @a: start of first region, inclusive
0612  * @b: end of first region, exclusive
0613  * @x: start of second region, inclusive
0614  * @y: end of second region, exclusive
0615  *
0616  * This is used to determine if the hint region [a, b) overlaps with the
0617  * allocated region [x, y).
0618  */
0619 static inline bool pcpu_region_overlap(int a, int b, int x, int y)
0620 {
0621     return (a < y) && (x < b);
0622 }
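
     /*
      * For example, [4, 10) and [8, 12) overlap since 4 < 12 and 8 < 10,
      * whereas [4, 8) and [8, 12) merely touch and do not overlap because
      * 8 < 8 is false.
      */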
0623 
0624 /**
0625  * pcpu_block_update - updates a block given a free area
0626  * @block: block of interest
0627  * @start: start offset in block
0628  * @end: end offset in block
0629  *
0630  * Updates a block given a known free area.  The region [start, end) is
0631  * expected to be the entirety of the free area within a block.  Chooses
0632  * the best starting offset if the contig hints are equal.
0633  */
0634 static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
0635 {
0636     int contig = end - start;
0637 
0638     block->first_free = min(block->first_free, start);
0639     if (start == 0)
0640         block->left_free = contig;
0641 
0642     if (end == block->nr_bits)
0643         block->right_free = contig;
0644 
0645     if (contig > block->contig_hint) {
0646         /* promote the old contig_hint to be the new scan_hint */
0647         if (start > block->contig_hint_start) {
0648             if (block->contig_hint > block->scan_hint) {
0649                 block->scan_hint_start =
0650                     block->contig_hint_start;
0651                 block->scan_hint = block->contig_hint;
0652             } else if (start < block->scan_hint_start) {
0653                 /*
0654                  * The old contig_hint == scan_hint.  But, the
0655                  * new contig is larger so hold the invariant
0656                  * scan_hint_start < contig_hint_start.
0657                  */
0658                 block->scan_hint = 0;
0659             }
0660         } else {
0661             block->scan_hint = 0;
0662         }
0663         block->contig_hint_start = start;
0664         block->contig_hint = contig;
0665     } else if (contig == block->contig_hint) {
0666         if (block->contig_hint_start &&
0667             (!start ||
0668              __ffs(start) > __ffs(block->contig_hint_start))) {
0669             /* start has a better alignment so use it */
0670             block->contig_hint_start = start;
0671             if (start < block->scan_hint_start &&
0672                 block->contig_hint > block->scan_hint)
0673                 block->scan_hint = 0;
0674         } else if (start > block->scan_hint_start ||
0675                block->contig_hint > block->scan_hint) {
0676             /*
0677              * Knowing contig == contig_hint, update the scan_hint
0678              * if it is farther than or larger than the current
0679              * scan_hint.
0680              */
0681             block->scan_hint_start = start;
0682             block->scan_hint = contig;
0683         }
0684     } else {
0685         /*
0686          * The region is smaller than the contig_hint.  So only update
0687          * the scan_hint if it is larger than or equal and farther than
0688          * the current scan_hint.
0689          */
0690         if ((start < block->contig_hint_start &&
0691              (contig > block->scan_hint ||
0692               (contig == block->scan_hint &&
0693                start > block->scan_hint_start)))) {
0694             block->scan_hint_start = start;
0695             block->scan_hint = contig;
0696         }
0697     }
0698 }
0699 
0700 /*
0701  * pcpu_block_update_scan - update a block given a free area from a scan
0702  * @chunk: chunk of interest
0703  * @bit_off: chunk offset
0704  * @bits: size of free area
0705  *
0706  * Finding the final allocation spot first goes through pcpu_find_block_fit()
0707  * to find a block that can hold the allocation and then pcpu_alloc_area()
0708  * where a scan is used.  When allocations require specific alignments,
0709  * we can inadvertently create holes which will not be seen in the alloc
0710  * or free paths.
0711  *
0712  * This takes a given free area hole and updates a block as it may change the
0713  * scan_hint.  We need to scan backwards to ensure we don't miss free bits
0714  * from alignment.
0715  */
0716 static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
0717                    int bits)
0718 {
0719     int s_off = pcpu_off_to_block_off(bit_off);
0720     int e_off = s_off + bits;
0721     int s_index, l_bit;
0722     struct pcpu_block_md *block;
0723 
0724     if (e_off > PCPU_BITMAP_BLOCK_BITS)
0725         return;
0726 
0727     s_index = pcpu_off_to_block_index(bit_off);
0728     block = chunk->md_blocks + s_index;
0729 
0730     /* scan backwards in case of alignment skipping free bits */
0731     l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
0732     s_off = (s_off == l_bit) ? 0 : l_bit + 1;
0733 
0734     pcpu_block_update(block, s_off, e_off);
0735 }
0736 
0737 /**
0738  * pcpu_chunk_refresh_hint - updates metadata about a chunk
0739  * @chunk: chunk of interest
0740  * @full_scan: if we should scan from the beginning
0741  *
0742  * Iterates over the metadata blocks to find the largest contig area.
0743  * A full scan can be avoided on the allocation path as this is triggered
0744  * if we broke the contig_hint.  In doing so, the scan_hint will be before
0745  * the contig_hint or after if the scan_hint == contig_hint.  This cannot
0746  * be prevented on freeing as we want to find the largest area possibly
0747  * spanning blocks.
0748  */
0749 static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
0750 {
0751     struct pcpu_block_md *chunk_md = &chunk->chunk_md;
0752     int bit_off, bits;
0753 
0754     /* promote scan_hint to contig_hint */
0755     if (!full_scan && chunk_md->scan_hint) {
0756         bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
0757         chunk_md->contig_hint_start = chunk_md->scan_hint_start;
0758         chunk_md->contig_hint = chunk_md->scan_hint;
0759         chunk_md->scan_hint = 0;
0760     } else {
0761         bit_off = chunk_md->first_free;
0762         chunk_md->contig_hint = 0;
0763     }
0764 
0765     bits = 0;
0766     pcpu_for_each_md_free_region(chunk, bit_off, bits)
0767         pcpu_block_update(chunk_md, bit_off, bit_off + bits);
0768 }
0769 
0770 /**
0771  * pcpu_block_refresh_hint - rescan a block and refresh its hints
0772  * @chunk: chunk of interest
0773  * @index: index of the metadata block
0774  *
0775  * Scans over the block beginning at first_free and updates the block
0776  * metadata accordingly.
0777  */
0778 static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
0779 {
0780     struct pcpu_block_md *block = chunk->md_blocks + index;
0781     unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
0782     unsigned int start, end;    /* region start, region end */
0783 
0784     /* promote scan_hint to contig_hint */
0785     if (block->scan_hint) {
0786         start = block->scan_hint_start + block->scan_hint;
0787         block->contig_hint_start = block->scan_hint_start;
0788         block->contig_hint = block->scan_hint;
0789         block->scan_hint = 0;
0790     } else {
0791         start = block->first_free;
0792         block->contig_hint = 0;
0793     }
0794 
0795     block->right_free = 0;
0796 
0797     /* iterate over free areas and update the contig hints */
0798     for_each_clear_bitrange_from(start, end, alloc_map, PCPU_BITMAP_BLOCK_BITS)
0799         pcpu_block_update(block, start, end);
0800 }
0801 
0802 /**
0803  * pcpu_block_update_hint_alloc - update hint on allocation path
0804  * @chunk: chunk of interest
0805  * @bit_off: chunk offset
0806  * @bits: size of request
0807  *
0808  * Updates metadata for the allocation path.  The metadata only has to be
0809  * refreshed by a full scan iff the chunk's contig hint is broken.  Block level
0810  * scans are required if the block's contig hint is broken.
0811  */
0812 static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
0813                      int bits)
0814 {
0815     struct pcpu_block_md *chunk_md = &chunk->chunk_md;
0816     int nr_empty_pages = 0;
0817     struct pcpu_block_md *s_block, *e_block, *block;
0818     int s_index, e_index;   /* block indexes of the allocated area */
0819     int s_off, e_off;   /* block offsets of the allocated area */
0820 
0821     /*
0822      * Calculate per block offsets.
0823      * The calculation uses an inclusive range, but the resulting offsets
0824      * are [start, end).  e_index always points to the last block in the
0825      * range.
0826      */
0827     s_index = pcpu_off_to_block_index(bit_off);
0828     e_index = pcpu_off_to_block_index(bit_off + bits - 1);
0829     s_off = pcpu_off_to_block_off(bit_off);
0830     e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
0831 
0832     s_block = chunk->md_blocks + s_index;
0833     e_block = chunk->md_blocks + e_index;
0834 
0835     /*
0836      * Update s_block.
0837      * block->first_free must be updated if the allocation takes its place.
0838      * If the allocation breaks the contig_hint, a scan is required to
0839      * restore this hint.
0840      */
0841     if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
0842         nr_empty_pages++;
0843 
0844     if (s_off == s_block->first_free)
0845         s_block->first_free = find_next_zero_bit(
0846                     pcpu_index_alloc_map(chunk, s_index),
0847                     PCPU_BITMAP_BLOCK_BITS,
0848                     s_off + bits);
0849 
0850     if (pcpu_region_overlap(s_block->scan_hint_start,
0851                 s_block->scan_hint_start + s_block->scan_hint,
0852                 s_off,
0853                 s_off + bits))
0854         s_block->scan_hint = 0;
0855 
0856     if (pcpu_region_overlap(s_block->contig_hint_start,
0857                 s_block->contig_hint_start +
0858                 s_block->contig_hint,
0859                 s_off,
0860                 s_off + bits)) {
0861         /* block contig hint is broken - scan to fix it */
0862         if (!s_off)
0863             s_block->left_free = 0;
0864         pcpu_block_refresh_hint(chunk, s_index);
0865     } else {
0866         /* update left and right contig manually */
0867         s_block->left_free = min(s_block->left_free, s_off);
0868         if (s_index == e_index)
0869             s_block->right_free = min_t(int, s_block->right_free,
0870                     PCPU_BITMAP_BLOCK_BITS - e_off);
0871         else
0872             s_block->right_free = 0;
0873     }
0874 
0875     /*
0876      * Update e_block.
0877      */
0878     if (s_index != e_index) {
0879         if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
0880             nr_empty_pages++;
0881 
0882         /*
0883          * When the allocation is across blocks, the end is along
0884          * the left part of the e_block.
0885          */
0886         e_block->first_free = find_next_zero_bit(
0887                 pcpu_index_alloc_map(chunk, e_index),
0888                 PCPU_BITMAP_BLOCK_BITS, e_off);
0889 
0890         if (e_off == PCPU_BITMAP_BLOCK_BITS) {
0891             /* reset the block */
0892             e_block++;
0893         } else {
0894             if (e_off > e_block->scan_hint_start)
0895                 e_block->scan_hint = 0;
0896 
0897             e_block->left_free = 0;
0898             if (e_off > e_block->contig_hint_start) {
0899                 /* contig hint is broken - scan to fix it */
0900                 pcpu_block_refresh_hint(chunk, e_index);
0901             } else {
0902                 e_block->right_free =
0903                     min_t(int, e_block->right_free,
0904                           PCPU_BITMAP_BLOCK_BITS - e_off);
0905             }
0906         }
0907 
0908         /* update in-between md_blocks */
0909         nr_empty_pages += (e_index - s_index - 1);
0910         for (block = s_block + 1; block < e_block; block++) {
0911             block->scan_hint = 0;
0912             block->contig_hint = 0;
0913             block->left_free = 0;
0914             block->right_free = 0;
0915         }
0916     }
0917 
0918     if (nr_empty_pages)
0919         pcpu_update_empty_pages(chunk, -nr_empty_pages);
0920 
0921     if (pcpu_region_overlap(chunk_md->scan_hint_start,
0922                 chunk_md->scan_hint_start +
0923                 chunk_md->scan_hint,
0924                 bit_off,
0925                 bit_off + bits))
0926         chunk_md->scan_hint = 0;
0927 
0928     /*
0929      * The only time a full chunk scan is required is if the chunk
0930      * contig hint is broken.  Otherwise, it means a smaller space
0931      * was used and therefore the chunk contig hint is still correct.
0932      */
0933     if (pcpu_region_overlap(chunk_md->contig_hint_start,
0934                 chunk_md->contig_hint_start +
0935                 chunk_md->contig_hint,
0936                 bit_off,
0937                 bit_off + bits))
0938         pcpu_chunk_refresh_hint(chunk, false);
0939 }
0940 
0941 /**
0942  * pcpu_block_update_hint_free - updates the block hints on the free path
0943  * @chunk: chunk of interest
0944  * @bit_off: chunk offset
0945  * @bits: size of request
0946  *
0947  * Updates metadata for the free path.  This avoids a blind block
0948  * refresh by making use of the block contig hints.  If this fails, it scans
0949  * forward and backward to determine the extent of the free area.  This is
0950  * capped at the boundary of blocks.
0951  *
0952  * A chunk update is triggered if a page becomes free, a block becomes free,
0953  * or the free spans across blocks.  This tradeoff is to minimize iterating
0954  * over the block metadata to update chunk_md->contig_hint.
0955  * chunk_md->contig_hint may be off by up to a page, but it will never be more
0956  * than the available space.  If the contig hint is contained in one block, it
0957  * will be accurate.
0958  */
0959 static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
0960                     int bits)
0961 {
0962     int nr_empty_pages = 0;
0963     struct pcpu_block_md *s_block, *e_block, *block;
0964     int s_index, e_index;   /* block indexes of the freed allocation */
0965     int s_off, e_off;   /* block offsets of the freed allocation */
0966     int start, end;     /* start and end of the whole free area */
0967 
0968     /*
0969      * Calculate per block offsets.
0970      * The calculation uses an inclusive range, but the resulting offsets
0971      * are [start, end).  e_index always points to the last block in the
0972      * range.
0973      */
0974     s_index = pcpu_off_to_block_index(bit_off);
0975     e_index = pcpu_off_to_block_index(bit_off + bits - 1);
0976     s_off = pcpu_off_to_block_off(bit_off);
0977     e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
0978 
0979     s_block = chunk->md_blocks + s_index;
0980     e_block = chunk->md_blocks + e_index;
0981 
0982     /*
0983      * Check if the freed area aligns with the block->contig_hint.
0984      * If it does, then the scan to find the beginning/end of the
0985      * larger free area can be avoided.
0986      *
0987      * start and end refer to beginning and end of the free area
0988      * within each their respective blocks.  This is not necessarily
0989      * the entire free area as it may span blocks past the beginning
0990      * or end of the block.
0991      */
0992     start = s_off;
0993     if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
0994         start = s_block->contig_hint_start;
0995     } else {
0996         /*
0997          * Scan backwards to find the extent of the free area.
0998          * find_last_bit returns the starting bit, so if the start bit
0999          * is returned, that means there was no last bit and the
1000          * remainder of the chunk is free.
1001          */
1002         int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
1003                       start);
1004         start = (start == l_bit) ? 0 : l_bit + 1;
1005     }
1006 
1007     end = e_off;
1008     if (e_off == e_block->contig_hint_start)
1009         end = e_block->contig_hint_start + e_block->contig_hint;
1010     else
1011         end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
1012                     PCPU_BITMAP_BLOCK_BITS, end);
1013 
1014     /* update s_block */
1015     e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
1016     if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
1017         nr_empty_pages++;
1018     pcpu_block_update(s_block, start, e_off);
1019 
1020     /* the freed area spans blocks */
1021     if (s_index != e_index) {
1022         /* update e_block */
1023         if (end == PCPU_BITMAP_BLOCK_BITS)
1024             nr_empty_pages++;
1025         pcpu_block_update(e_block, 0, end);
1026 
1027         /* reset md_blocks in the middle */
1028         nr_empty_pages += (e_index - s_index - 1);
1029         for (block = s_block + 1; block < e_block; block++) {
1030             block->first_free = 0;
1031             block->scan_hint = 0;
1032             block->contig_hint_start = 0;
1033             block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
1034             block->left_free = PCPU_BITMAP_BLOCK_BITS;
1035             block->right_free = PCPU_BITMAP_BLOCK_BITS;
1036         }
1037     }
1038 
1039     if (nr_empty_pages)
1040         pcpu_update_empty_pages(chunk, nr_empty_pages);
1041 
1042     /*
1043      * Refresh chunk metadata when the free makes a block free or spans
1044      * across blocks.  The contig_hint may be off by up to a page, but if
1045      * the contig_hint is contained in a block, it will be accurate with
1046      * the else condition below.
1047      */
1048     if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
1049         pcpu_chunk_refresh_hint(chunk, true);
1050     else
1051         pcpu_block_update(&chunk->chunk_md,
1052                   pcpu_block_off_to_off(s_index, start),
1053                   end);
1054 }
1055 
1056 /**
1057  * pcpu_is_populated - determines if the region is populated
1058  * @chunk: chunk of interest
1059  * @bit_off: chunk offset
1060  * @bits: size of area
1061  * @next_off: return value for the next offset to start searching
1062  *
1063  * For atomic allocations, check if the backing pages are populated.
1064  *
1065  * RETURNS:
1066  * True if the backing pages are populated.
1067  * @next_off is set to skip over unpopulated blocks in pcpu_find_block_fit().
1068  */
1069 static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
1070                   int *next_off)
1071 {
1072     unsigned int start, end;
1073 
1074     start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
1075     end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
1076 
1077     start = find_next_zero_bit(chunk->populated, end, start);
1078     if (start >= end)
1079         return true;
1080 
1081     end = find_next_bit(chunk->populated, end, start + 1);
1082 
1083     *next_off = end * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
1084     return false;
1085 }
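
     /*
      * Worked example, assuming 4 KiB pages and a 4-byte PCPU_MIN_ALLOC_SIZE:
      * a request for bits [1000, 1200) covers bytes [4000, 4800), so pages 0
      * and 1 (PFN_DOWN(4000) == 0, PFN_UP(4800) == 2) must both be populated
      * for the area to satisfy an atomic allocation.
      */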
1086 
1087 /**
1088  * pcpu_find_block_fit - finds the block index to start searching
1089  * @chunk: chunk of interest
1090  * @alloc_bits: size of request in allocation units
1091  * @align: alignment of area (max PAGE_SIZE bytes)
1092  * @pop_only: use populated regions only
1093  *
1094  * Given a chunk and an allocation spec, find the offset to begin searching
1095  * for a free region.  This iterates over the bitmap metadata blocks to
1096  * find an offset that will be guaranteed to fit the requirements.  It is
1097  * find an offset that is guaranteed to fit the requirements.  It is not
1098  * quite first fit: if the allocation does not fit in the contig hint of a
1099  * block or chunk, that block or chunk is skipped.  This errs on the side
1100  * of caution to prevent excess iteration.  Poor alignment can cause the
1101  * allocator to skip over blocks and chunks that have valid free areas.
1102  * RETURNS:
1103  * The offset in the bitmap to begin searching.
1104  * -1 if no offset is found.
1105  */
1106 static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
1107                    size_t align, bool pop_only)
1108 {
1109     struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1110     int bit_off, bits, next_off;
1111 
1112     /*
1113      * This is an optimization to prevent scanning by assuming if the
1114      * allocation cannot fit in the global hint, there is memory pressure
1115      * and creating a new chunk would happen soon.
1116      */
1117     if (!pcpu_check_block_hint(chunk_md, alloc_bits, align))
1118         return -1;
1119 
1120     bit_off = pcpu_next_hint(chunk_md, alloc_bits);
1121     bits = 0;
1122     pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
1123         if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
1124                            &next_off))
1125             break;
1126 
1127         bit_off = next_off;
1128         bits = 0;
1129     }
1130 
1131     if (bit_off == pcpu_chunk_map_bits(chunk))
1132         return -1;
1133 
1134     return bit_off;
1135 }
1136 
1137 /*
1138  * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
1139  * @map: the address to base the search on
1140  * @size: the bitmap size in bits
1141  * @start: the bitnumber to start searching at
1142  * @nr: the number of zeroed bits we're looking for
1143  * @align_mask: alignment mask for zero area
1144  * @largest_off: offset of the largest area skipped
1145  * @largest_bits: size of the largest area skipped
1146  *
1147  * The @align_mask should be one less than a power of 2.
1148  *
1149  * This is a modified version of bitmap_find_next_zero_area_off() to remember
1150  * the largest area that was skipped.  This is imperfect, but in general is
1151  * good enough.  The largest remembered region is the largest failed region
1152  * seen.  This does not include anything we possibly skipped due to alignment.
1153  * pcpu_block_update_scan() does scan backwards to try and recover what was
1154  * lost to alignment.  While this can cause scanning to miss earlier possible
1155  * free areas, smaller allocations will eventually fill those holes.
1156  */
1157 static unsigned long pcpu_find_zero_area(unsigned long *map,
1158                      unsigned long size,
1159                      unsigned long start,
1160                      unsigned long nr,
1161                      unsigned long align_mask,
1162                      unsigned long *largest_off,
1163                      unsigned long *largest_bits)
1164 {
1165     unsigned long index, end, i, area_off, area_bits;
1166 again:
1167     index = find_next_zero_bit(map, size, start);
1168 
1169     /* Align allocation */
1170     index = __ALIGN_MASK(index, align_mask);
1171     area_off = index;
1172 
1173     end = index + nr;
1174     if (end > size)
1175         return end;
1176     i = find_next_bit(map, end, index);
1177     if (i < end) {
1178         area_bits = i - area_off;
1179         /* remember largest unused area with best alignment */
1180         if (area_bits > *largest_bits ||
1181             (area_bits == *largest_bits && *largest_off &&
1182              (!area_off || __ffs(area_off) > __ffs(*largest_off)))) {
1183             *largest_off = area_off;
1184             *largest_bits = area_bits;
1185         }
1186 
1187         start = i + 1;
1188         goto again;
1189     }
1190     return index;
1191 }
1192 
1193 /**
1194  * pcpu_alloc_area - allocates an area from a pcpu_chunk
1195  * @chunk: chunk of interest
1196  * @alloc_bits: size of request in allocation units
1197  * @align: alignment of area (max PAGE_SIZE)
1198  * @start: bit_off to start searching
1199  *
1200  * This function takes in a @start offset to begin searching to fit an
1201  * allocation of @alloc_bits with alignment @align.  It needs to scan
1202  * the allocation map because if it fits within the block's contig hint,
1203  * @start will be block->first_free. This is an attempt to fill the
1204  * allocation prior to breaking the contig hint.  The allocation and
1205  * boundary maps are updated accordingly if it confirms a valid
1206  * free area.
1207  *
1208  * RETURNS:
1209  * Allocated addr offset in @chunk on success.
1210  * -1 if no matching area is found.
1211  */
1212 static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
1213                size_t align, int start)
1214 {
1215     struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1216     size_t align_mask = (align) ? (align - 1) : 0;
1217     unsigned long area_off = 0, area_bits = 0;
1218     int bit_off, end, oslot;
1219 
1220     lockdep_assert_held(&pcpu_lock);
1221 
1222     oslot = pcpu_chunk_slot(chunk);
1223 
1224     /*
1225      * Search to find a fit.
1226      */
1227     end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
1228             pcpu_chunk_map_bits(chunk));
1229     bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
1230                       align_mask, &area_off, &area_bits);
1231     if (bit_off >= end)
1232         return -1;
1233 
1234     if (area_bits)
1235         pcpu_block_update_scan(chunk, area_off, area_bits);
1236 
1237     /* update alloc map */
1238     bitmap_set(chunk->alloc_map, bit_off, alloc_bits);
1239 
1240     /* update boundary map */
1241     set_bit(bit_off, chunk->bound_map);
1242     bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
1243     set_bit(bit_off + alloc_bits, chunk->bound_map);
1244 
1245     chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;
1246 
1247     /* update first free bit */
1248     if (bit_off == chunk_md->first_free)
1249         chunk_md->first_free = find_next_zero_bit(
1250                     chunk->alloc_map,
1251                     pcpu_chunk_map_bits(chunk),
1252                     bit_off + alloc_bits);
1253 
1254     pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);
1255 
1256     pcpu_chunk_relocate(chunk, oslot);
1257 
1258     return bit_off * PCPU_MIN_ALLOC_SIZE;
1259 }
1260 
1261 /**
1262  * pcpu_free_area - frees the corresponding offset
1263  * @chunk: chunk of interest
1264  * @off: addr offset into chunk
1265  *
1266  * This function determines the size of an allocation to free using
1267  * the boundary bitmap and clears the allocation map.
1268  *
1269  * RETURNS:
1270  * Number of freed bytes.
1271  */
1272 static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
1273 {
1274     struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1275     int bit_off, bits, end, oslot, freed;
1276 
1277     lockdep_assert_held(&pcpu_lock);
1278     pcpu_stats_area_dealloc(chunk);
1279 
1280     oslot = pcpu_chunk_slot(chunk);
1281 
1282     bit_off = off / PCPU_MIN_ALLOC_SIZE;
1283 
1284     /* find end index */
1285     end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
1286                 bit_off + 1);
1287     bits = end - bit_off;
1288     bitmap_clear(chunk->alloc_map, bit_off, bits);
1289 
1290     freed = bits * PCPU_MIN_ALLOC_SIZE;
1291 
1292     /* update metadata */
1293     chunk->free_bytes += freed;
1294 
1295     /* update first free bit */
1296     chunk_md->first_free = min(chunk_md->first_free, bit_off);
1297 
1298     pcpu_block_update_hint_free(chunk, bit_off, bits);
1299 
1300     pcpu_chunk_relocate(chunk, oslot);
1301 
1302     return freed;
1303 }
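
     /*
      * Worked example: an allocation of 3 bits at offset 8 sets alloc_map
      * bits 8-10 and bound_map bits 8 and 11 in pcpu_alloc_area().  When the
      * area is freed, pcpu_free_area() starts from bit 8, finds the next
      * bound_map bit at 11, and therefore clears exactly 3 bits, i.e. 12
      * bytes with a 4-byte PCPU_MIN_ALLOC_SIZE.
      */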
1304 
1305 static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
1306 {
1307     block->scan_hint = 0;
1308     block->contig_hint = nr_bits;
1309     block->left_free = nr_bits;
1310     block->right_free = nr_bits;
1311     block->first_free = 0;
1312     block->nr_bits = nr_bits;
1313 }
1314 
1315 static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
1316 {
1317     struct pcpu_block_md *md_block;
1318 
1319     /* init the chunk's block */
1320     pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));
1321 
1322     for (md_block = chunk->md_blocks;
1323          md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
1324          md_block++)
1325         pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
1326 }
1327 
1328 /**
1329  * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
1330  * @tmp_addr: the start of the region served
1331  * @map_size: size of the region served
1332  *
1333  * This is responsible for creating the chunks that serve the first chunk.  The
1334  * base_addr is @tmp_addr rounded down to a page boundary while the region end
1335  * is rounded up.  Offsets are tracked to determine the region served.  All
1336  * this is done to appease the bitmap allocator by avoiding partial blocks.
1337  *
1338  * RETURNS:
1339  * Chunk serving the region at @tmp_addr of @map_size.
1340  */
1341 static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
1342                              int map_size)
1343 {
1344     struct pcpu_chunk *chunk;
1345     unsigned long aligned_addr, lcm_align;
1346     int start_offset, offset_bits, region_size, region_bits;
1347     size_t alloc_size;
1348 
1349     /* region calculations */
1350     aligned_addr = tmp_addr & PAGE_MASK;
1351 
1352     start_offset = tmp_addr - aligned_addr;
1353 
1354     /*
1355      * Align the end of the region with the LCM of PAGE_SIZE and
1356      * PCPU_BITMAP_BLOCK_SIZE.  One of these constants is a multiple of
1357      * the other.
1358      */
1359     lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE);
1360     region_size = ALIGN(start_offset + map_size, lcm_align);
1361 
1362     /* allocate chunk */
1363     alloc_size = struct_size(chunk, populated,
1364                  BITS_TO_LONGS(region_size >> PAGE_SHIFT));
1365     chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1366     if (!chunk)
1367         panic("%s: Failed to allocate %zu bytes\n", __func__,
1368               alloc_size);
1369 
1370     INIT_LIST_HEAD(&chunk->list);
1371 
1372     chunk->base_addr = (void *)aligned_addr;
1373     chunk->start_offset = start_offset;
1374     chunk->end_offset = region_size - chunk->start_offset - map_size;
1375 
1376     chunk->nr_pages = region_size >> PAGE_SHIFT;
1377     region_bits = pcpu_chunk_map_bits(chunk);
1378 
1379     alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
1380     chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1381     if (!chunk->alloc_map)
1382         panic("%s: Failed to allocate %zu bytes\n", __func__,
1383               alloc_size);
1384 
1385     alloc_size =
1386         BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
1387     chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1388     if (!chunk->bound_map)
1389         panic("%s: Failed to allocate %zu bytes\n", __func__,
1390               alloc_size);
1391 
1392     alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
1393     chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1394     if (!chunk->md_blocks)
1395         panic("%s: Failed to allocate %zu bytes\n", __func__,
1396               alloc_size);
1397 
1398 #ifdef CONFIG_MEMCG_KMEM
1399     /* first chunk is free to use */
1400     chunk->obj_cgroups = NULL;
1401 #endif
1402     pcpu_init_md_blocks(chunk);
1403 
1404     /* manage populated page bitmap */
1405     chunk->immutable = true;
1406     bitmap_fill(chunk->populated, chunk->nr_pages);
1407     chunk->nr_populated = chunk->nr_pages;
1408     chunk->nr_empty_pop_pages = chunk->nr_pages;
1409 
1410     chunk->free_bytes = map_size;
1411 
1412     if (chunk->start_offset) {
1413         /* hide the beginning of the bitmap */
1414         offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
1415         bitmap_set(chunk->alloc_map, 0, offset_bits);
1416         set_bit(0, chunk->bound_map);
1417         set_bit(offset_bits, chunk->bound_map);
1418 
1419         chunk->chunk_md.first_free = offset_bits;
1420 
1421         pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
1422     }
1423 
1424     if (chunk->end_offset) {
1425         /* hide the end of the bitmap */
1426         offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
1427         bitmap_set(chunk->alloc_map,
1428                pcpu_chunk_map_bits(chunk) - offset_bits,
1429                offset_bits);
1430         set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
1431             chunk->bound_map);
1432         set_bit(region_bits, chunk->bound_map);
1433 
1434         pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
1435                          - offset_bits, offset_bits);
1436     }
1437 
1438     return chunk;
1439 }
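
     /*
      * Worked example, assuming 4 KiB pages with PCPU_BITMAP_BLOCK_SIZE equal
      * to PAGE_SIZE: for a tmp_addr ending in 0x234 and a map_size of 0x8000,
      * start_offset is 0x234, region_size is ALIGN(0x234 + 0x8000, 0x1000) ==
      * 0x9000, and end_offset is 0x9000 - 0x234 - 0x8000 == 0xdcc.  The hidden
      * head and tail are then marked allocated in the bitmaps above.
      */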
1440 
1441 static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
1442 {
1443     struct pcpu_chunk *chunk;
1444     int region_bits;
1445 
1446     chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
1447     if (!chunk)
1448         return NULL;
1449 
1450     INIT_LIST_HEAD(&chunk->list);
1451     chunk->nr_pages = pcpu_unit_pages;
1452     region_bits = pcpu_chunk_map_bits(chunk);
1453 
1454     chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
1455                        sizeof(chunk->alloc_map[0]), gfp);
1456     if (!chunk->alloc_map)
1457         goto alloc_map_fail;
1458 
1459     chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
1460                        sizeof(chunk->bound_map[0]), gfp);
1461     if (!chunk->bound_map)
1462         goto bound_map_fail;
1463 
1464     chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
1465                        sizeof(chunk->md_blocks[0]), gfp);
1466     if (!chunk->md_blocks)
1467         goto md_blocks_fail;
1468 
1469 #ifdef CONFIG_MEMCG_KMEM
1470     if (!mem_cgroup_kmem_disabled()) {
1471         chunk->obj_cgroups =
1472             pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
1473                     sizeof(struct obj_cgroup *), gfp);
1474         if (!chunk->obj_cgroups)
1475             goto objcg_fail;
1476     }
1477 #endif
1478 
1479     pcpu_init_md_blocks(chunk);
1480 
1481     /* init metadata */
1482     chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;
1483 
1484     return chunk;
1485 
1486 #ifdef CONFIG_MEMCG_KMEM
1487 objcg_fail:
1488     pcpu_mem_free(chunk->md_blocks);
1489 #endif
1490 md_blocks_fail:
1491     pcpu_mem_free(chunk->bound_map);
1492 bound_map_fail:
1493     pcpu_mem_free(chunk->alloc_map);
1494 alloc_map_fail:
1495     pcpu_mem_free(chunk);
1496 
1497     return NULL;
1498 }
1499 
1500 static void pcpu_free_chunk(struct pcpu_chunk *chunk)
1501 {
1502     if (!chunk)
1503         return;
1504 #ifdef CONFIG_MEMCG_KMEM
1505     pcpu_mem_free(chunk->obj_cgroups);
1506 #endif
1507     pcpu_mem_free(chunk->md_blocks);
1508     pcpu_mem_free(chunk->bound_map);
1509     pcpu_mem_free(chunk->alloc_map);
1510     pcpu_mem_free(chunk);
1511 }
1512 
1513 /**
1514  * pcpu_chunk_populated - post-population bookkeeping
1515  * @chunk: pcpu_chunk which got populated
1516  * @page_start: the start page
1517  * @page_end: the end page
1518  *
1519  * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
1520  * the bookkeeping information accordingly.  Must be called after each
1521  * successful population.
1522  */
1523 static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
1524                  int page_end)
1525 {
1526     int nr = page_end - page_start;
1527 
1528     lockdep_assert_held(&pcpu_lock);
1529 
1530     bitmap_set(chunk->populated, page_start, nr);
1531     chunk->nr_populated += nr;
1532     pcpu_nr_populated += nr;
1533 
1534     pcpu_update_empty_pages(chunk, nr);
1535 }
1536 
1537 /**
1538  * pcpu_chunk_depopulated - post-depopulation bookkeeping
1539  * @chunk: pcpu_chunk which got depopulated
1540  * @page_start: the start page
1541  * @page_end: the end page
1542  *
1543  * Pages in [@page_start,@page_end) have been depopulated from @chunk.
1544  * Update the bookkeeping information accordingly.  Must be called after
1545  * each successful depopulation.
1546  */
1547 static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
1548                    int page_start, int page_end)
1549 {
1550     int nr = page_end - page_start;
1551 
1552     lockdep_assert_held(&pcpu_lock);
1553 
1554     bitmap_clear(chunk->populated, page_start, nr);
1555     chunk->nr_populated -= nr;
1556     pcpu_nr_populated -= nr;
1557 
1558     pcpu_update_empty_pages(chunk, -nr);
1559 }
1560 
1561 /*
1562  * Chunk management implementation.
1563  *
1564  * To allow different implementations, chunk alloc/free and
1565  * [de]population are implemented in a separate file which is pulled
1566  * into this file and compiled together.  The following functions
1567  * should be implemented.
1568  *
1569  * pcpu_populate_chunk      - populate the specified range of a chunk
1570  * pcpu_depopulate_chunk    - depopulate the specified range of a chunk
1571  * pcpu_post_unmap_tlb_flush    - flush tlb for the specified range of a chunk
1572  * pcpu_create_chunk        - create a new chunk
1573  * pcpu_destroy_chunk       - destroy a chunk, always preceded by full depop
1574  * pcpu_addr_to_page        - translate address to the backing struct page
1575  * pcpu_verify_alloc_info   - check alloc_info is acceptable during init
1576  */
1577 static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
1578                    int page_start, int page_end, gfp_t gfp);
1579 static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
1580                   int page_start, int page_end);
1581 static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
1582                       int page_start, int page_end);
1583 static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
1584 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
1585 static struct page *pcpu_addr_to_page(void *addr);
1586 static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
1587 
1588 #ifdef CONFIG_NEED_PER_CPU_KM
1589 #include "percpu-km.c"
1590 #else
1591 #include "percpu-vm.c"
1592 #endif
1593 
1594 /**
1595  * pcpu_chunk_addr_search - determine chunk containing specified address
1596  * @addr: address for which the chunk needs to be determined.
1597  *
1598  * This is an internal function that handles all but static allocations.
1599  * Static percpu address values should never be passed into the allocator.
1600  *
1601  * RETURNS:
1602  * The address of the found chunk.
1603  */
1604 static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
1605 {
1606     /* is it in the dynamic region (first chunk)? */
1607     if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
1608         return pcpu_first_chunk;
1609 
1610     /* is it in the reserved region? */
1611     if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
1612         return pcpu_reserved_chunk;
1613 
1614     /*
1615      * The address is relative to unit0 which might be unused and
1616      * thus unmapped.  Offset the address to the unit space of the
1617      * current processor before looking it up in the vmalloc
1618      * space.  Note that any possible cpu id can be used here, so
1619      * there's no need to worry about preemption or cpu hotplug.
1620      */
1621     addr += pcpu_unit_offsets[raw_smp_processor_id()];
1622     return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
1623 }
1624 
1625 #ifdef CONFIG_MEMCG_KMEM
1626 static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
1627                       struct obj_cgroup **objcgp)
1628 {
1629     struct obj_cgroup *objcg;
1630 
1631     if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT))
1632         return true;
1633 
1634     objcg = get_obj_cgroup_from_current();
1635     if (!objcg)
1636         return true;
1637 
1638     if (obj_cgroup_charge(objcg, gfp, pcpu_obj_full_size(size))) {
1639         obj_cgroup_put(objcg);
1640         return false;
1641     }
1642 
1643     *objcgp = objcg;
1644     return true;
1645 }
1646 
1647 static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
1648                        struct pcpu_chunk *chunk, int off,
1649                        size_t size)
1650 {
1651     if (!objcg)
1652         return;
1653 
1654     if (likely(chunk && chunk->obj_cgroups)) {
1655         chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;
1656 
1657         rcu_read_lock();
1658         mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
1659                 pcpu_obj_full_size(size));
1660         rcu_read_unlock();
1661     } else {
1662         obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size));
1663         obj_cgroup_put(objcg);
1664     }
1665 }
1666 
1667 static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1668 {
1669     struct obj_cgroup *objcg;
1670 
1671     if (unlikely(!chunk->obj_cgroups))
1672         return;
1673 
1674     objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT];
1675     if (!objcg)
1676         return;
1677     chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL;
1678 
1679     obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size));
1680 
1681     rcu_read_lock();
1682     mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
1683             -pcpu_obj_full_size(size));
1684     rcu_read_unlock();
1685 
1686     obj_cgroup_put(objcg);
1687 }
1688 
1689 #else /* CONFIG_MEMCG_KMEM */
1690 static bool
1691 pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp)
1692 {
1693     return true;
1694 }
1695 
1696 static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
1697                        struct pcpu_chunk *chunk, int off,
1698                        size_t size)
1699 {
1700 }
1701 
1702 static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1703 {
1704 }
1705 #endif /* CONFIG_MEMCG_KMEM */
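
/*
 * Illustrative sketch (editor's example, not part of this file): a
 * memcg-accounted dynamic percpu allocation.  Passing __GFP_ACCOUNT makes
 * pcpu_memcg_pre_alloc_hook() charge the area to the allocating task's
 * memory cgroup.  The foo_stats structure and helpers are hypothetical.
 */
struct foo_stats {
	u64 events;
};

static struct foo_stats __percpu *foo_stats;

static int foo_stats_init(void)
{
	foo_stats = alloc_percpu_gfp(struct foo_stats,
				     GFP_KERNEL | __GFP_ACCOUNT);
	return foo_stats ? 0 : -ENOMEM;
}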
1706 
1707 /**
1708  * pcpu_alloc - the percpu allocator
1709  * @size: size of area to allocate in bytes
1710  * @align: alignment of area (max PAGE_SIZE)
1711  * @reserved: allocate from the reserved chunk if available
1712  * @gfp: allocation flags
1713  *
1714  * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
1715  * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
1716  * then no warning will be triggered on invalid or failed allocation
1717  * requests.
1718  *
1719  * RETURNS:
1720  * Percpu pointer to the allocated area on success, NULL on failure.
1721  */
1722 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
1723                  gfp_t gfp)
1724 {
1725     gfp_t pcpu_gfp;
1726     bool is_atomic;
1727     bool do_warn;
1728     struct obj_cgroup *objcg = NULL;
1729     static int warn_limit = 10;
1730     struct pcpu_chunk *chunk, *next;
1731     const char *err;
1732     int slot, off, cpu, ret;
1733     unsigned long flags;
1734     void __percpu *ptr;
1735     size_t bits, bit_align;
1736 
1737     gfp = current_gfp_context(gfp);
1738     /* whitelisted flags that can be passed to the backing allocators */
1739     pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
1740     is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
1741     do_warn = !(gfp & __GFP_NOWARN);
1742 
1743     /*
1744      * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
1745      * therefore alignment must be a minimum of that many bytes.
1746      * An allocation may incur up to PCPU_MIN_ALLOC_SIZE - 1 bytes of
1747      * internal fragmentation from rounding up.
1748      */
1749     if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
1750         align = PCPU_MIN_ALLOC_SIZE;
1751 
1752     size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
1753     bits = size >> PCPU_MIN_ALLOC_SHIFT;
1754     bit_align = align >> PCPU_MIN_ALLOC_SHIFT;
1755 
1756     if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
1757              !is_power_of_2(align))) {
1758         WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
1759              size, align);
1760         return NULL;
1761     }
1762 
1763     if (unlikely(!pcpu_memcg_pre_alloc_hook(size, gfp, &objcg)))
1764         return NULL;
1765 
1766     if (!is_atomic) {
1767         /*
1768          * pcpu_balance_workfn() allocates memory under this mutex,
1769          * and it may wait for memory reclaim. Allow current task
1770          * to become OOM victim, in case of memory pressure.
1771          */
1772         if (gfp & __GFP_NOFAIL) {
1773             mutex_lock(&pcpu_alloc_mutex);
1774         } else if (mutex_lock_killable(&pcpu_alloc_mutex)) {
1775             pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
1776             return NULL;
1777         }
1778     }
1779 
1780     spin_lock_irqsave(&pcpu_lock, flags);
1781 
1782     /* serve reserved allocations from the reserved chunk if available */
1783     if (reserved && pcpu_reserved_chunk) {
1784         chunk = pcpu_reserved_chunk;
1785 
1786         off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
1787         if (off < 0) {
1788             err = "alloc from reserved chunk failed";
1789             goto fail_unlock;
1790         }
1791 
1792         off = pcpu_alloc_area(chunk, bits, bit_align, off);
1793         if (off >= 0)
1794             goto area_found;
1795 
1796         err = "alloc from reserved chunk failed";
1797         goto fail_unlock;
1798     }
1799 
1800 restart:
1801     /* search through normal chunks */
1802     for (slot = pcpu_size_to_slot(size); slot <= pcpu_free_slot; slot++) {
1803         list_for_each_entry_safe(chunk, next, &pcpu_chunk_lists[slot],
1804                      list) {
1805             off = pcpu_find_block_fit(chunk, bits, bit_align,
1806                           is_atomic);
1807             if (off < 0) {
1808                 if (slot < PCPU_SLOT_FAIL_THRESHOLD)
1809                     pcpu_chunk_move(chunk, 0);
1810                 continue;
1811             }
1812 
1813             off = pcpu_alloc_area(chunk, bits, bit_align, off);
1814             if (off >= 0) {
1815                 pcpu_reintegrate_chunk(chunk);
1816                 goto area_found;
1817             }
1818         }
1819     }
1820 
1821     spin_unlock_irqrestore(&pcpu_lock, flags);
1822 
1823     /*
1824      * No space left.  Create a new chunk.  We don't want multiple
1825      * tasks to create chunks simultaneously.  Serialize and create iff
1826      * there's still no empty chunk after grabbing the mutex.
1827      */
1828     if (is_atomic) {
1829         err = "atomic alloc failed, no space left";
1830         goto fail;
1831     }
1832 
1833     if (list_empty(&pcpu_chunk_lists[pcpu_free_slot])) {
1834         chunk = pcpu_create_chunk(pcpu_gfp);
1835         if (!chunk) {
1836             err = "failed to allocate new chunk";
1837             goto fail;
1838         }
1839 
1840         spin_lock_irqsave(&pcpu_lock, flags);
1841         pcpu_chunk_relocate(chunk, -1);
1842     } else {
1843         spin_lock_irqsave(&pcpu_lock, flags);
1844     }
1845 
1846     goto restart;
1847 
1848 area_found:
1849     pcpu_stats_area_alloc(chunk, size);
1850     spin_unlock_irqrestore(&pcpu_lock, flags);
1851 
1852     /* populate if not all pages are already there */
1853     if (!is_atomic) {
1854         unsigned int page_end, rs, re;
1855 
1856         rs = PFN_DOWN(off);
1857         page_end = PFN_UP(off + size);
1858 
1859         for_each_clear_bitrange_from(rs, re, chunk->populated, page_end) {
1860             WARN_ON(chunk->immutable);
1861 
1862             ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
1863 
1864             spin_lock_irqsave(&pcpu_lock, flags);
1865             if (ret) {
1866                 pcpu_free_area(chunk, off);
1867                 err = "failed to populate";
1868                 goto fail_unlock;
1869             }
1870             pcpu_chunk_populated(chunk, rs, re);
1871             spin_unlock_irqrestore(&pcpu_lock, flags);
1872         }
1873 
1874         mutex_unlock(&pcpu_alloc_mutex);
1875     }
1876 
1877     if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
1878         pcpu_schedule_balance_work();
1879 
1880     /* clear the areas and return address relative to base address */
1881     for_each_possible_cpu(cpu)
1882         memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1883 
1884     ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
1885     kmemleak_alloc_percpu(ptr, size, gfp);
1886 
1887     trace_percpu_alloc_percpu(_RET_IP_, reserved, is_atomic, size, align,
1888                   chunk->base_addr, off, ptr,
1889                   pcpu_obj_full_size(size), gfp);
1890 
1891     pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
1892 
1893     return ptr;
1894 
1895 fail_unlock:
1896     spin_unlock_irqrestore(&pcpu_lock, flags);
1897 fail:
1898     trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1899 
1900     if (!is_atomic && do_warn && warn_limit) {
1901         pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
1902             size, align, is_atomic, err);
1903         dump_stack();
1904         if (!--warn_limit)
1905             pr_info("limit reached, disable warning\n");
1906     }
1907     if (is_atomic) {
1908         /* see the flag handling in pcpu_balance_workfn() */
1909         pcpu_atomic_alloc_failed = true;
1910         pcpu_schedule_balance_work();
1911     } else {
1912         mutex_unlock(&pcpu_alloc_mutex);
1913     }
1914 
1915     pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
1916 
1917     return NULL;
1918 }
1919 
1920 /**
1921  * __alloc_percpu_gfp - allocate dynamic percpu area
1922  * @size: size of area to allocate in bytes
1923  * @align: alignment of area (max PAGE_SIZE)
1924  * @gfp: allocation flags
1925  *
1926  * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
1927  * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
1928  * be called from any context but is a lot more likely to fail. If @gfp
1929  * has __GFP_NOWARN then no warning will be triggered on invalid or failed
1930  * allocation requests.
1931  *
1932  * RETURNS:
1933  * Percpu pointer to the allocated area on success, NULL on failure.
1934  */
1935 void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
1936 {
1937     return pcpu_alloc(size, align, false, gfp);
1938 }
1939 EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
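
/*
 * Illustrative usage sketch (editor's example, not part of this file):
 * allocate a per-cpu counter, bump it locklessly on the local cpu and
 * sum it across all possible cpus.  The foo_* names are hypothetical.
 */
static unsigned long __percpu *foo_counter;

static int foo_counter_init(void)
{
	/* GFP_KERNEL: may sleep; use e.g. GFP_NOWAIT for an atomic alloc */
	foo_counter = __alloc_percpu_gfp(sizeof(unsigned long),
					 __alignof__(unsigned long),
					 GFP_KERNEL);
	return foo_counter ? 0 : -ENOMEM;
}

static void foo_counter_hit(void)
{
	this_cpu_inc(*foo_counter);
}

static unsigned long foo_counter_sum(void)
{
	unsigned long sum = 0;
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(foo_counter, cpu);
	return sum;
}

static void foo_counter_exit(void)
{
	free_percpu(foo_counter);
}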
1940 
1941 /**
1942  * __alloc_percpu - allocate dynamic percpu area
1943  * @size: size of area to allocate in bytes
1944  * @align: alignment of area (max PAGE_SIZE)
1945  *
1946  * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
1947  */
1948 void __percpu *__alloc_percpu(size_t size, size_t align)
1949 {
1950     return pcpu_alloc(size, align, false, GFP_KERNEL);
1951 }
1952 EXPORT_SYMBOL_GPL(__alloc_percpu);
1953 
1954 /**
1955  * __alloc_reserved_percpu - allocate reserved percpu area
1956  * @size: size of area to allocate in bytes
1957  * @align: alignment of area (max PAGE_SIZE)
1958  *
1959  * Allocate zero-filled percpu area of @size bytes aligned at @align
1960  * from reserved percpu area if arch has set it up; otherwise,
1961  * allocation is served from the same dynamic area.  Might sleep.
1962  * Might trigger writeouts.
1963  *
1964  * CONTEXT:
1965  * Does GFP_KERNEL allocation.
1966  *
1967  * RETURNS:
1968  * Percpu pointer to the allocated area on success, NULL on failure.
1969  */
1970 void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
1971 {
1972     return pcpu_alloc(size, align, true, GFP_KERNEL);
1973 }
1974 
1975 /**
1976  * pcpu_balance_free - manage the amount of free chunks
1977  * @empty_only: free chunks only if there are no populated pages
1978  *
1979  * If empty_only is %false, reclaim all fully free chunks regardless of the
1980  * number of populated pages.  Otherwise, only reclaim chunks that have no
1981  * populated pages.
1982  *
1983  * CONTEXT:
1984  * pcpu_lock (can be dropped temporarily)
1985  */
1986 static void pcpu_balance_free(bool empty_only)
1987 {
1988     LIST_HEAD(to_free);
1989     struct list_head *free_head = &pcpu_chunk_lists[pcpu_free_slot];
1990     struct pcpu_chunk *chunk, *next;
1991 
1992     lockdep_assert_held(&pcpu_lock);
1993 
1994     /*
1995      * There's no reason to keep around multiple unused chunks and VM
1996      * areas can be scarce.  Destroy all free chunks except for one.
1997      */
1998     list_for_each_entry_safe(chunk, next, free_head, list) {
1999         WARN_ON(chunk->immutable);
2000 
2001         /* spare the first one */
2002         if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
2003             continue;
2004 
2005         if (!empty_only || chunk->nr_empty_pop_pages == 0)
2006             list_move(&chunk->list, &to_free);
2007     }
2008 
2009     if (list_empty(&to_free))
2010         return;
2011 
2012     spin_unlock_irq(&pcpu_lock);
2013     list_for_each_entry_safe(chunk, next, &to_free, list) {
2014         unsigned int rs, re;
2015 
2016         for_each_set_bitrange(rs, re, chunk->populated, chunk->nr_pages) {
2017             pcpu_depopulate_chunk(chunk, rs, re);
2018             spin_lock_irq(&pcpu_lock);
2019             pcpu_chunk_depopulated(chunk, rs, re);
2020             spin_unlock_irq(&pcpu_lock);
2021         }
2022         pcpu_destroy_chunk(chunk);
2023         cond_resched();
2024     }
2025     spin_lock_irq(&pcpu_lock);
2026 }
2027 
2028 /**
2029  * pcpu_balance_populated - manage the amount of populated pages
2030  *
2031  * Maintain a certain amount of populated pages to satisfy atomic allocations.
2032  * It is possible that this is called when physical memory is scarce, causing
2033  * the OOM killer to be triggered.  We should avoid doing so until an actual
2034  * allocation causes the failure as it is possible that requests can be
2035  * serviced from already backed regions.
2036  *
2037  * CONTEXT:
2038  * pcpu_lock (can be dropped temporarily)
2039  */
2040 static void pcpu_balance_populated(void)
2041 {
2042     /* gfp flags passed to underlying allocators */
2043     const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
2044     struct pcpu_chunk *chunk;
2045     int slot, nr_to_pop, ret;
2046 
2047     lockdep_assert_held(&pcpu_lock);
2048 
2049     /*
2050      * Ensure there are a certain number of free populated pages for
2051      * atomic allocs.  Fill up from the most packed so that atomic
2052      * allocs don't increase fragmentation.  If atomic allocation
2053      * failed previously, always populate the maximum amount.  This
2054      * should prevent atomic allocs larger than PAGE_SIZE from failing
2055      * indefinitely; however, large atomic allocs are not
2056      * something we support properly and can be highly unreliable and
2057      * inefficient.
2058      */
2059 retry_pop:
2060     if (pcpu_atomic_alloc_failed) {
2061         nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
2062         /* best effort anyway, don't worry about synchronization */
2063         pcpu_atomic_alloc_failed = false;
2064     } else {
2065         nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
2066                   pcpu_nr_empty_pop_pages,
2067                   0, PCPU_EMPTY_POP_PAGES_HIGH);
2068     }
2069 
2070     for (slot = pcpu_size_to_slot(PAGE_SIZE); slot <= pcpu_free_slot; slot++) {
2071         unsigned int nr_unpop = 0, rs, re;
2072 
2073         if (!nr_to_pop)
2074             break;
2075 
2076         list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) {
2077             nr_unpop = chunk->nr_pages - chunk->nr_populated;
2078             if (nr_unpop)
2079                 break;
2080         }
2081 
2082         if (!nr_unpop)
2083             continue;
2084 
2085         /* @chunk can't go away while pcpu_alloc_mutex is held */
2086         for_each_clear_bitrange(rs, re, chunk->populated, chunk->nr_pages) {
2087             int nr = min_t(int, re - rs, nr_to_pop);
2088 
2089             spin_unlock_irq(&pcpu_lock);
2090             ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
2091             cond_resched();
2092             spin_lock_irq(&pcpu_lock);
2093             if (!ret) {
2094                 nr_to_pop -= nr;
2095                 pcpu_chunk_populated(chunk, rs, rs + nr);
2096             } else {
2097                 nr_to_pop = 0;
2098             }
2099 
2100             if (!nr_to_pop)
2101                 break;
2102         }
2103     }
2104 
2105     if (nr_to_pop) {
2106         /* ran out of chunks to populate, create a new one and retry */
2107         spin_unlock_irq(&pcpu_lock);
2108         chunk = pcpu_create_chunk(gfp);
2109         cond_resched();
2110         spin_lock_irq(&pcpu_lock);
2111         if (chunk) {
2112             pcpu_chunk_relocate(chunk, -1);
2113             goto retry_pop;
2114         }
2115     }
2116 }
2117 
2118 /**
2119  * pcpu_reclaim_populated - scan over to_depopulate chunks and free empty pages
2120  *
2121  * Scan over chunks in the depopulate list and try to release unused populated
2122  * pages back to the system.  Depopulated chunks are sidelined to prevent
2123  * repopulating these pages unless required.  Fully free chunks are reintegrated
2124  * and freed accordingly (1 is kept around).  If we drop below the empty
2125  * populated pages threshold, reintegrate the chunk if it has empty free pages.
2126  * Each chunk is scanned in the reverse order to keep populated pages close to
2127  * the beginning of the chunk.
2128  *
2129  * CONTEXT:
2130  * pcpu_lock (can be dropped temporarily)
2131  *
2132  */
2133 static void pcpu_reclaim_populated(void)
2134 {
2135     struct pcpu_chunk *chunk;
2136     struct pcpu_block_md *block;
2137     int freed_page_start, freed_page_end;
2138     int i, end;
2139     bool reintegrate;
2140 
2141     lockdep_assert_held(&pcpu_lock);
2142 
2143     /*
2144      * Once a chunk is isolated to the to_depopulate list, the chunk is no
2145      * longer discoverable to allocations which may populate pages.  The only
2146      * other accessor is the free path, which only returns the area back to the
2147      * allocator without touching the populated bitmap.
2148      */
2149     while (!list_empty(&pcpu_chunk_lists[pcpu_to_depopulate_slot])) {
2150         chunk = list_first_entry(&pcpu_chunk_lists[pcpu_to_depopulate_slot],
2151                      struct pcpu_chunk, list);
2152         WARN_ON(chunk->immutable);
2153 
2154         /*
2155          * Scan chunk's pages in the reverse order to keep populated
2156          * pages close to the beginning of the chunk.
2157          */
2158         freed_page_start = chunk->nr_pages;
2159         freed_page_end = 0;
2160         reintegrate = false;
2161         for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) {
2162             /* no more work to do */
2163             if (chunk->nr_empty_pop_pages == 0)
2164                 break;
2165 
2166             /* reintegrate chunk to prevent atomic alloc failures */
2167             if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_HIGH) {
2168                 reintegrate = true;
2169                 goto end_chunk;
2170             }
2171 
2172             /*
2173              * If the page is empty and populated, start or
2174              * extend the (i, end) range.  If i == 0, decrease
2175              * i and perform the depopulation to cover the last
2176              * (first) page in the chunk.
2177              */
2178             block = chunk->md_blocks + i;
2179             if (block->contig_hint == PCPU_BITMAP_BLOCK_BITS &&
2180                 test_bit(i, chunk->populated)) {
2181                 if (end == -1)
2182                     end = i;
2183                 if (i > 0)
2184                     continue;
2185                 i--;
2186             }
2187 
2188             /* depopulate if there is an active range */
2189             if (end == -1)
2190                 continue;
2191 
2192             spin_unlock_irq(&pcpu_lock);
2193             pcpu_depopulate_chunk(chunk, i + 1, end + 1);
2194             cond_resched();
2195             spin_lock_irq(&pcpu_lock);
2196 
2197             pcpu_chunk_depopulated(chunk, i + 1, end + 1);
2198             freed_page_start = min(freed_page_start, i + 1);
2199             freed_page_end = max(freed_page_end, end + 1);
2200 
2201             /* reset the range and continue */
2202             end = -1;
2203         }
2204 
2205 end_chunk:
2206         /* batch tlb flush per chunk to amortize cost */
2207         if (freed_page_start < freed_page_end) {
2208             spin_unlock_irq(&pcpu_lock);
2209             pcpu_post_unmap_tlb_flush(chunk,
2210                           freed_page_start,
2211                           freed_page_end);
2212             cond_resched();
2213             spin_lock_irq(&pcpu_lock);
2214         }
2215 
2216         if (reintegrate || chunk->free_bytes == pcpu_unit_size)
2217             pcpu_reintegrate_chunk(chunk);
2218         else
2219             list_move_tail(&chunk->list,
2220                        &pcpu_chunk_lists[pcpu_sidelined_slot]);
2221     }
2222 }
2223 
2224 /**
2225  * pcpu_balance_workfn - manage the amount of free chunks and populated pages
2226  * @work: unused
2227  *
2228  * Manage the number of fully free chunks and the number of populated
2229  * pages.  An important thing to consider is when pages are freed and
2230  * how they contribute to the global counts.
2231  */
2232 static void pcpu_balance_workfn(struct work_struct *work)
2233 {
2234     /*
2235      * pcpu_balance_free() is called twice because the first time we may
2236      * trim pages in the active pcpu_nr_empty_pop_pages which may cause us
2237      * to grow other chunks.  This then gives pcpu_reclaim_populated() time
2238      * to move fully free chunks to the active list to be freed if
2239      * appropriate.
2240      */
2241     mutex_lock(&pcpu_alloc_mutex);
2242     spin_lock_irq(&pcpu_lock);
2243 
2244     pcpu_balance_free(false);
2245     pcpu_reclaim_populated();
2246     pcpu_balance_populated();
2247     pcpu_balance_free(true);
2248 
2249     spin_unlock_irq(&pcpu_lock);
2250     mutex_unlock(&pcpu_alloc_mutex);
2251 }
2252 
2253 /**
2254  * free_percpu - free percpu area
2255  * @ptr: pointer to area to free
2256  *
2257  * Free percpu area @ptr.
2258  *
2259  * CONTEXT:
2260  * Can be called from atomic context.
2261  */
2262 void free_percpu(void __percpu *ptr)
2263 {
2264     void *addr;
2265     struct pcpu_chunk *chunk;
2266     unsigned long flags;
2267     int size, off;
2268     bool need_balance = false;
2269 
2270     if (!ptr)
2271         return;
2272 
2273     kmemleak_free_percpu(ptr);
2274 
2275     addr = __pcpu_ptr_to_addr(ptr);
2276 
2277     spin_lock_irqsave(&pcpu_lock, flags);
2278 
2279     chunk = pcpu_chunk_addr_search(addr);
2280     off = addr - chunk->base_addr;
2281 
2282     size = pcpu_free_area(chunk, off);
2283 
2284     pcpu_memcg_free_hook(chunk, off, size);
2285 
2286     /*
2287      * If there is more than one fully free chunk, wake up the grim reaper.
2288      * If the chunk is isolated, it may be in the process of being
2289      * reclaimed.  Let reclaim manage cleaning up of that chunk.
2290      */
2291     if (!chunk->isolated && chunk->free_bytes == pcpu_unit_size) {
2292         struct pcpu_chunk *pos;
2293 
2294         list_for_each_entry(pos, &pcpu_chunk_lists[pcpu_free_slot], list)
2295             if (pos != chunk) {
2296                 need_balance = true;
2297                 break;
2298             }
2299     } else if (pcpu_should_reclaim_chunk(chunk)) {
2300         pcpu_isolate_chunk(chunk);
2301         need_balance = true;
2302     }
2303 
2304     trace_percpu_free_percpu(chunk->base_addr, off, ptr);
2305 
2306     spin_unlock_irqrestore(&pcpu_lock, flags);
2307 
2308     if (need_balance)
2309         pcpu_schedule_balance_work();
2310 }
2311 EXPORT_SYMBOL_GPL(free_percpu);
2312 
2313 bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
2314 {
2315 #ifdef CONFIG_SMP
2316     const size_t static_size = __per_cpu_end - __per_cpu_start;
2317     void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2318     unsigned int cpu;
2319 
2320     for_each_possible_cpu(cpu) {
2321         void *start = per_cpu_ptr(base, cpu);
2322         void *va = (void *)addr;
2323 
2324         if (va >= start && va < start + static_size) {
2325             if (can_addr) {
2326                 *can_addr = (unsigned long) (va - start);
2327                 *can_addr += (unsigned long)
2328                     per_cpu_ptr(base, get_boot_cpu_id());
2329             }
2330             return true;
2331         }
2332     }
2333 #endif
2334     /* on UP, can't distinguish from other static vars, always false */
2335     return false;
2336 }
2337 
2338 /**
2339  * is_kernel_percpu_address - test whether address is from static percpu area
2340  * @addr: address to test
2341  *
2342  * Test whether @addr belongs to in-kernel static percpu area.  Module
2343  * static percpu areas are not considered.  For those, use
2344  * is_module_percpu_address().
2345  *
2346  * RETURNS:
2347  * %true if @addr is from in-kernel static percpu area, %false otherwise.
2348  */
2349 bool is_kernel_percpu_address(unsigned long addr)
2350 {
2351     return __is_kernel_percpu_address(addr, NULL);
2352 }
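
/*
 * Illustrative sketch (editor's example, not part of this file): deciding
 * whether a pointer refers to an in-kernel static percpu variable before
 * treating it as an ordinary kernel symbol.  check_key() is hypothetical.
 */
static bool check_key(const void *key)
{
	return is_kernel_percpu_address((unsigned long)key);
}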
2353 
2354 /**
2355  * per_cpu_ptr_to_phys - convert translated percpu address to physical address
2356  * @addr: the address to be converted to physical address
2357  *
2358  * Given @addr, which is a dereferenceable address obtained via one of
2359  * percpu access macros, this function translates it into its physical
2360  * address.  The caller is responsible for ensuring @addr stays valid
2361  * until this function finishes.
2362  *
2363  * The percpu allocator has special setup for the first chunk, which
2364  * currently supports either embedding in the linear address space or a
2365  * vmalloc mapping; from the second chunk onwards, the backing allocator
2366  * (currently either vm or km) provides the translation.
2367  *
2368  * The addr could be translated simply without checking whether it falls
2369  * into the first chunk, but the current code better reflects how the
2370  * percpu allocator actually works, and the verification can discover bugs
2371  * both in the percpu allocator itself and in per_cpu_ptr_to_phys()
2372  * callers.  So we keep the current code.
2373  *
2374  * RETURNS:
2375  * The physical address for @addr.
2376  */
2377 phys_addr_t per_cpu_ptr_to_phys(void *addr)
2378 {
2379     void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2380     bool in_first_chunk = false;
2381     unsigned long first_low, first_high;
2382     unsigned int cpu;
2383 
2384     /*
2385      * The following test on unit_low/high isn't strictly
2386      * necessary but will speed up lookups of addresses which
2387      * aren't in the first chunk.
2388      *
2389      * The address check is against full chunk sizes.  pcpu_base_addr
2390      * points to the beginning of the first chunk including the
2391      * static region.  Assumes good intent as the first chunk may
2392      * not be full (ie. < pcpu_unit_pages in size).
2393      */
2394     first_low = (unsigned long)pcpu_base_addr +
2395             pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
2396     first_high = (unsigned long)pcpu_base_addr +
2397              pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
2398     if ((unsigned long)addr >= first_low &&
2399         (unsigned long)addr < first_high) {
2400         for_each_possible_cpu(cpu) {
2401             void *start = per_cpu_ptr(base, cpu);
2402 
2403             if (addr >= start && addr < start + pcpu_unit_size) {
2404                 in_first_chunk = true;
2405                 break;
2406             }
2407         }
2408     }
2409 
2410     if (in_first_chunk) {
2411         if (!is_vmalloc_addr(addr))
2412             return __pa(addr);
2413         else
2414             return page_to_phys(vmalloc_to_page(addr)) +
2415                    offset_in_page(addr);
2416     } else
2417         return page_to_phys(pcpu_addr_to_page(addr)) +
2418                offset_in_page(addr);
2419 }
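
/*
 * Illustrative sketch (editor's example, not part of this file): obtain the
 * physical address backing one cpu's copy of a dynamic percpu allocation,
 * e.g. to program it into a per-cpu device or hypervisor interface.
 * foo_area is hypothetical and must stay allocated across the call.
 */
static u64 __percpu *foo_area;

static phys_addr_t foo_area_phys(unsigned int cpu)
{
	return per_cpu_ptr_to_phys(per_cpu_ptr(foo_area, cpu));
}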
2420 
2421 /**
2422  * pcpu_alloc_alloc_info - allocate percpu allocation info
2423  * @nr_groups: the number of groups
2424  * @nr_units: the number of units
2425  *
2426  * Allocate ai which is large enough for @nr_groups groups containing
2427  * @nr_units units.  The returned ai's groups[0].cpu_map points to the
2428  * cpu_map array which is long enough for @nr_units and filled with
2429  * NR_CPUS.  It's the caller's responsibility to initialize the cpu_map
2430  * pointers of the other groups.
2431  *
2432  * RETURNS:
2433  * Pointer to the allocated pcpu_alloc_info on success, NULL on
2434  * failure.
2435  */
2436 struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
2437                               int nr_units)
2438 {
2439     struct pcpu_alloc_info *ai;
2440     size_t base_size, ai_size;
2441     void *ptr;
2442     int unit;
2443 
2444     base_size = ALIGN(struct_size(ai, groups, nr_groups),
2445               __alignof__(ai->groups[0].cpu_map[0]));
2446     ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
2447 
2448     ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
2449     if (!ptr)
2450         return NULL;
2451     ai = ptr;
2452     ptr += base_size;
2453 
2454     ai->groups[0].cpu_map = ptr;
2455 
2456     for (unit = 0; unit < nr_units; unit++)
2457         ai->groups[0].cpu_map[unit] = NR_CPUS;
2458 
2459     ai->nr_groups = nr_groups;
2460     ai->__ai_size = PFN_ALIGN(ai_size);
2461 
2462     return ai;
2463 }
2464 
2465 /**
2466  * pcpu_free_alloc_info - free percpu allocation info
2467  * @ai: pcpu_alloc_info to free
2468  *
2469  * Free @ai which was allocated by pcpu_alloc_alloc_info().
2470  */
2471 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
2472 {
2473     memblock_free(ai, ai->__ai_size);
2474 }
2475 
2476 /**
2477  * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
2478  * @lvl: loglevel
2479  * @ai: allocation info to dump
2480  *
2481  * Print out information about @ai using loglevel @lvl.
2482  */
2483 static void pcpu_dump_alloc_info(const char *lvl,
2484                  const struct pcpu_alloc_info *ai)
2485 {
2486     int group_width = 1, cpu_width = 1, width;
2487     char empty_str[] = "--------";
2488     int alloc = 0, alloc_end = 0;
2489     int group, v;
2490     int upa, apl;   /* units per alloc, allocs per line */
2491 
2492     v = ai->nr_groups;
2493     while (v /= 10)
2494         group_width++;
2495 
2496     v = num_possible_cpus();
2497     while (v /= 10)
2498         cpu_width++;
2499     empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
2500 
2501     upa = ai->alloc_size / ai->unit_size;
2502     width = upa * (cpu_width + 1) + group_width + 3;
2503     apl = rounddown_pow_of_two(max(60 / width, 1));
2504 
2505     printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
2506            lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
2507            ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
2508 
2509     for (group = 0; group < ai->nr_groups; group++) {
2510         const struct pcpu_group_info *gi = &ai->groups[group];
2511         int unit = 0, unit_end = 0;
2512 
2513         BUG_ON(gi->nr_units % upa);
2514         for (alloc_end += gi->nr_units / upa;
2515              alloc < alloc_end; alloc++) {
2516             if (!(alloc % apl)) {
2517                 pr_cont("\n");
2518                 printk("%spcpu-alloc: ", lvl);
2519             }
2520             pr_cont("[%0*d] ", group_width, group);
2521 
2522             for (unit_end += upa; unit < unit_end; unit++)
2523                 if (gi->cpu_map[unit] != NR_CPUS)
2524                     pr_cont("%0*d ",
2525                         cpu_width, gi->cpu_map[unit]);
2526                 else
2527                     pr_cont("%s ", empty_str);
2528         }
2529     }
2530     pr_cont("\n");
2531 }
2532 
2533 /**
2534  * pcpu_setup_first_chunk - initialize the first percpu chunk
2535  * @ai: pcpu_alloc_info describing how the percpu area is shaped
2536  * @base_addr: mapped address
2537  *
2538  * Initialize the first percpu chunk which contains the kernel static
2539  * percpu area.  This function is to be called from arch percpu area
2540  * setup path.
2541  *
2542  * @ai contains all information necessary to initialize the first
2543  * chunk and prime the dynamic percpu allocator.
2544  *
2545  * @ai->static_size is the size of static percpu area.
2546  *
2547  * @ai->reserved_size, if non-zero, specifies the amount of bytes to
2548  * reserve after the static area in the first chunk.  This reserves
2549  * the first chunk such that it's available only through reserved
2550  * percpu allocation.  This is primarily used to serve module percpu
2551  * static areas on architectures where the addressing model has
2552  * limited offset range for symbol relocations to guarantee module
2553  * percpu symbols fall inside the relocatable range.
2554  *
2555  * @ai->dyn_size determines the number of bytes available for dynamic
2556  * allocation in the first chunk.  The area between @ai->static_size +
2557  * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
2558  *
2559  * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
2560  * and equal to or larger than @ai->static_size + @ai->reserved_size +
2561  * @ai->dyn_size.
2562  *
2563  * @ai->atom_size is the allocation atom size and used as alignment
2564  * for vm areas.
2565  *
2566  * @ai->alloc_size is the allocation size and always multiple of
2567  * @ai->atom_size.  This is larger than @ai->atom_size if
2568  * @ai->unit_size is larger than @ai->atom_size.
2569  *
2570  * @ai->nr_groups and @ai->groups describe virtual memory layout of
2571  * percpu areas.  Units which should be colocated are put into the
2572  * same group.  Dynamic VM areas will be allocated according to these
2573  * groupings.  If @ai->nr_groups is zero, a single group containing
2574  * all units is assumed.
2575  *
2576  * The caller should have mapped the first chunk at @base_addr and
2577  * copied static data to each unit.
2578  *
2579  * The first chunk will always contain a static and a dynamic region.
2580  * However, the static region is not managed by any chunk.  If the first
2581  * chunk also contains a reserved region, it is served by two chunks -
2582  * one for the reserved region and one for the dynamic region.  They
2583  * share the same vm, but use offset regions in the area allocation map.
2584  * The chunk serving the dynamic region is circulated in the chunk slots
2585  * and available for dynamic allocation like any other chunk.
2586  */
2587 void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
2588                    void *base_addr)
2589 {
2590     size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2591     size_t static_size, dyn_size;
2592     struct pcpu_chunk *chunk;
2593     unsigned long *group_offsets;
2594     size_t *group_sizes;
2595     unsigned long *unit_off;
2596     unsigned int cpu;
2597     int *unit_map;
2598     int group, unit, i;
2599     int map_size;
2600     unsigned long tmp_addr;
2601     size_t alloc_size;
2602 
2603 #define PCPU_SETUP_BUG_ON(cond) do {                    \
2604     if (unlikely(cond)) {                       \
2605         pr_emerg("failed to initialize, %s\n", #cond);      \
2606         pr_emerg("cpu_possible_mask=%*pb\n",            \
2607              cpumask_pr_args(cpu_possible_mask));       \
2608         pcpu_dump_alloc_info(KERN_EMERG, ai);           \
2609         BUG();                          \
2610     }                               \
2611 } while (0)
2612 
2613     /* sanity checks */
2614     PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
2615 #ifdef CONFIG_SMP
2616     PCPU_SETUP_BUG_ON(!ai->static_size);
2617     PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
2618 #endif
2619     PCPU_SETUP_BUG_ON(!base_addr);
2620     PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
2621     PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
2622     PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
2623     PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
2624     PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
2625     PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
2626     PCPU_SETUP_BUG_ON(!ai->dyn_size);
2627     PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
2628     PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
2629                 IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
2630     PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
2631 
2632     /* process group information and build config tables accordingly */
2633     alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
2634     group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2635     if (!group_offsets)
2636         panic("%s: Failed to allocate %zu bytes\n", __func__,
2637               alloc_size);
2638 
2639     alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
2640     group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2641     if (!group_sizes)
2642         panic("%s: Failed to allocate %zu bytes\n", __func__,
2643               alloc_size);
2644 
2645     alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
2646     unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2647     if (!unit_map)
2648         panic("%s: Failed to allocate %zu bytes\n", __func__,
2649               alloc_size);
2650 
2651     alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
2652     unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2653     if (!unit_off)
2654         panic("%s: Failed to allocate %zu bytes\n", __func__,
2655               alloc_size);
2656 
2657     for (cpu = 0; cpu < nr_cpu_ids; cpu++)
2658         unit_map[cpu] = UINT_MAX;
2659 
2660     pcpu_low_unit_cpu = NR_CPUS;
2661     pcpu_high_unit_cpu = NR_CPUS;
2662 
2663     for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
2664         const struct pcpu_group_info *gi = &ai->groups[group];
2665 
2666         group_offsets[group] = gi->base_offset;
2667         group_sizes[group] = gi->nr_units * ai->unit_size;
2668 
2669         for (i = 0; i < gi->nr_units; i++) {
2670             cpu = gi->cpu_map[i];
2671             if (cpu == NR_CPUS)
2672                 continue;
2673 
2674             PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
2675             PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
2676             PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
2677 
2678             unit_map[cpu] = unit + i;
2679             unit_off[cpu] = gi->base_offset + i * ai->unit_size;
2680 
2681             /* determine low/high unit_cpu */
2682             if (pcpu_low_unit_cpu == NR_CPUS ||
2683                 unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
2684                 pcpu_low_unit_cpu = cpu;
2685             if (pcpu_high_unit_cpu == NR_CPUS ||
2686                 unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
2687                 pcpu_high_unit_cpu = cpu;
2688         }
2689     }
2690     pcpu_nr_units = unit;
2691 
2692     for_each_possible_cpu(cpu)
2693         PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
2694 
2695     /* we're done parsing the input, undefine BUG macro and dump config */
2696 #undef PCPU_SETUP_BUG_ON
2697     pcpu_dump_alloc_info(KERN_DEBUG, ai);
2698 
2699     pcpu_nr_groups = ai->nr_groups;
2700     pcpu_group_offsets = group_offsets;
2701     pcpu_group_sizes = group_sizes;
2702     pcpu_unit_map = unit_map;
2703     pcpu_unit_offsets = unit_off;
2704 
2705     /* determine basic parameters */
2706     pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
2707     pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
2708     pcpu_atom_size = ai->atom_size;
2709     pcpu_chunk_struct_size = struct_size(chunk, populated,
2710                          BITS_TO_LONGS(pcpu_unit_pages));
2711 
2712     pcpu_stats_save_ai(ai);
2713 
2714     /*
2715      * Allocate chunk slots.  The slots after the active slots are:
2716      *   sidelined_slot - isolated, depopulated chunks
2717      *   free_slot - fully free chunks
2718      *   to_depopulate_slot - isolated, chunks to depopulate
2719      */
2720     pcpu_sidelined_slot = __pcpu_size_to_slot(pcpu_unit_size) + 1;
2721     pcpu_free_slot = pcpu_sidelined_slot + 1;
2722     pcpu_to_depopulate_slot = pcpu_free_slot + 1;
2723     pcpu_nr_slots = pcpu_to_depopulate_slot + 1;
2724     pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
2725                       sizeof(pcpu_chunk_lists[0]),
2726                       SMP_CACHE_BYTES);
2727     if (!pcpu_chunk_lists)
2728         panic("%s: Failed to allocate %zu bytes\n", __func__,
2729               pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]));
2730 
2731     for (i = 0; i < pcpu_nr_slots; i++)
2732         INIT_LIST_HEAD(&pcpu_chunk_lists[i]);
2733 
2734     /*
2735      * The end of the static region needs to be aligned with the
2736      * minimum allocation size as this offsets the reserved and
2737      * dynamic region.  The first chunk ends page aligned by
2738      * expanding the dynamic region, therefore the dynamic region
2739      * can be shrunk to compensate while still staying above the
2740      * configured sizes.
2741      */
2742     static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
2743     dyn_size = ai->dyn_size - (static_size - ai->static_size);
2744 
2745     /*
2746      * Initialize first chunk.
2747      * If the reserved_size is non-zero, this initializes the reserved
2748      * chunk.  If the reserved_size is zero, the reserved chunk is NULL
2749      * and the dynamic region is initialized here.  The first chunk,
2750      * pcpu_first_chunk, will always point to the chunk that serves
2751      * the dynamic region.
2752      */
2753     tmp_addr = (unsigned long)base_addr + static_size;
2754     map_size = ai->reserved_size ?: dyn_size;
2755     chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2756 
2757     /* init dynamic chunk if necessary */
2758     if (ai->reserved_size) {
2759         pcpu_reserved_chunk = chunk;
2760 
2761         tmp_addr = (unsigned long)base_addr + static_size +
2762                ai->reserved_size;
2763         map_size = dyn_size;
2764         chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2765     }
2766 
2767     /* link the first chunk in */
2768     pcpu_first_chunk = chunk;
2769     pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
2770     pcpu_chunk_relocate(pcpu_first_chunk, -1);
2771 
2772     /* include all regions of the first chunk */
2773     pcpu_nr_populated += PFN_DOWN(size_sum);
2774 
2775     pcpu_stats_chunk_alloc();
2776     trace_percpu_create_chunk(base_addr);
2777 
2778     /* we're done */
2779     pcpu_base_addr = base_addr;
2780 }
2781 
2782 #ifdef CONFIG_SMP
2783 
2784 const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
2785     [PCPU_FC_AUTO]  = "auto",
2786     [PCPU_FC_EMBED] = "embed",
2787     [PCPU_FC_PAGE]  = "page",
2788 };
2789 
2790 enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
2791 
2792 static int __init percpu_alloc_setup(char *str)
2793 {
2794     if (!str)
2795         return -EINVAL;
2796 
2797     if (0)
2798         /* nada */;
2799 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
2800     else if (!strcmp(str, "embed"))
2801         pcpu_chosen_fc = PCPU_FC_EMBED;
2802 #endif
2803 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2804     else if (!strcmp(str, "page"))
2805         pcpu_chosen_fc = PCPU_FC_PAGE;
2806 #endif
2807     else
2808         pr_warn("unknown allocator %s specified\n", str);
2809 
2810     return 0;
2811 }
2812 early_param("percpu_alloc", percpu_alloc_setup);
2813 
2814 /*
2815  * pcpu_embed_first_chunk() is used by the generic percpu setup.
2816  * Build it if the arch config needs it or if the generic setup is
2817  * going to be used.
2818  */
2819 #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
2820     !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
2821 #define BUILD_EMBED_FIRST_CHUNK
2822 #endif
2823 
2824 /* build pcpu_page_first_chunk() iff needed by the arch config */
2825 #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
2826 #define BUILD_PAGE_FIRST_CHUNK
2827 #endif
2828 
2829 /* pcpu_build_alloc_info() is used by both embed and page first chunk */
2830 #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
2831 /**
2832  * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
2833  * @reserved_size: the size of reserved percpu area in bytes
2834  * @dyn_size: minimum free size for dynamic allocation in bytes
2835  * @atom_size: allocation atom size
2836  * @cpu_distance_fn: callback to determine distance between cpus, optional
2837  *
2838  * This function determines grouping of units, their mappings to cpus
2839  * and other parameters considering needed percpu size, allocation
2840  * atom size and distances between CPUs.
2841  *
2842  * Group sizes are always multiples of the atom size, and CPUs which are
2843  * LOCAL_DISTANCE from each other both ways are grouped together and share
2844  * space for units in the same group.  The returned configuration is
2845  * guaranteed to have CPUs on different nodes in different groups and
2846  * >=75% usage of the allocated virtual address space.
2847  *
2848  * RETURNS:
2849  * On success, pointer to the new allocation_info is returned.  On
2850  * failure, ERR_PTR value is returned.
2851  */
2852 static struct pcpu_alloc_info * __init __flatten pcpu_build_alloc_info(
2853                 size_t reserved_size, size_t dyn_size,
2854                 size_t atom_size,
2855                 pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
2856 {
2857     static int group_map[NR_CPUS] __initdata;
2858     static int group_cnt[NR_CPUS] __initdata;
2859     static struct cpumask mask __initdata;
2860     const size_t static_size = __per_cpu_end - __per_cpu_start;
2861     int nr_groups = 1, nr_units = 0;
2862     size_t size_sum, min_unit_size, alloc_size;
2863     int upa, max_upa, best_upa; /* units_per_alloc */
2864     int last_allocs, group, unit;
2865     unsigned int cpu, tcpu;
2866     struct pcpu_alloc_info *ai;
2867     unsigned int *cpu_map;
2868 
2869     /* this function may be called multiple times */
2870     memset(group_map, 0, sizeof(group_map));
2871     memset(group_cnt, 0, sizeof(group_cnt));
2872     cpumask_clear(&mask);
2873 
2874     /* calculate size_sum and ensure dyn_size is enough for early alloc */
2875     size_sum = PFN_ALIGN(static_size + reserved_size +
2876                 max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
2877     dyn_size = size_sum - static_size - reserved_size;
2878 
2879     /*
2880      * Determine min_unit_size, alloc_size and max_upa such that
2881      * alloc_size is a multiple of atom_size and is the smallest size
2882      * which can accommodate 4k aligned segments that are equal to
2883      * or larger than min_unit_size.
2884      */
2885     min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
2886 
2887     /* determine the maximum # of units that can fit in an allocation */
2888     alloc_size = roundup(min_unit_size, atom_size);
2889     upa = alloc_size / min_unit_size;
2890     while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2891         upa--;
2892     max_upa = upa;
2893 
2894     cpumask_copy(&mask, cpu_possible_mask);
2895 
2896     /* group cpus according to their proximity */
2897     for (group = 0; !cpumask_empty(&mask); group++) {
2898         /* pop the group's first cpu */
2899         cpu = cpumask_first(&mask);
2900         group_map[cpu] = group;
2901         group_cnt[group]++;
2902         cpumask_clear_cpu(cpu, &mask);
2903 
2904         for_each_cpu(tcpu, &mask) {
2905             if (!cpu_distance_fn ||
2906                 (cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE &&
2907                  cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) {
2908                 group_map[tcpu] = group;
2909                 group_cnt[group]++;
2910                 cpumask_clear_cpu(tcpu, &mask);
2911             }
2912         }
2913     }
2914     nr_groups = group;
2915 
2916     /*
2917      * Wasted space is caused by a ratio imbalance of upa to group_cnt.
2918      * Expand the unit_size until we use >= 75% of the units allocated.
2919      * This is related to atom_size, which can be much larger than the unit_size.
2920      */
2921     last_allocs = INT_MAX;
2922     best_upa = 0;
2923     for (upa = max_upa; upa; upa--) {
2924         int allocs = 0, wasted = 0;
2925 
2926         if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2927             continue;
2928 
2929         for (group = 0; group < nr_groups; group++) {
2930             int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
2931             allocs += this_allocs;
2932             wasted += this_allocs * upa - group_cnt[group];
2933         }
2934 
2935         /*
2936          * Don't accept if wastage is over 1/3.  The
2937          * greater-than comparison ensures upa==1 always
2938          * passes the following check.
2939          */
2940         if (wasted > num_possible_cpus() / 3)
2941             continue;
2942 
2943         /* and then don't consume more memory */
2944         if (allocs > last_allocs)
2945             break;
2946         last_allocs = allocs;
2947         best_upa = upa;
2948     }
2949     BUG_ON(!best_upa);
2950     upa = best_upa;
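    /*
     * Continuing the hypothetical example above (group_cnt = { 4, 4 },
     * 8 possible cpus, max_upa = 8): upa = 8 needs one allocation per
     * group but wastes (8 - 4) * 2 = 8 units, more than 8 / 3, so it is
     * rejected.  upa = 7..5 don't divide 2M evenly.  upa = 4 wastes
     * nothing with still one allocation per group and becomes best_upa;
     * at upa = 2 the allocation count doubles, so the loop breaks there.
     */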
2951 
2952     /* allocate and fill alloc_info */
2953     for (group = 0; group < nr_groups; group++)
2954         nr_units += roundup(group_cnt[group], upa);
2955 
2956     ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
2957     if (!ai)
2958         return ERR_PTR(-ENOMEM);
2959     cpu_map = ai->groups[0].cpu_map;
2960 
2961     for (group = 0; group < nr_groups; group++) {
2962         ai->groups[group].cpu_map = cpu_map;
2963         cpu_map += roundup(group_cnt[group], upa);
2964     }
2965 
2966     ai->static_size = static_size;
2967     ai->reserved_size = reserved_size;
2968     ai->dyn_size = dyn_size;
2969     ai->unit_size = alloc_size / upa;
2970     ai->atom_size = atom_size;
2971     ai->alloc_size = alloc_size;
2972 
2973     for (group = 0, unit = 0; group < nr_groups; group++) {
2974         struct pcpu_group_info *gi = &ai->groups[group];
2975 
2976         /*
2977          * Initialize base_offset as if all groups are located
2978          * back-to-back.  The caller should update this to
2979          * reflect actual allocation.
2980          */
2981         gi->base_offset = unit * ai->unit_size;
2982 
2983         for_each_possible_cpu(cpu)
2984             if (group_map[cpu] == group)
2985                 gi->cpu_map[gi->nr_units++] = cpu;
2986         gi->nr_units = roundup(gi->nr_units, upa);
2987         unit += gi->nr_units;
2988     }
2989     BUG_ON(unit != nr_units);
2990 
2991     return ai;
2992 }
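/*
 * With the hypothetical numbers used in the comments above this would
 * yield ai->nr_groups = 2 and 8 units of 512K each (alloc_size 2M /
 * best_upa 4), with both groups' cpu_map slices carved back-to-back
 * out of the trailing cpu_map array of the alloc_info.
 */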
2993 
2994 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align,
2995                    pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
2996 {
2997     const unsigned long goal = __pa(MAX_DMA_ADDRESS);
2998 #ifdef CONFIG_NUMA
2999     int node = NUMA_NO_NODE;
3000     void *ptr;
3001 
3002     if (cpu_to_nd_fn)
3003         node = cpu_to_nd_fn(cpu);
3004 
3005     if (node == NUMA_NO_NODE || !node_online(node) || !NODE_DATA(node)) {
3006         ptr = memblock_alloc_from(size, align, goal);
3007         pr_info("cpu %d has no node %d or node-local memory\n",
3008             cpu, node);
3009         pr_debug("per cpu data for cpu%d %zu bytes at 0x%llx\n",
3010              cpu, size, (u64)__pa(ptr));
3011     } else {
3012         ptr = memblock_alloc_try_nid(size, align, goal,
3013                          MEMBLOCK_ALLOC_ACCESSIBLE,
3014                          node);
3015 
3016         pr_debug("per cpu data for cpu%d %zu bytes on node%d at 0x%llx\n",
3017              cpu, size, node, (u64)__pa(ptr));
3018     }
3019     return ptr;
3020 #else
3021     return memblock_alloc_from(size, align, goal);
3022 #endif
3023 }
3024 
3025 static void __init pcpu_fc_free(void *ptr, size_t size)
3026 {
3027     memblock_free(ptr, size);
3028 }
3029 #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
3030 
3031 #if defined(BUILD_EMBED_FIRST_CHUNK)
3032 /**
3033  * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
3034  * @reserved_size: the size of reserved percpu area in bytes
3035  * @dyn_size: minimum free size for dynamic allocation in bytes
3036  * @atom_size: allocation atom size
3037  * @cpu_distance_fn: callback to determine distance between cpus, optional
3038  * @cpu_to_nd_fn: callback to convert cpu to its node, optional
3039  *
3040  * This is a helper to ease setting up the embedded first percpu chunk
3041  * and can be called where pcpu_setup_first_chunk() is expected.
3042  *
3043  * If this function is used to set up the first chunk, it is allocated
3044  * by calling pcpu_fc_alloc() and used as-is without being mapped into
3045  * the vmalloc area.  Allocations are always whole multiples of @atom_size
3046  * aligned to @atom_size.
3047  *
3048  * This enables the first chunk to piggyback on the linear physical
3049  * mapping which often uses a larger page size.  Please note that this
3050  * can result in a very sparse cpu->unit mapping on NUMA machines, thus
3051  * requiring a large vmalloc address space.  Don't use this allocator if
3052  * vmalloc space is not orders of magnitude larger than the distances
3053  * between node memory addresses (i.e. 32-bit NUMA machines).
3054  *
3055  * @dyn_size specifies the minimum dynamic area size.
3056  *
3057  * If the needed size is smaller than the minimum or specified unit
3058  * size, the leftover is returned using pcpu_fc_free.
3059  *
3060  * RETURNS:
3061  * 0 on success, -errno on failure.
3062  */
3063 int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
3064                   size_t atom_size,
3065                   pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
3066                   pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
3067 {
3068     void *base = (void *)ULONG_MAX;
3069     void **areas = NULL;
3070     struct pcpu_alloc_info *ai;
3071     size_t size_sum, areas_size;
3072     unsigned long max_distance;
3073     int group, i, highest_group, rc = 0;
3074 
3075     ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
3076                    cpu_distance_fn);
3077     if (IS_ERR(ai))
3078         return PTR_ERR(ai);
3079 
3080     size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
3081     areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
3082 
3083     areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
3084     if (!areas) {
3085         rc = -ENOMEM;
3086         goto out_free;
3087     }
3088 
3089     /* allocate, copy and determine base address & max_distance */
3090     highest_group = 0;
3091     for (group = 0; group < ai->nr_groups; group++) {
3092         struct pcpu_group_info *gi = &ai->groups[group];
3093         unsigned int cpu = NR_CPUS;
3094         void *ptr;
3095 
3096         for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
3097             cpu = gi->cpu_map[i];
3098         BUG_ON(cpu == NR_CPUS);
3099 
3100         /* allocate space for the whole group */
3101         ptr = pcpu_fc_alloc(cpu, gi->nr_units * ai->unit_size, atom_size, cpu_to_nd_fn);
3102         if (!ptr) {
3103             rc = -ENOMEM;
3104             goto out_free_areas;
3105         }
3106         /* kmemleak tracks the percpu allocations separately */
3107         kmemleak_ignore_phys(__pa(ptr));
3108         areas[group] = ptr;
3109 
3110         base = min(ptr, base);
3111         if (ptr > areas[highest_group])
3112             highest_group = group;
3113     }
3114     max_distance = areas[highest_group] - base;
3115     max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
3116 
3117     /* warn if the maximum distance is more than 75% of vmalloc space */
3118     if (max_distance > VMALLOC_TOTAL * 3 / 4) {
3119         pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
3120                 max_distance, VMALLOC_TOTAL);
3121 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
3122         /* and fail if we have fallback */
3123         rc = -EINVAL;
3124         goto out_free_areas;
3125 #endif
3126     }
3127 
3128     /*
3129      * Copy data and free unused parts.  This should happen after all
3130      * allocations are complete; otherwise, we may end up with
3131      * overlapping groups.
3132      */
3133     for (group = 0; group < ai->nr_groups; group++) {
3134         struct pcpu_group_info *gi = &ai->groups[group];
3135         void *ptr = areas[group];
3136 
3137         for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
3138             if (gi->cpu_map[i] == NR_CPUS) {
3139                 /* unused unit, free whole */
3140                 pcpu_fc_free(ptr, ai->unit_size);
3141                 continue;
3142             }
3143             /* copy and return the unused part */
3144             memcpy(ptr, __per_cpu_load, ai->static_size);
3145             pcpu_fc_free(ptr + size_sum, ai->unit_size - size_sum);
3146         }
3147     }
3148 
3149     /* base address is now known, determine group base offsets */
3150     for (group = 0; group < ai->nr_groups; group++) {
3151         ai->groups[group].base_offset = areas[group] - base;
3152     }
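    /*
     * From here on a given unit lives at
     * base + gi->base_offset + unit_index_in_group * ai->unit_size;
     * pcpu_setup_first_chunk() records these per-cpu offsets (see
     * pcpu_unit_offsets[] used in setup_per_cpu_areas() below) so that
     * every access reduces to a single offset from pcpu_base_addr.
     */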
3153 
3154     pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
3155         PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
3156         ai->dyn_size, ai->unit_size);
3157 
3158     pcpu_setup_first_chunk(ai, base);
3159     goto out_free;
3160 
3161 out_free_areas:
3162     for (group = 0; group < ai->nr_groups; group++)
3163         if (areas[group])
3164             pcpu_fc_free(areas[group],
3165                 ai->groups[group].nr_units * ai->unit_size);
3166 out_free:
3167     pcpu_free_alloc_info(ai);
3168     if (areas)
3169         memblock_free(areas, areas_size);
3170     return rc;
3171 }
3172 #endif /* BUILD_EMBED_FIRST_CHUNK */
3173 
3174 #ifdef BUILD_PAGE_FIRST_CHUNK
3175 #include <asm/pgalloc.h>
3176 
3177 #ifndef P4D_TABLE_SIZE
3178 #define P4D_TABLE_SIZE PAGE_SIZE
3179 #endif
3180 
3181 #ifndef PUD_TABLE_SIZE
3182 #define PUD_TABLE_SIZE PAGE_SIZE
3183 #endif
3184 
3185 #ifndef PMD_TABLE_SIZE
3186 #define PMD_TABLE_SIZE PAGE_SIZE
3187 #endif
3188 
3189 #ifndef PTE_TABLE_SIZE
3190 #define PTE_TABLE_SIZE PAGE_SIZE
3191 #endif
3192 void __init __weak pcpu_populate_pte(unsigned long addr)
3193 {
3194     pgd_t *pgd = pgd_offset_k(addr);
3195     p4d_t *p4d;
3196     pud_t *pud;
3197     pmd_t *pmd;
3198 
3199     if (pgd_none(*pgd)) {
3200         p4d_t *new;
3201 
3202         new = memblock_alloc(P4D_TABLE_SIZE, P4D_TABLE_SIZE);
3203         if (!new)
3204             goto err_alloc;
3205         pgd_populate(&init_mm, pgd, new);
3206     }
3207 
3208     p4d = p4d_offset(pgd, addr);
3209     if (p4d_none(*p4d)) {
3210         pud_t *new;
3211 
3212         new = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
3213         if (!new)
3214             goto err_alloc;
3215         p4d_populate(&init_mm, p4d, new);
3216     }
3217 
3218     pud = pud_offset(p4d, addr);
3219     if (pud_none(*pud)) {
3220         pmd_t *new;
3221 
3222         new = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
3223         if (!new)
3224             goto err_alloc;
3225         pud_populate(&init_mm, pud, new);
3226     }
3227 
3228     pmd = pmd_offset(pud, addr);
3229     if (!pmd_present(*pmd)) {
3230         pte_t *new;
3231 
3232         new = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
3233         if (!new)
3234             goto err_alloc;
3235         pmd_populate_kernel(&init_mm, pmd, new);
3236     }
3237 
3238     return;
3239 
3240 err_alloc:
3241     panic("%s: Failed to allocate memory\n", __func__);
3242 }
3243 
3244 /**
3245  * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
3246  * @reserved_size: the size of reserved percpu area in bytes
3247  * @cpu_to_nd_fn: callback to convert cpu to its node, optional
3248  *
3249  * This is a helper to ease setting up the page-remapped first percpu
3250  * chunk and can be called where pcpu_setup_first_chunk() is expected.
3251  *
3252  * This is the basic allocator.  The static percpu area is allocated
3253  * page-by-page into the vmalloc area.
3254  *
3255  * RETURNS:
3256  * 0 on success, -errno on failure.
3257  */
3258 int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
3259 {
3260     static struct vm_struct vm;
3261     struct pcpu_alloc_info *ai;
3262     char psize_str[16];
3263     int unit_pages;
3264     size_t pages_size;
3265     struct page **pages;
3266     int unit, i, j, rc = 0;
3267     int upa;
3268     int nr_g0_units;
3269 
3270     snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
3271 
3272     ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
3273     if (IS_ERR(ai))
3274         return PTR_ERR(ai);
3275     BUG_ON(ai->nr_groups != 1);
3276     upa = ai->alloc_size/ai->unit_size;
3277     nr_g0_units = roundup(num_possible_cpus(), upa);
3278     if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
3279         pcpu_free_alloc_info(ai);
3280         return -EINVAL;
3281     }
3282 
3283     unit_pages = ai->unit_size >> PAGE_SHIFT;
3284 
3285     /* unaligned allocations can't be freed, round up to page size */
3286     pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
3287                    sizeof(pages[0]));
3288     pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
3289     if (!pages)
3290         panic("%s: Failed to allocate %zu bytes\n", __func__,
3291               pages_size);
3292 
3293     /* allocate pages */
3294     j = 0;
3295     for (unit = 0; unit < num_possible_cpus(); unit++) {
3296         unsigned int cpu = ai->groups[0].cpu_map[unit];
3297         for (i = 0; i < unit_pages; i++) {
3298             void *ptr;
3299 
3300             ptr = pcpu_fc_alloc(cpu, PAGE_SIZE, PAGE_SIZE, cpu_to_nd_fn);
3301             if (!ptr) {
3302                 pr_warn("failed to allocate %s page for cpu%u\n",
3303                         psize_str, cpu);
3304                 goto enomem;
3305             }
3306             /* kmemleak tracks the percpu allocations separately */
3307             kmemleak_ignore_phys(__pa(ptr));
3308             pages[j++] = virt_to_page(ptr);
3309         }
3310     }
3311 
3312     /* allocate vm area, map the pages and copy static data */
3313     vm.flags = VM_ALLOC;
3314     vm.size = num_possible_cpus() * ai->unit_size;
3315     vm_area_register_early(&vm, PAGE_SIZE);
3316 
3317     for (unit = 0; unit < num_possible_cpus(); unit++) {
3318         unsigned long unit_addr =
3319             (unsigned long)vm.addr + unit * ai->unit_size;
3320 
3321         for (i = 0; i < unit_pages; i++)
3322             pcpu_populate_pte(unit_addr + (i << PAGE_SHIFT));
3323 
3324         /* pte already populated, the following shouldn't fail */
3325         rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
3326                       unit_pages);
3327         if (rc < 0)
3328             panic("failed to map percpu area, err=%d\n", rc);
3329 
3330         /*
3331          * FIXME: Archs with virtual cache should flush local
3332          * cache for the linear mapping here - something
3333          * equivalent to flush_cache_vmap() on the local cpu.
3334          * flush_cache_vmap() can't be used as most supporting
3335          * data structures are not set up yet.
3336          */
3337 
3338         /* copy static data */
3339         memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
3340     }
3341 
3342     /* we're ready, commit */
3343     pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
3344         unit_pages, psize_str, ai->static_size,
3345         ai->reserved_size, ai->dyn_size);
3346 
3347     pcpu_setup_first_chunk(ai, vm.addr);
3348     goto out_free_ar;
3349 
3350 enomem:
3351     while (--j >= 0)
3352         pcpu_fc_free(page_address(pages[j]), PAGE_SIZE);
3353     rc = -ENOMEM;
3354 out_free_ar:
3355     memblock_free(pages, pages_size);
3356     pcpu_free_alloc_info(ai);
3357     return rc;
3358 }
3359 #endif /* BUILD_PAGE_FIRST_CHUNK */
3360 
3361 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
3362 /*
3363  * Generic SMP percpu area setup.
3364  *
3365  * The embedding helper is used because its behavior closely resembles
3366  * the original non-dynamic generic percpu area setup.  This is
3367  * important because many archs have addressing restrictions and might
3368  * fail if the percpu area is located far away from the previous
3369  * location.  As an added bonus, in non-NUMA cases, embedding is
3370  * generally a good idea TLB-wise because the percpu area can piggyback
3371  * on the physical linear memory mapping, which uses large page
3372  * mappings on applicable archs.
3373  */
3374 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
3375 EXPORT_SYMBOL(__per_cpu_offset);
3376 
3377 void __init setup_per_cpu_areas(void)
3378 {
3379     unsigned long delta;
3380     unsigned int cpu;
3381     int rc;
3382 
3383     /*
3384      * Always reserve area for module percpu variables.  That's
3385      * what the legacy allocator did.
3386      */
3387     rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, PERCPU_DYNAMIC_RESERVE,
3388                     PAGE_SIZE, NULL, NULL);
3389     if (rc < 0)
3390         panic("Failed to initialize percpu areas.");
3391 
3392     delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
3393     for_each_possible_cpu(cpu)
3394         __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
3395 }
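/*
 * A minimal sketch of how the offsets set up above are consumed,
 * assuming the generic helpers (per_cpu_offset() from
 * asm-generic/percpu.h and per_cpu_ptr() from linux/percpu-defs.h),
 * which architectures may override and which are simplified here:
 *
 *   #define per_cpu_offset(x)  (__per_cpu_offset[x])
 *   #define per_cpu_ptr(ptr, cpu) \
 *       SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
 *
 * i.e. a static percpu variable's link-time address plus
 * __per_cpu_offset[cpu] points at that cpu's copy inside the first
 * chunk set up above.
 */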
3396 #endif  /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
3397 
3398 #else   /* CONFIG_SMP */
3399 
3400 /*
3401  * UP percpu area setup.
3402  *
3403  * UP always uses the km-based percpu allocator with identity mapping.
3404  * Static percpu variables are indistinguishable from the usual static
3405  * variables and don't require any special preparation.
3406  */
3407 void __init setup_per_cpu_areas(void)
3408 {
3409     const size_t unit_size =
3410         roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
3411                      PERCPU_DYNAMIC_RESERVE));
3412     struct pcpu_alloc_info *ai;
3413     void *fc;
3414 
3415     ai = pcpu_alloc_alloc_info(1, 1);
3416     fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
3417     if (!ai || !fc)
3418         panic("Failed to allocate memory for percpu areas.");
3419     /* kmemleak tracks the percpu allocations separately */
3420     kmemleak_ignore_phys(__pa(fc));
3421 
3422     ai->dyn_size = unit_size;
3423     ai->unit_size = unit_size;
3424     ai->atom_size = unit_size;
3425     ai->alloc_size = unit_size;
3426     ai->groups[0].nr_units = 1;
3427     ai->groups[0].cpu_map[0] = 0;
3428 
3429     pcpu_setup_first_chunk(ai, fc);
3430     pcpu_free_alloc_info(ai);
3431 }
3432 
3433 #endif  /* CONFIG_SMP */
3434 
3435 /*
3436  * pcpu_nr_pages - calculate total number of populated backing pages
3437  *
3438  * This reflects the number of pages populated to back chunks.  Metadata is
3439  * excluded from the number exposed in meminfo as the number of backing pages
3440  * scales with the number of cpus and can quickly outweigh the memory used for
3441  * metadata.  It also keeps this calculation nice and simple.
3442  *
3443  * RETURNS:
3444  * Total number of populated backing pages in use by the allocator.
3445  */
3446 unsigned long pcpu_nr_pages(void)
3447 {
3448     return pcpu_nr_populated * pcpu_nr_units;
3449 }
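/*
 * For example, /proc/meminfo reports this count as its "Percpu:" line
 * (in kB), which is why the value scales with the number of possible
 * cpus rather than with the amount of live percpu data alone.
 */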
3450 
3451 /*
3452  * The percpu allocator is initialized early during boot when neither slab nor
3453  * workqueue is available.  Plug async management until everything is up
3454  * and running.
3455  */
3456 static int __init percpu_enable_async(void)
3457 {
3458     pcpu_async_enabled = true;
3459     return 0;
3460 }
3461 subsys_initcall(percpu_enable_async);