Back to home page

LXR

 
 

    


0001 /*
0002  * mm/percpu-km.c - kernel memory based chunk allocation
0003  *
0004  * Copyright (C) 2010       SUSE Linux Products GmbH
0005  * Copyright (C) 2010       Tejun Heo <tj@kernel.org>
0006  *
0007  * This file is released under the GPLv2.
0008  *
0009  * Chunks are allocated as a contiguous kernel memory using gfp
0010  * allocation.  This is to be used on nommu architectures.
0011  *
0012  * To use percpu-km,
0013  *
0014  * - define CONFIG_NEED_PER_CPU_KM from the arch Kconfig.
0015  *
0016  * - CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK must not be defined.  It's
0017  *   not compatible with PER_CPU_KM.  EMBED_FIRST_CHUNK should work
0018  *   fine.
0019  *
0020  * - NUMA is not supported.  When setting up the first chunk,
0021  *   @cpu_distance_fn should be NULL or report all CPUs to be nearer
0022  *   than or at LOCAL_DISTANCE.
0023  *
0024  * - It's best if the chunk size is power of two multiple of
0025  *   PAGE_SIZE.  Because each chunk is allocated as a contiguous
0026  *   kernel memory block using alloc_pages(), memory will be wasted if
0027  *   chunk size is not aligned.  percpu-km code will whine about it.
0028  */
0029 
/*
 * percpu-km hands out chunks as single contiguous alloc_pages() blocks;
 * a page-granular (vmalloc-mapped) first chunk is fundamentally
 * incompatible with that, so refuse the combination at build time.
 */
#if defined(CONFIG_SMP) && defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#error "contiguous percpu allocation is incompatible with paged first chunk"
#endif
0033 
0034 #include <linux/log2.h>
0035 
/*
 * Populate pages [@page_start, @page_end) of @chunk.
 *
 * percpu-km backs the whole chunk with one contiguous allocation at
 * creation time (pcpu_create_chunk() marks every page populated), so
 * every page already has backing memory and there is nothing to do.
 * Always returns 0 (success).
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
			       int page_start, int page_end)
{
	return 0;
}
0041 
/*
 * Depopulate pages [@page_start, @page_end) of @chunk.
 *
 * Backing pages are only ever released as one block when the whole
 * chunk is torn down in pcpu_destroy_chunk(), so per-range
 * depopulation is intentionally a no-op.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
				  int page_start, int page_end)
{
	/* nada */
}
0047 
0048 static struct pcpu_chunk *pcpu_create_chunk(void)
0049 {
0050     const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
0051     struct pcpu_chunk *chunk;
0052     struct page *pages;
0053     int i;
0054 
0055     chunk = pcpu_alloc_chunk();
0056     if (!chunk)
0057         return NULL;
0058 
0059     pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages));
0060     if (!pages) {
0061         pcpu_free_chunk(chunk);
0062         return NULL;
0063     }
0064 
0065     for (i = 0; i < nr_pages; i++)
0066         pcpu_set_page_chunk(nth_page(pages, i), chunk);
0067 
0068     chunk->data = pages;
0069     chunk->base_addr = page_address(pages) - pcpu_group_offsets[0];
0070 
0071     spin_lock_irq(&pcpu_lock);
0072     pcpu_chunk_populated(chunk, 0, nr_pages);
0073     spin_unlock_irq(&pcpu_lock);
0074 
0075     return chunk;
0076 }
0077 
0078 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
0079 {
0080     const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
0081 
0082     if (chunk && chunk->data)
0083         __free_pages(chunk->data, order_base_2(nr_pages));
0084     pcpu_free_chunk(chunk);
0085 }
0086 
/*
 * Translate a percpu chunk address to its backing struct page.
 * Chunks come straight from alloc_pages() here, so virt_to_page()
 * applies directly — no vmalloc-style lookup is needed.
 */
static struct page *pcpu_addr_to_page(void *addr)
{
	return virt_to_page(addr);
}
0091 
0092 static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
0093 {
0094     size_t nr_pages, alloc_pages;
0095 
0096     /* all units must be in a single group */
0097     if (ai->nr_groups != 1) {
0098         pr_crit("can't handle more than one group\n");
0099         return -EINVAL;
0100     }
0101 
0102     nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT;
0103     alloc_pages = roundup_pow_of_two(nr_pages);
0104 
0105     if (alloc_pages > nr_pages)
0106         pr_warn("wasting %zu pages per chunk\n",
0107             alloc_pages - nr_pages);
0108 
0109     return 0;
0110 }