// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu-km.c - kernel memory based chunk allocation
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * Chunks are allocated as a contiguous kernel memory using gfp
 * allocation.  This is to be used on nommu architectures.
 *
 * To use percpu-km,
 *
 * - define CONFIG_NEED_PER_CPU_KM from the arch Kconfig.
 *
 * - CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK must not be defined.  It's
 *   not compatible with PER_CPU_KM.  EMBED_FIRST_CHUNK should work
 *   fine.
 *
 * - NUMA is not supported.  When setting up the first chunk,
 *   @cpu_distance_fn should be NULL or report all CPUs to be nearer
 *   than or at LOCAL_DISTANCE.
 *
 * - It's best if the chunk size is power of two multiple of
 *   PAGE_SIZE.  Because each chunk is allocated as a contiguous
 *   kernel memory block using alloc_pages(), memory will be wasted if
 *   chunk size is not aligned.  percpu-km code will whine about it.
 */

#if defined(CONFIG_SMP) && defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#error "contiguous percpu allocation is incompatible with paged first chunk"
#endif

#include <linux/log2.h>

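/*
 * A km chunk is backed by a single physically contiguous allocation,
 * so there are no per-page mappings to set up or tear down: the
 * TLB-flush, populate and depopulate callbacks below are all no-ops.
 */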
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
				      int page_start, int page_end)
{
	/* nothing to flush: chunk pages are never vmapped or unmapped */
}

static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
			       int page_start, int page_end, gfp_t gfp)
{
	/* chunks are created fully populated, nothing to do */
	return 0;
}

static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
				  int page_start, int page_end)
{
	/* contiguous chunk memory can't be released page by page */
}

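/*
 * Allocate a chunk as one physically contiguous block of
 * 2^order_base_2(nr_pages) pages, tag each page with its owning
 * chunk, and mark the whole range populated up front.
 */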
static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
	struct pcpu_chunk *chunk;
	struct page *pages;
	unsigned long flags;
	int i;

	chunk = pcpu_alloc_chunk(gfp);
	if (!chunk)
		return NULL;

	pages = alloc_pages(gfp, order_base_2(nr_pages));
	if (!pages) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	for (i = 0; i < nr_pages; i++)
		pcpu_set_page_chunk(nth_page(pages, i), chunk);

	chunk->data = pages;
	chunk->base_addr = page_address(pages);

	spin_lock_irqsave(&pcpu_lock, flags);
	pcpu_chunk_populated(chunk, 0, nr_pages);
	spin_unlock_irqrestore(&pcpu_lock, flags);

	pcpu_stats_chunk_alloc();
	trace_percpu_create_chunk(chunk->base_addr);

	return chunk;
}

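/*
 * Tear down a chunk: release the contiguous page block (if the chunk
 * got far enough to attach one) and then the chunk metadata, the
 * reverse of pcpu_create_chunk().
 */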
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;

	if (!chunk)
		return;

	pcpu_stats_chunk_dealloc();
	trace_percpu_destroy_chunk(chunk->base_addr);

	if (chunk->data)
		__free_pages(chunk->data, order_base_2(nr_pages));
	pcpu_free_chunk(chunk);
}

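/*
 * km chunks live in the kernel's linear mapping, so a percpu address
 * converts to its page with plain virt_to_page().
 */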
static struct page *pcpu_addr_to_page(void *addr)
{
	return virt_to_page(addr);
}

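/*
 * Validate the first chunk's layout at boot: percpu-km supports only
 * a single group (no NUMA) and works best when the chunk size is a
 * power-of-two number of pages, since the backing block is allocated
 * with a power-of-two page order.
 */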
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
	size_t nr_pages, alloc_pages;

	/* all units must be in a single group */
	if (ai->nr_groups != 1) {
		pr_crit("can't handle more than one group\n");
		return -EINVAL;
	}

	nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT;
	alloc_pages = roundup_pow_of_two(nr_pages);

	if (alloc_pages > nr_pages)
		pr_warn("wasting %zu pages per chunk\n",
			alloc_pages - nr_pages);

	return 0;
}

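/*
 * Chunk reclaim depends on depopulating individual pages, which a
 * physically contiguous km chunk can't do, so never offer chunks up
 * for reclaim.
 */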
static bool pcpu_should_reclaim_chunk(struct pcpu_chunk *chunk)
{
	return false;
}