// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * HIGHMEM API:
 *
 * The kmap() API may sleep, hence its mappings are referred to as
 * "permanent maps". It allows mapping LAST_PKMAP pages, using
 * @last_pkmap_nr as the cursor for book-keeping.
 *
 * kmap_atomic() can't sleep (it calls pagefault_disable()), so it provides
 * short-lived "temporary mappings", which historically were implemented as
 * fixmaps (compile-time addresses etc.). Their book-keeping is done per CPU.
 *
 * Both facts combined (preemption disabled and per-cpu allocation) mean
 * the total number of concurrent fixmaps is bounded by the maximum number
 * of such allocations in a single control path. Thus KM_TYPE_NR (another
 * historic relic) is a smallish number which caps the max per-cpu fixmaps.
 *
 * ARC HIGHMEM Details
 *
 * - the kernel vaddr space from 0x7z to 0x8z (i.e. 0x7000_0000 to
 *   0x8000_0000, currently used by vmalloc/module) is now shared between
 *   vmalloc and kmap (non-overlapping though)
 *
 * - fixmap and pkmap each use a dedicated page table, hooked up to the
 *   swapper PGD. This means each has only 1 PGDIR_SIZE worth of kvaddr
 *   mappings, i.e. 2M of kvaddr space for a typical config (8K page and
 *   11:8:13 traversal split)
 *
 * - the fixed KMAP slots for kmap_local/atomic() require KM_MAX_IDX slots
 *   per CPU, so the number of CPUs sharing a single PTE page is limited
 *
 * - pkmap, being preemptible, could in theory handle more than 256
 *   concurrent mappings. However the generic pkmap code, map_new_virtual(),
 *   doesn't traverse the PGD and only works with a single page table,
 *   @pkmap_page_table, hence the limit
 */

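/*
 * Usage sketch (generic highmem API, for illustration only; not called
 * from this file) contrasting the two flavours described above:
 *
 *	// sleepable context: "permanent" map, may block in map_new_virtual()
 *	void *vaddr = kmap(page);
 *	memcpy(vaddr, buf, PAGE_SIZE);
 *	kunmap(page);
 *
 *	// atomic context: short-lived per-cpu fixmap slot; no sleeping
 *	// allowed between the map/unmap pair
 *	void *vaddr = kmap_atomic(page);
 *	clear_page(vaddr);
 *	kunmap_atomic(vaddr);
 */
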
extern pte_t *pkmap_page_table;

static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
{
	pmd_t *pmd_k = pmd_off_k(kvaddr);
	pte_t *pte_k;

	/* Grab a zeroed lowmem page to serve as the PTE page */
	pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!pte_k)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	/* Hook it into the swapper PGD slot covering @kvaddr */
	pmd_populate_kernel(&init_mm, pmd_k, pte_k);
	return pte_k;
}

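/*
 * For illustration, the pmd_populate_kernel() call above conceptually
 * reduces to pointing the kernel page-table slot that covers @kvaddr at
 * the freshly allocated, zeroed PTE page (the exact primitive is
 * arch-specific):
 *
 *	set_pmd(pmd_k, __pmd((unsigned long)pte_k));
 *
 * After this, kmap/fixmap code can install PTEs for that window directly
 * into the page pointed to by pte_k.
 */
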
void __init kmap_init(void)
{
	/* Due to recursive include hell, we can't do this in processor.h */
	BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));

	/* Each region must fit within its single dedicated PTE page */
	BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
	BUILD_BUG_ON(FIX_KMAP_SLOTS > PTRS_PER_PTE);

	pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
	alloc_kmap_pgtable(FIXMAP_BASE);
}
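
/*
 * Kernel vaddr layout implied by the first check above (a sketch,
 * assuming the fixmap and pkmap windows are carved out back to back just
 * below PAGE_OFFSET, as on a typical ARC config):
 *
 *	... vmalloc ... | FIXMAP_BASE   | PKMAP_BASE    | PAGE_OFFSET
 *	                  (FIXMAP_SIZE)   (PKMAP_SIZE)    (lowmem)
 *
 * kmap_init() hands each of the two windows its own PTE page via
 * alloc_kmap_pgtable().
 */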